repo_name
string
path
string
copies
string
size
string
content
string
license
string
zhantss/linux
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1388
28417
/* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation * * See LICENSE.qlcnic for copyright and licensing details. */ #include <linux/types.h> #include "qlcnic.h" #define QLC_DCB_NUM_PARAM 3 #define QLC_DCB_LOCAL_IDX 0 #define QLC_DCB_OPER_IDX 1 #define QLC_DCB_PEER_IDX 2 #define QLC_DCB_GET_MAP(V) (1 << V) #define QLC_DCB_FW_VER 0x2 #define QLC_DCB_MAX_TC 0x8 #define QLC_DCB_MAX_APP 0x8 #define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC #define QLC_DCB_MAX_PG QLC_DCB_MAX_TC #define QLC_DCB_TSA_SUPPORT(V) (V & 0x1) #define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1) #define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf) #define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf) #define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf) #define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf) #define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7) #define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff) #define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff) #define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff) #define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1) #define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff) #define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff) #define QLC_DCB_LOCAL_PARAM_FWID 0x3 #define QLC_DCB_OPER_PARAM_FWID 0x1 #define QLC_DCB_PEER_PARAM_FWID 0x2 #define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf) #define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1) #define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1) #define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24) #define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf) #define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1) #define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1) #define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7) #define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X) #define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210) static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops; static void qlcnic_dcb_aen_work(struct work_struct *); static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *); static inline void 
__qlcnic_init_dcbnl_ops(struct qlcnic_dcb *); static void __qlcnic_dcb_free(struct qlcnic_dcb *); static int __qlcnic_dcb_attach(struct qlcnic_dcb *); static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *); static void __qlcnic_dcb_get_info(struct qlcnic_dcb *); static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *); static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8); static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *); static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *); static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *); static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8); static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *); static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *); struct qlcnic_dcb_capability { bool tsa_capability; bool ets_capability; u8 max_num_tc; u8 max_ets_tc; u8 max_pfc_tc; u8 dcb_capability; }; struct qlcnic_dcb_param { u32 hdr_prio_pfc_map[2]; u32 prio_pg_map[2]; u32 pg_bw_map[2]; u32 pg_tsa_map[2]; u32 app[QLC_DCB_MAX_APP]; }; struct qlcnic_dcb_mbx_params { /* 1st local, 2nd operational 3rd remote */ struct qlcnic_dcb_param type[3]; u32 prio_tc_map; }; struct qlcnic_82xx_dcb_param_mbx_le { __le32 hdr_prio_pfc_map[2]; __le32 prio_pg_map[2]; __le32 pg_bw_map[2]; __le32 pg_tsa_map[2]; __le32 app[QLC_DCB_MAX_APP]; }; enum qlcnic_dcb_selector { QLC_SELECTOR_DEF = 0x0, QLC_SELECTOR_ETHER, QLC_SELECTOR_TCP, QLC_SELECTOR_UDP, }; enum qlcnic_dcb_prio_type { QLC_PRIO_NONE = 0, QLC_PRIO_GROUP, QLC_PRIO_LINK, }; enum qlcnic_dcb_pfc_type { QLC_PFC_DISABLED = 0, QLC_PFC_FULL, QLC_PFC_TX, QLC_PFC_RX }; struct qlcnic_dcb_prio_cfg { bool valid; enum qlcnic_dcb_pfc_type pfc_type; }; struct qlcnic_dcb_pg_cfg { bool valid; u8 total_bw_percent; /* of Link/ port BW */ u8 prio_count; u8 tsa_type; }; struct qlcnic_dcb_tc_cfg { bool valid; struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO]; enum qlcnic_dcb_prio_type 
prio_type; /* always prio_link */ u8 link_percent; /* % of link bandwidth */ u8 bwg_percent; /* % of BWG's bandwidth */ u8 up_tc_map; u8 pgid; }; struct qlcnic_dcb_app { bool valid; enum qlcnic_dcb_selector selector; u16 protocol; u8 priority; }; struct qlcnic_dcb_cee { struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC]; struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG]; struct qlcnic_dcb_app app[QLC_DCB_MAX_APP]; bool tc_param_valid; bool pfc_mode_enable; }; struct qlcnic_dcb_cfg { /* 0 - local, 1 - operational, 2 - remote */ struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM]; struct qlcnic_dcb_capability capability; u32 version; }; static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, .free = __qlcnic_dcb_free, .attach = __qlcnic_dcb_attach, .query_hw_capability = __qlcnic_dcb_query_hw_capability, .get_info = __qlcnic_dcb_get_info, .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability, .query_cee_param = qlcnic_83xx_dcb_query_cee_param, .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg, .aen_handler = qlcnic_83xx_dcb_aen_handler, }; static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, .free = __qlcnic_dcb_free, .attach = __qlcnic_dcb_attach, .query_hw_capability = __qlcnic_dcb_query_hw_capability, .get_info = __qlcnic_dcb_get_info, .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability, .query_cee_param = qlcnic_82xx_dcb_query_cee_param, .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg, .aen_handler = qlcnic_82xx_dcb_aen_handler, }; static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val) { if (qlcnic_82xx_check(adapter)) return QLC_82XX_DCB_GET_NUMAPP(val); else return QLC_83XX_DCB_GET_NUMAPP(val); } static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter, u32 val) { if (qlcnic_82xx_check(adapter)) return QLC_82XX_DCB_PFC_VALID(val); else return QLC_83XX_DCB_PFC_VALID(val); } static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter, u32 val) { if 
(qlcnic_82xx_check(adapter)) return QLC_82XX_DCB_TSA_VALID(val); else return QLC_83XX_DCB_TSA_VALID(val); } static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter, u32 val) { if (qlcnic_82xx_check(adapter)) return QLC_82XX_DCB_GET_PRIOMAP_APP(val); else return QLC_83XX_DCB_GET_PRIOMAP_APP(val); } static int qlcnic_dcb_prio_count(u8 up_tc_map) { int j; for (j = 0; j < QLC_DCB_MAX_TC; j++) if (up_tc_map & QLC_DCB_GET_MAP(j)) break; return j; } static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb) { if (test_bit(QLCNIC_DCB_STATE, &dcb->state)) dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops; } static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) { if (qlcnic_82xx_check(adapter)) adapter->dcb->ops = &qlcnic_82xx_dcb_ops; else if (qlcnic_83xx_check(adapter)) adapter->dcb->ops = &qlcnic_83xx_dcb_ops; } int qlcnic_register_dcb(struct qlcnic_adapter *adapter) { struct qlcnic_dcb *dcb; if (qlcnic_sriov_vf_check(adapter)) return 0; dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC); if (!dcb) return -ENOMEM; adapter->dcb = dcb; dcb->adapter = adapter; qlcnic_set_dcb_ops(adapter); dcb->state = 0; return 0; } static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb) { struct qlcnic_adapter *adapter; if (!dcb) return; adapter = dcb->adapter; while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) usleep_range(10000, 11000); cancel_delayed_work_sync(&dcb->aen_work); if (dcb->wq) { destroy_workqueue(dcb->wq); dcb->wq = NULL; } kfree(dcb->cfg); dcb->cfg = NULL; kfree(dcb->param); dcb->param = NULL; kfree(dcb); adapter->dcb = NULL; } static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb) { qlcnic_dcb_get_hw_capability(dcb); qlcnic_dcb_get_cee_cfg(dcb); } static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb) { int err = 0; INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work); dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); if (!dcb->wq) { dev_err(&dcb->adapter->pdev->dev, "DCB workqueue allocation failed. 
DCB will be disabled\n"); return -1; } dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC); if (!dcb->cfg) { err = -ENOMEM; goto out_free_wq; } dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC); if (!dcb->param) { err = -ENOMEM; goto out_free_cfg; } return 0; out_free_cfg: kfree(dcb->cfg); dcb->cfg = NULL; out_free_wq: destroy_workqueue(dcb->wq); dcb->wq = NULL; return err; } static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf) { struct qlcnic_adapter *adapter = dcb->adapter; struct qlcnic_cmd_args cmd; u32 mbx_out; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP); if (err) return err; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to query DCBX capability, err %d\n", err); } else { mbx_out = cmd.rsp.arg[1]; if (buf) memcpy(buf, &mbx_out, sizeof(u32)); } qlcnic_free_mbx_args(&cmd); return err; } static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val) { struct qlcnic_dcb_capability *cap = &dcb->cfg->capability; u32 mbx_out; int err; memset(cap, 0, sizeof(struct qlcnic_dcb_capability)); err = qlcnic_dcb_query_hw_capability(dcb, (char *)val); if (err) return err; mbx_out = *val; if (QLC_DCB_TSA_SUPPORT(mbx_out)) cap->tsa_capability = true; if (QLC_DCB_ETS_SUPPORT(mbx_out)) cap->ets_capability = true; cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out); cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out); cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out); if (cap->max_num_tc > QLC_DCB_MAX_TC || cap->max_ets_tc > cap->max_num_tc || cap->max_pfc_tc > cap->max_num_tc) { dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n"); return -EINVAL; } return err; } static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb) { struct qlcnic_dcb_cfg *cfg = dcb->cfg; struct qlcnic_dcb_capability *cap; u32 mbx_out; int err; err = __qlcnic_dcb_get_capability(dcb, &mbx_out); if (err) return err; cap = &cfg->capability; 
cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED; if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) set_bit(QLCNIC_DCB_STATE, &dcb->state); return err; } static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type) { u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le); struct qlcnic_adapter *adapter = dcb->adapter; struct qlcnic_82xx_dcb_param_mbx_le *prsp_le; struct device *dev = &adapter->pdev->dev; dma_addr_t cardrsp_phys_addr; struct qlcnic_dcb_param rsp; struct qlcnic_cmd_args cmd; u64 phys_addr; void *addr; int err, i; switch (type) { case QLC_DCB_LOCAL_PARAM_FWID: case QLC_DCB_OPER_PARAM_FWID: case QLC_DCB_PEER_PARAM_FWID: break; default: dev_err(dev, "Invalid parameter type %d\n", type); return -EINVAL; } addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL); if (addr == NULL) return -ENOMEM; prsp_le = addr; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM); if (err) goto out_free_rsp; phys_addr = cardrsp_phys_addr; cmd.req.arg[1] = size | (type << 16); cmd.req.arg[2] = MSD(phys_addr); cmd.req.arg[3] = LSD(phys_addr); err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(dev, "Failed to query DCBX parameter, err %d\n", err); goto out; } memset(&rsp, 0, sizeof(struct qlcnic_dcb_param)); rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]); rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]); rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]); rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]); rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]); rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]); rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]); rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]); for (i = 0; i < QLC_DCB_MAX_APP; i++) rsp.app[i] = le32_to_cpu(prsp_le->app[i]); if (buf) memcpy(buf, &rsp, size); out: qlcnic_free_mbx_args(&cmd); out_free_rsp: dma_free_coherent(dev, size, 
addr, cardrsp_phys_addr); return err; } static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) { struct qlcnic_dcb_mbx_params *mbx; int err; mbx = dcb->param; if (!mbx) return 0; err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0], QLC_DCB_LOCAL_PARAM_FWID); if (err) return err; err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1], QLC_DCB_OPER_PARAM_FWID); if (err) return err; err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2], QLC_DCB_PEER_PARAM_FWID); if (err) return err; mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP; qlcnic_dcb_data_cee_param_map(dcb->adapter); return err; } static void qlcnic_dcb_aen_work(struct work_struct *work) { struct qlcnic_dcb *dcb; dcb = container_of(work, struct qlcnic_dcb, aen_work.work); qlcnic_dcb_get_cee_cfg(dcb); clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state); } static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data) { if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) return; queue_delayed_work(dcb->wq, &dcb->aen_work, 0); } static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb) { struct qlcnic_dcb_capability *cap = &dcb->cfg->capability; u32 mbx_out; int err; err = __qlcnic_dcb_get_capability(dcb, &mbx_out); if (err) return err; if (mbx_out & BIT_2) cap->dcb_capability = DCB_CAP_DCBX_VER_CEE; if (mbx_out & BIT_3) cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE; if (cap->dcb_capability) cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED; if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) set_bit(QLCNIC_DCB_STATE, &dcb->state); return err; } static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 idx) { struct qlcnic_adapter *adapter = dcb->adapter; struct qlcnic_dcb_mbx_params mbx_out; int err, i, j, k, max_app, size; struct qlcnic_dcb_param *each; struct qlcnic_cmd_args cmd; u32 val; char *p; size = 0; memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params)); memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params)); err = 
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM); if (err) return err; cmd.req.arg[0] |= QLC_DCB_FW_VER << 29; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to query DCBX param, err %d\n", err); goto out; } mbx_out.prio_tc_map = cmd.rsp.arg[1]; p = memcpy(buf, &mbx_out, sizeof(u32)); k = 2; p += sizeof(u32); for (j = 0; j < QLC_DCB_NUM_PARAM; j++) { each = &mbx_out.type[j]; each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++]; each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++]; each->prio_pg_map[0] = cmd.rsp.arg[k++]; each->prio_pg_map[1] = cmd.rsp.arg[k++]; each->pg_bw_map[0] = cmd.rsp.arg[k++]; each->pg_bw_map[1] = cmd.rsp.arg[k++]; each->pg_tsa_map[0] = cmd.rsp.arg[k++]; each->pg_tsa_map[1] = cmd.rsp.arg[k++]; val = each->hdr_prio_pfc_map[0]; max_app = qlcnic_dcb_get_num_app(adapter, val); for (i = 0; i < max_app; i++) each->app[i] = cmd.rsp.arg[i + k]; size = 16 * sizeof(u32); memcpy(p, &each->hdr_prio_pfc_map[0], size); p += size; if (j == 0) k = 18; else k = 34; } out: qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) { int err; err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0); if (err) return err; qlcnic_dcb_data_cee_param_map(dcb->adapter); return err; } static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data) { u32 *val = data; if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) return; if (*val & BIT_8) set_bit(QLCNIC_DCB_STATE, &dcb->state); else clear_bit(QLCNIC_DCB_STATE, &dcb->state); queue_delayed_work(dcb->wq, &dcb->aen_work, 0); } static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx, struct qlcnic_dcb_param *each, struct qlcnic_dcb_cee *type) { struct qlcnic_dcb_tc_cfg *tc_cfg; u8 i, tc, pgid; for (i = 0; i < QLC_DCB_MAX_PRIO; i++) { tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i); tc_cfg = &type->tc_cfg[tc]; tc_cfg->valid = true; tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i); if 
(QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) && type->pfc_mode_enable) { tc_cfg->prio_cfg[i].valid = true; tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL; } if (i < 4) pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i); else pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i); tc_cfg->pgid = pgid; tc_cfg->prio_type = QLC_PRIO_LINK; type->pg_cfg[tc_cfg->pgid].prio_count++; } } static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each, struct qlcnic_dcb_cee *type) { struct qlcnic_dcb_pg_cfg *pg_cfg; u8 i, tsa, bw_per; for (i = 0; i < QLC_DCB_MAX_PG; i++) { pg_cfg = &type->pg_cfg[i]; pg_cfg->valid = true; if (i < 4) { bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i); tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i); } else { bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i); tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i); } pg_cfg->total_bw_percent = bw_per; pg_cfg->tsa_type = tsa; } } static void qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx, struct qlcnic_dcb_param *each, struct qlcnic_dcb_cee *type) { struct qlcnic_dcb_app *app; u8 i, num_app, map, cnt; struct dcb_app new_app; num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]); for (i = 0; i < num_app; i++) { app = &type->app[i]; app->valid = true; /* Only for CEE (-1) */ app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1; new_app.selector = app->selector; app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]); new_app.protocol = app->protocol; map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]); cnt = qlcnic_dcb_prio_count(map); if (cnt >= QLC_DCB_MAX_TC) cnt = 0; app->priority = cnt; new_app.priority = cnt; if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops) dcb_setapp(adapter->netdev, &new_app); } } static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx) { struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param; struct qlcnic_dcb_param *each = &mbx->type[idx]; struct qlcnic_dcb_cfg *cfg = 
adapter->dcb->cfg; struct qlcnic_dcb_cee *type = &cfg->type[idx]; type->tc_param_valid = false; type->pfc_mode_enable = false; memset(type->tc_cfg, 0, sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC); memset(type->pg_cfg, 0, sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC); if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) && cfg->capability.max_pfc_tc) type->pfc_mode_enable = true; if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) && cfg->capability.max_ets_tc) type->tc_param_valid = true; qlcnic_dcb_fill_cee_tc_params(mbx, each, type); qlcnic_dcb_fill_cee_pg_params(each, type); qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type); } static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter) { int i; for (i = 0; i < QLC_DCB_NUM_PARAM; i++) qlcnic_dcb_map_cee_params(adapter, i); dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); } static u8 qlcnic_dcb_get_state(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state); } static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr) { memcpy(addr, netdev->perm_addr, netdev->addr_len); } static void qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, u8 *pgid, u8 *bw_per, u8 *up_tc_map) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_tc_cfg *tc_cfg, *temp; struct qlcnic_dcb_cee *type; u8 i, cnt, pg; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; *prio = *pgid = *bw_per = *up_tc_map = 0; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || !type->tc_param_valid) return; if (tc < 0 || (tc >= QLC_DCB_MAX_TC)) return; tc_cfg = &type->tc_cfg[tc]; if (!tc_cfg->valid) return; *pgid = tc_cfg->pgid; *prio = tc_cfg->prio_type; *up_tc_map = tc_cfg->up_tc_map; pg = *pgid; for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) { temp = &type->tc_cfg[i]; if (temp->valid && (pg == temp->pgid)) cnt++; } tc_cfg->bwg_percent 
= (100 / cnt); *bw_per = tc_cfg->bwg_percent; } static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_pg_cfg *pgcfg; struct qlcnic_dcb_cee *type; *bw_pct = 0; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || !type->tc_param_valid) return; if (pgid < 0 || pgid >= QLC_DCB_MAX_PG) return; pgcfg = &type->pg_cfg[pgid]; if (!pgcfg->valid) return; *bw_pct = pgcfg->total_bw_percent; } static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_tc_cfg *tc_cfg; u8 val = QLC_DCB_GET_MAP(prio); struct qlcnic_dcb_cee *type; u8 i; *setting = 0; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || !type->pfc_mode_enable) return; for (i = 0; i < QLC_DCB_MAX_TC; i++) { tc_cfg = &type->tc_cfg[i]; if (!tc_cfg->valid) continue; if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid)) *setting = tc_cfg->prio_cfg[prio].pfc_type; } } static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid, u8 *cap) { struct qlcnic_adapter *adapter = netdev_priv(netdev); if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; switch (capid) { case DCB_CAP_ATTR_PG: case DCB_CAP_ATTR_UP2TC: case DCB_CAP_ATTR_PFC: case DCB_CAP_ATTR_GSP: *cap = true; break; case DCB_CAP_ATTR_PG_TCS: case DCB_CAP_ATTR_PFC_TCS: *cap = 0x80; /* 8 priorities for PGs */ break; case DCB_CAP_ATTR_DCBX: *cap = adapter->dcb->cfg->capability.dcb_capability; break; default: *cap = false; } return 0; } static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return -EINVAL; switch (attr) { case 
DCB_NUMTCS_ATTR_PG: *num = cfg->capability.max_ets_tc; return 0; case DCB_NUMTCS_ATTR_PFC: *num = cfg->capability.max_pfc_tc; return 0; default: return -EINVAL; } } static int qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct dcb_app app = { .selector = idtype, .protocol = id, }; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return -EINVAL; return dcb_getapp(netdev, &app); } static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb *dcb = adapter->dcb; if (!test_bit(QLCNIC_DCB_STATE, &dcb->state)) return 0; return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable; } static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; return cfg->capability.dcb_capability; } static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cee *type; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 1; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; *flag = 0; switch (fid) { case DCB_FEATCFG_ATTR_PG: if (type->tc_param_valid) *flag |= DCB_FEATCFG_ENABLE; else *flag |= DCB_FEATCFG_ERROR; break; case DCB_FEATCFG_ATTR_PFC: if (type->pfc_mode_enable) { if (type->tc_cfg[0].prio_cfg[0].pfc_type) *flag |= DCB_FEATCFG_ENABLE; } else { *flag |= DCB_FEATCFG_ERROR; } break; case DCB_FEATCFG_ATTR_APP: *flag |= DCB_FEATCFG_ENABLE; break; default: netdev_err(netdev, "Invalid Feature ID %d\n", fid); return 1; } return 0; } static inline void qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type, u8 *pgid, u8 *bw_pct, u8 *up_map) { *prio_type = *pgid = *bw_pct = *up_map = 0; } static inline void qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device 
*netdev, int pgid, u8 *bw_pct) { *bw_pct = 0; } static int qlcnic_dcb_peer_app_info(struct net_device *netdev, struct dcb_peer_app_info *info, u16 *app_count) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cee *peer; int i; memset(info, 0, sizeof(*info)); *app_count = 0; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; for (i = 0; i < QLC_DCB_MAX_APP; i++) { if (peer->app[i].valid) (*app_count)++; } return 0; } static int qlcnic_dcb_peer_app_table(struct net_device *netdev, struct dcb_app *table) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cee *peer; struct qlcnic_dcb_app *app; int i, j; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) { app = &peer->app[i]; if (!app->valid) continue; table[j].selector = app->selector; table[j].priority = app->priority; table[j++].protocol = app->protocol; } return 0; } static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev, struct cee_pg *pg) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cee *peer; u8 i, j, k, map; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) { if (!peer->pg_cfg[i].valid) continue; pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent; for (k = 0; k < QLC_DCB_MAX_TC; k++) { if (peer->tc_cfg[i].valid && (peer->tc_cfg[i].pgid == i)) { map = peer->tc_cfg[i].up_tc_map; pg->prio_pg[j++] = map; break; } } } return 0; } static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev, struct cee_pfc *pfc) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; struct qlcnic_dcb_tc_cfg *tc; struct qlcnic_dcb_cee *peer; u8 i, setting, prio; pfc->pfc_en = 0; if (!test_bit(QLCNIC_DCB_STATE, 
&adapter->dcb->state)) return 0; peer = &cfg->type[QLC_DCB_PEER_IDX]; for (i = 0; i < QLC_DCB_MAX_TC; i++) { tc = &peer->tc_cfg[i]; prio = qlcnic_dcb_prio_count(tc->up_tc_map); setting = 0; qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting); if (setting) pfc->pfc_en |= QLC_DCB_GET_MAP(i); } pfc->tcs_supported = cfg->capability.max_pfc_tc; return 0; } static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = { .getstate = qlcnic_dcb_get_state, .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr, .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx, .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx, .getpfccfg = qlcnic_dcb_get_pfc_cfg, .getcap = qlcnic_dcb_get_capability, .getnumtcs = qlcnic_dcb_get_num_tcs, .getapp = qlcnic_dcb_get_app, .getpfcstate = qlcnic_dcb_get_pfc_state, .getdcbx = qlcnic_dcb_get_dcbx, .getfeatcfg = qlcnic_dcb_get_feat_cfg, .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx, .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx, .peer_getappinfo = qlcnic_dcb_peer_app_info, .peer_getapptable = qlcnic_dcb_peer_app_table, .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg, .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc, };
gpl-2.0
Stane1983/amlogic-m6_m8
net/ceph/osdmap.c
1644
29162
#include <linux/ceph/ceph_debug.h> #include <linux/module.h> #include <linux/slab.h> #include <asm/div64.h> #include <linux/ceph/libceph.h> #include <linux/ceph/osdmap.h> #include <linux/ceph/decode.h> #include <linux/crush/hash.h> #include <linux/crush/mapper.h> char *ceph_osdmap_state_str(char *str, int len, int state) { if (!len) return str; if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP)) snprintf(str, len, "exists, up"); else if (state & CEPH_OSD_EXISTS) snprintf(str, len, "exists"); else if (state & CEPH_OSD_UP) snprintf(str, len, "up"); else snprintf(str, len, "doesn't exist"); return str; } /* maps */ static int calc_bits_of(unsigned int t) { int b = 0; while (t) { t = t >> 1; b++; } return b; } /* * the foo_mask is the smallest value 2^n-1 that is >= foo. */ static void calc_pg_masks(struct ceph_pg_pool_info *pi) { pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1; pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1; } /* * decode crush map */ static int crush_decode_uniform_bucket(void **p, void *end, struct crush_bucket_uniform *b) { dout("crush_decode_uniform_bucket %p to %p\n", *p, end); ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); b->item_weight = ceph_decode_32(p); return 0; bad: return -EINVAL; } static int crush_decode_list_bucket(void **p, void *end, struct crush_bucket_list *b) { int j; dout("crush_decode_list_bucket %p to %p\n", *p, end); b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); if (b->item_weights == NULL) return -ENOMEM; b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); if (b->sum_weights == NULL) return -ENOMEM; ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); for (j = 0; j < b->h.size; j++) { b->item_weights[j] = ceph_decode_32(p); b->sum_weights[j] = ceph_decode_32(p); } return 0; bad: return -EINVAL; } static int crush_decode_tree_bucket(void **p, void *end, struct crush_bucket_tree *b) { int j; dout("crush_decode_tree_bucket %p to %p\n", *p, end); 
ceph_decode_32_safe(p, end, b->num_nodes, bad); b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS); if (b->node_weights == NULL) return -ENOMEM; ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad); for (j = 0; j < b->num_nodes; j++) b->node_weights[j] = ceph_decode_32(p); return 0; bad: return -EINVAL; } static int crush_decode_straw_bucket(void **p, void *end, struct crush_bucket_straw *b) { int j; dout("crush_decode_straw_bucket %p to %p\n", *p, end); b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); if (b->item_weights == NULL) return -ENOMEM; b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); if (b->straws == NULL) return -ENOMEM; ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); for (j = 0; j < b->h.size; j++) { b->item_weights[j] = ceph_decode_32(p); b->straws[j] = ceph_decode_32(p); } return 0; bad: return -EINVAL; } static int skip_name_map(void **p, void *end) { int len; ceph_decode_32_safe(p, end, len ,bad); while (len--) { int strlen; *p += sizeof(u32); ceph_decode_32_safe(p, end, strlen, bad); *p += strlen; } return 0; bad: return -EINVAL; } static struct crush_map *crush_decode(void *pbyval, void *end) { struct crush_map *c; int err = -EINVAL; int i, j; void **p = &pbyval; void *start = pbyval; u32 magic; u32 num_name_maps; dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p)); c = kzalloc(sizeof(*c), GFP_NOFS); if (c == NULL) return ERR_PTR(-ENOMEM); /* set tunables to default values */ c->choose_local_tries = 2; c->choose_local_fallback_tries = 5; c->choose_total_tries = 19; c->chooseleaf_descend_once = 0; ceph_decode_need(p, end, 4*sizeof(u32), bad); magic = ceph_decode_32(p); if (magic != CRUSH_MAGIC) { pr_err("crush_decode magic %x != current %x\n", (unsigned int)magic, (unsigned int)CRUSH_MAGIC); goto bad; } c->max_buckets = ceph_decode_32(p); c->max_rules = ceph_decode_32(p); c->max_devices = ceph_decode_32(p); c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS); if 
(c->buckets == NULL) goto badmem; c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS); if (c->rules == NULL) goto badmem; /* buckets */ for (i = 0; i < c->max_buckets; i++) { int size = 0; u32 alg; struct crush_bucket *b; ceph_decode_32_safe(p, end, alg, bad); if (alg == 0) { c->buckets[i] = NULL; continue; } dout("crush_decode bucket %d off %x %p to %p\n", i, (int)(*p-start), *p, end); switch (alg) { case CRUSH_BUCKET_UNIFORM: size = sizeof(struct crush_bucket_uniform); break; case CRUSH_BUCKET_LIST: size = sizeof(struct crush_bucket_list); break; case CRUSH_BUCKET_TREE: size = sizeof(struct crush_bucket_tree); break; case CRUSH_BUCKET_STRAW: size = sizeof(struct crush_bucket_straw); break; default: err = -EINVAL; goto bad; } BUG_ON(size == 0); b = c->buckets[i] = kzalloc(size, GFP_NOFS); if (b == NULL) goto badmem; ceph_decode_need(p, end, 4*sizeof(u32), bad); b->id = ceph_decode_32(p); b->type = ceph_decode_16(p); b->alg = ceph_decode_8(p); b->hash = ceph_decode_8(p); b->weight = ceph_decode_32(p); b->size = ceph_decode_32(p); dout("crush_decode bucket size %d off %x %p to %p\n", b->size, (int)(*p-start), *p, end); b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS); if (b->items == NULL) goto badmem; b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS); if (b->perm == NULL) goto badmem; b->perm_n = 0; ceph_decode_need(p, end, b->size*sizeof(u32), bad); for (j = 0; j < b->size; j++) b->items[j] = ceph_decode_32(p); switch (b->alg) { case CRUSH_BUCKET_UNIFORM: err = crush_decode_uniform_bucket(p, end, (struct crush_bucket_uniform *)b); if (err < 0) goto bad; break; case CRUSH_BUCKET_LIST: err = crush_decode_list_bucket(p, end, (struct crush_bucket_list *)b); if (err < 0) goto bad; break; case CRUSH_BUCKET_TREE: err = crush_decode_tree_bucket(p, end, (struct crush_bucket_tree *)b); if (err < 0) goto bad; break; case CRUSH_BUCKET_STRAW: err = crush_decode_straw_bucket(p, end, (struct crush_bucket_straw *)b); if (err < 0) goto bad; break; } } /* rules */ 
dout("rule vec is %p\n", c->rules); for (i = 0; i < c->max_rules; i++) { u32 yes; struct crush_rule *r; ceph_decode_32_safe(p, end, yes, bad); if (!yes) { dout("crush_decode NO rule %d off %x %p to %p\n", i, (int)(*p-start), *p, end); c->rules[i] = NULL; continue; } dout("crush_decode rule %d off %x %p to %p\n", i, (int)(*p-start), *p, end); /* len */ ceph_decode_32_safe(p, end, yes, bad); #if BITS_PER_LONG == 32 err = -EINVAL; if (yes > (ULONG_MAX - sizeof(*r)) / sizeof(struct crush_rule_step)) goto bad; #endif r = c->rules[i] = kmalloc(sizeof(*r) + yes*sizeof(struct crush_rule_step), GFP_NOFS); if (r == NULL) goto badmem; dout(" rule %d is at %p\n", i, r); r->len = yes; ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */ ceph_decode_need(p, end, r->len*3*sizeof(u32), bad); for (j = 0; j < r->len; j++) { r->steps[j].op = ceph_decode_32(p); r->steps[j].arg1 = ceph_decode_32(p); r->steps[j].arg2 = ceph_decode_32(p); } } /* ignore trailing name maps. */ for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) { err = skip_name_map(p, end); if (err < 0) goto done; } /* tunables */ ceph_decode_need(p, end, 3*sizeof(u32), done); c->choose_local_tries = ceph_decode_32(p); c->choose_local_fallback_tries = ceph_decode_32(p); c->choose_total_tries = ceph_decode_32(p); dout("crush decode tunable choose_local_tries = %d", c->choose_local_tries); dout("crush decode tunable choose_local_fallback_tries = %d", c->choose_local_fallback_tries); dout("crush decode tunable choose_total_tries = %d", c->choose_total_tries); ceph_decode_need(p, end, sizeof(u32), done); c->chooseleaf_descend_once = ceph_decode_32(p); dout("crush decode tunable chooseleaf_descend_once = %d", c->chooseleaf_descend_once); done: dout("crush_decode success\n"); return c; badmem: err = -ENOMEM; bad: dout("crush_decode fail %d\n", err); crush_destroy(c); return ERR_PTR(err); } /* * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid * to a set of osds) */ static int pgid_cmp(struct 
ceph_pg l, struct ceph_pg r) { if (l.pool < r.pool) return -1; if (l.pool > r.pool) return 1; if (l.seed < r.seed) return -1; if (l.seed > r.seed) return 1; return 0; } static int __insert_pg_mapping(struct ceph_pg_mapping *new, struct rb_root *root) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct ceph_pg_mapping *pg = NULL; int c; dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new); while (*p) { parent = *p; pg = rb_entry(parent, struct ceph_pg_mapping, node); c = pgid_cmp(new->pgid, pg->pgid); if (c < 0) p = &(*p)->rb_left; else if (c > 0) p = &(*p)->rb_right; else return -EEXIST; } rb_link_node(&new->node, parent, p); rb_insert_color(&new->node, root); return 0; } static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root, struct ceph_pg pgid) { struct rb_node *n = root->rb_node; struct ceph_pg_mapping *pg; int c; while (n) { pg = rb_entry(n, struct ceph_pg_mapping, node); c = pgid_cmp(pgid, pg->pgid); if (c < 0) { n = n->rb_left; } else if (c > 0) { n = n->rb_right; } else { dout("__lookup_pg_mapping %lld.%x got %p\n", pgid.pool, pgid.seed, pg); return pg; } } return NULL; } static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid) { struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid); if (pg) { dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed, pg); rb_erase(&pg->node, root); kfree(pg); return 0; } dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed); return -ENOENT; } /* * rbtree of pg pool info */ static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct ceph_pg_pool_info *pi = NULL; while (*p) { parent = *p; pi = rb_entry(parent, struct ceph_pg_pool_info, node); if (new->id < pi->id) p = &(*p)->rb_left; else if (new->id > pi->id) p = &(*p)->rb_right; else return -EEXIST; } rb_link_node(&new->node, parent, p); rb_insert_color(&new->node, root); return 0; } 
static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id) { struct ceph_pg_pool_info *pi; struct rb_node *n = root->rb_node; while (n) { pi = rb_entry(n, struct ceph_pg_pool_info, node); if (id < pi->id) n = n->rb_left; else if (id > pi->id) n = n->rb_right; else return pi; } return NULL; } const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id) { struct ceph_pg_pool_info *pi; if (id == CEPH_NOPOOL) return NULL; if (WARN_ON_ONCE(id > (u64) INT_MAX)) return NULL; pi = __lookup_pg_pool(&map->pg_pools, (int) id); return pi ? pi->name : NULL; } EXPORT_SYMBOL(ceph_pg_pool_name_by_id); int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) { struct rb_node *rbp; for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) { struct ceph_pg_pool_info *pi = rb_entry(rbp, struct ceph_pg_pool_info, node); if (pi->name && strcmp(pi->name, name) == 0) return pi->id; } return -ENOENT; } EXPORT_SYMBOL(ceph_pg_poolid_by_name); static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) { rb_erase(&pi->node, root); kfree(pi->name); kfree(pi); } static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) { u8 ev, cv; unsigned len, num; void *pool_end; ceph_decode_need(p, end, 2 + 4, bad); ev = ceph_decode_8(p); /* encoding version */ cv = ceph_decode_8(p); /* compat version */ if (ev < 5) { pr_warning("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv); return -EINVAL; } if (cv > 7) { pr_warning("got v %d cv %d > 7 of ceph_pg_pool\n", ev, cv); return -EINVAL; } len = ceph_decode_32(p); ceph_decode_need(p, end, len, bad); pool_end = *p + len; pi->type = ceph_decode_8(p); pi->size = ceph_decode_8(p); pi->crush_ruleset = ceph_decode_8(p); pi->object_hash = ceph_decode_8(p); pi->pg_num = ceph_decode_32(p); pi->pgp_num = ceph_decode_32(p); *p += 4 + 4; /* skip lpg* */ *p += 4; /* skip last_change */ *p += 8 + 4; /* skip snap_seq, snap_epoch */ /* skip snaps */ num = ceph_decode_32(p); while (num--) { *p 
+= 8; /* snapid key */ *p += 1 + 1; /* versions */ len = ceph_decode_32(p); *p += len; } /* skip removed snaps */ num = ceph_decode_32(p); *p += num * (8 + 8); *p += 8; /* skip auid */ pi->flags = ceph_decode_64(p); /* ignore the rest */ *p = pool_end; calc_pg_masks(pi); return 0; bad: return -EINVAL; } static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map) { struct ceph_pg_pool_info *pi; u32 num, len; u64 pool; ceph_decode_32_safe(p, end, num, bad); dout(" %d pool names\n", num); while (num--) { ceph_decode_64_safe(p, end, pool, bad); ceph_decode_32_safe(p, end, len, bad); dout(" pool %llu len %d\n", pool, len); ceph_decode_need(p, end, len, bad); pi = __lookup_pg_pool(&map->pg_pools, pool); if (pi) { char *name = kstrndup(*p, len, GFP_NOFS); if (!name) return -ENOMEM; kfree(pi->name); pi->name = name; dout(" name is %s\n", pi->name); } *p += len; } return 0; bad: return -EINVAL; } /* * osd map */ void ceph_osdmap_destroy(struct ceph_osdmap *map) { dout("osdmap_destroy %p\n", map); if (map->crush) crush_destroy(map->crush); while (!RB_EMPTY_ROOT(&map->pg_temp)) { struct ceph_pg_mapping *pg = rb_entry(rb_first(&map->pg_temp), struct ceph_pg_mapping, node); rb_erase(&pg->node, &map->pg_temp); kfree(pg); } while (!RB_EMPTY_ROOT(&map->pg_pools)) { struct ceph_pg_pool_info *pi = rb_entry(rb_first(&map->pg_pools), struct ceph_pg_pool_info, node); __remove_pg_pool(&map->pg_pools, pi); } kfree(map->osd_state); kfree(map->osd_weight); kfree(map->osd_addr); kfree(map); } /* * adjust max osd value. reallocate arrays. */ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) { u8 *state; struct ceph_entity_addr *addr; u32 *weight; state = kcalloc(max, sizeof(*state), GFP_NOFS); addr = kcalloc(max, sizeof(*addr), GFP_NOFS); weight = kcalloc(max, sizeof(*weight), GFP_NOFS); if (state == NULL || addr == NULL || weight == NULL) { kfree(state); kfree(addr); kfree(weight); return -ENOMEM; } /* copy old? 
*/ if (map->osd_state) { memcpy(state, map->osd_state, map->max_osd*sizeof(*state)); memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr)); memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight)); kfree(map->osd_state); kfree(map->osd_addr); kfree(map->osd_weight); } map->osd_state = state; map->osd_weight = weight; map->osd_addr = addr; map->max_osd = max; return 0; } /* * decode a full map. */ struct ceph_osdmap *osdmap_decode(void **p, void *end) { struct ceph_osdmap *map; u16 version; u32 len, max, i; int err = -EINVAL; void *start = *p; struct ceph_pg_pool_info *pi; dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p)); map = kzalloc(sizeof(*map), GFP_NOFS); if (map == NULL) return ERR_PTR(-ENOMEM); map->pg_temp = RB_ROOT; ceph_decode_16_safe(p, end, version, bad); if (version > 6) { pr_warning("got unknown v %d > 6 of osdmap\n", version); goto bad; } if (version < 6) { pr_warning("got old v %d < 6 of osdmap\n", version); goto bad; } ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad); ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); map->epoch = ceph_decode_32(p); ceph_decode_copy(p, &map->created, sizeof(map->created)); ceph_decode_copy(p, &map->modified, sizeof(map->modified)); ceph_decode_32_safe(p, end, max, bad); while (max--) { ceph_decode_need(p, end, 8 + 2, bad); err = -ENOMEM; pi = kzalloc(sizeof(*pi), GFP_NOFS); if (!pi) goto bad; pi->id = ceph_decode_64(p); err = __decode_pool(p, end, pi); if (err < 0) { kfree(pi); goto bad; } __insert_pg_pool(&map->pg_pools, pi); } err = __decode_pool_names(p, end, map); if (err < 0) { dout("fail to decode pool names"); goto bad; } ceph_decode_32_safe(p, end, map->pool_max, bad); ceph_decode_32_safe(p, end, map->flags, bad); max = ceph_decode_32(p); /* (re)alloc osd arrays */ err = osdmap_set_max_osd(map, max); if (err < 0) goto bad; dout("osdmap_decode max_osd = %d\n", map->max_osd); /* osds */ err = -EINVAL; ceph_decode_need(p, end, 3*sizeof(u32) + map->max_osd*(1 + 
sizeof(*map->osd_weight) + sizeof(*map->osd_addr)), bad); *p += 4; /* skip length field (should match max) */ ceph_decode_copy(p, map->osd_state, map->max_osd); *p += 4; /* skip length field (should match max) */ for (i = 0; i < map->max_osd; i++) map->osd_weight[i] = ceph_decode_32(p); *p += 4; /* skip length field (should match max) */ ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr)); for (i = 0; i < map->max_osd; i++) ceph_decode_addr(&map->osd_addr[i]); /* pg_temp */ ceph_decode_32_safe(p, end, len, bad); for (i = 0; i < len; i++) { int n, j; struct ceph_pg pgid; struct ceph_pg_mapping *pg; err = ceph_decode_pgid(p, end, &pgid); if (err) goto bad; ceph_decode_need(p, end, sizeof(u32), bad); n = ceph_decode_32(p); err = -EINVAL; if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) goto bad; ceph_decode_need(p, end, n * sizeof(u32), bad); err = -ENOMEM; pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS); if (!pg) goto bad; pg->pgid = pgid; pg->len = n; for (j = 0; j < n; j++) pg->osds[j] = ceph_decode_32(p); err = __insert_pg_mapping(pg, &map->pg_temp); if (err) goto bad; dout(" added pg_temp %lld.%x len %d\n", pgid.pool, pgid.seed, len); } /* crush */ ceph_decode_32_safe(p, end, len, bad); dout("osdmap_decode crush len %d from off 0x%x\n", len, (int)(*p - start)); ceph_decode_need(p, end, len, bad); map->crush = crush_decode(*p, end); *p += len; if (IS_ERR(map->crush)) { err = PTR_ERR(map->crush); map->crush = NULL; goto bad; } /* ignore the rest of the map */ *p = end; dout("osdmap_decode done %p %p\n", *p, end); return map; bad: dout("osdmap_decode fail err %d\n", err); ceph_osdmap_destroy(map); return ERR_PTR(err); } /* * decode and apply an incremental map update. 
*/ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, struct ceph_osdmap *map, struct ceph_messenger *msgr) { struct crush_map *newcrush = NULL; struct ceph_fsid fsid; u32 epoch = 0; struct ceph_timespec modified; s32 len; u64 pool; __s64 new_pool_max; __s32 new_flags, max; void *start = *p; int err = -EINVAL; u16 version; ceph_decode_16_safe(p, end, version, bad); if (version != 6) { pr_warning("got unknown v %d != 6 of inc osdmap\n", version); goto bad; } ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32), bad); ceph_decode_copy(p, &fsid, sizeof(fsid)); epoch = ceph_decode_32(p); BUG_ON(epoch != map->epoch+1); ceph_decode_copy(p, &modified, sizeof(modified)); new_pool_max = ceph_decode_64(p); new_flags = ceph_decode_32(p); /* full map? */ ceph_decode_32_safe(p, end, len, bad); if (len > 0) { dout("apply_incremental full map len %d, %p to %p\n", len, *p, end); return osdmap_decode(p, min(*p+len, end)); } /* new crush? */ ceph_decode_32_safe(p, end, len, bad); if (len > 0) { dout("apply_incremental new crush map len %d, %p to %p\n", len, *p, end); newcrush = crush_decode(*p, min(*p+len, end)); if (IS_ERR(newcrush)) return ERR_CAST(newcrush); *p += len; } /* new flags? */ if (new_flags >= 0) map->flags = new_flags; if (new_pool_max >= 0) map->pool_max = new_pool_max; ceph_decode_need(p, end, 5*sizeof(u32), bad); /* new max? 
*/ max = ceph_decode_32(p); if (max >= 0) { err = osdmap_set_max_osd(map, max); if (err < 0) goto bad; } map->epoch++; map->modified = modified; if (newcrush) { if (map->crush) crush_destroy(map->crush); map->crush = newcrush; newcrush = NULL; } /* new_pool */ ceph_decode_32_safe(p, end, len, bad); while (len--) { struct ceph_pg_pool_info *pi; ceph_decode_64_safe(p, end, pool, bad); pi = __lookup_pg_pool(&map->pg_pools, pool); if (!pi) { pi = kzalloc(sizeof(*pi), GFP_NOFS); if (!pi) { err = -ENOMEM; goto bad; } pi->id = pool; __insert_pg_pool(&map->pg_pools, pi); } err = __decode_pool(p, end, pi); if (err < 0) goto bad; } if (version >= 5) { err = __decode_pool_names(p, end, map); if (err < 0) goto bad; } /* old_pool */ ceph_decode_32_safe(p, end, len, bad); while (len--) { struct ceph_pg_pool_info *pi; ceph_decode_64_safe(p, end, pool, bad); pi = __lookup_pg_pool(&map->pg_pools, pool); if (pi) __remove_pg_pool(&map->pg_pools, pi); } /* new_up */ err = -EINVAL; ceph_decode_32_safe(p, end, len, bad); while (len--) { u32 osd; struct ceph_entity_addr addr; ceph_decode_32_safe(p, end, osd, bad); ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad); ceph_decode_addr(&addr); pr_info("osd%d up\n", osd); BUG_ON(osd >= map->max_osd); map->osd_state[osd] |= CEPH_OSD_UP; map->osd_addr[osd] = addr; } /* new_state */ ceph_decode_32_safe(p, end, len, bad); while (len--) { u32 osd; u8 xorstate; ceph_decode_32_safe(p, end, osd, bad); xorstate = **(u8 **)p; (*p)++; /* clean flag */ if (xorstate == 0) xorstate = CEPH_OSD_UP; if (xorstate & CEPH_OSD_UP) pr_info("osd%d down\n", osd); if (osd < map->max_osd) map->osd_state[osd] ^= xorstate; } /* new_weight */ ceph_decode_32_safe(p, end, len, bad); while (len--) { u32 osd, off; ceph_decode_need(p, end, sizeof(u32)*2, bad); osd = ceph_decode_32(p); off = ceph_decode_32(p); pr_info("osd%d weight 0x%x %s\n", osd, off, off == CEPH_OSD_IN ? "(in)" : (off == CEPH_OSD_OUT ? 
"(out)" : "")); if (osd < map->max_osd) map->osd_weight[osd] = off; } /* new_pg_temp */ ceph_decode_32_safe(p, end, len, bad); while (len--) { struct ceph_pg_mapping *pg; int j; struct ceph_pg pgid; u32 pglen; err = ceph_decode_pgid(p, end, &pgid); if (err) goto bad; ceph_decode_need(p, end, sizeof(u32), bad); pglen = ceph_decode_32(p); if (pglen) { ceph_decode_need(p, end, pglen*sizeof(u32), bad); /* removing existing (if any) */ (void) __remove_pg_mapping(&map->pg_temp, pgid); /* insert */ err = -EINVAL; if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) goto bad; err = -ENOMEM; pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS); if (!pg) goto bad; pg->pgid = pgid; pg->len = pglen; for (j = 0; j < pglen; j++) pg->osds[j] = ceph_decode_32(p); err = __insert_pg_mapping(pg, &map->pg_temp); if (err) { kfree(pg); goto bad; } dout(" added pg_temp %lld.%x len %d\n", pgid.pool, pgid.seed, pglen); } else { /* remove */ __remove_pg_mapping(&map->pg_temp, pgid); } } /* ignore the rest */ *p = end; return map; bad: pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n", epoch, (int)(*p - start), *p, start, end); print_hex_dump(KERN_DEBUG, "osdmap: ", DUMP_PREFIX_OFFSET, 16, 1, start, end - start, true); if (newcrush) crush_destroy(newcrush); return ERR_PTR(err); } /* * calculate file layout from given offset, length. * fill in correct oid, logical length, and object extent * offset, length. * * for now, we write only a single su, until we can * pass a stride back to the caller. 
*/ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, u64 off, u64 len, u64 *ono, u64 *oxoff, u64 *oxlen) { u32 osize = le32_to_cpu(layout->fl_object_size); u32 su = le32_to_cpu(layout->fl_stripe_unit); u32 sc = le32_to_cpu(layout->fl_stripe_count); u32 bl, stripeno, stripepos, objsetno; u32 su_per_object; u64 t, su_offset; dout("mapping %llu~%llu osize %u fl_su %u\n", off, len, osize, su); if (su == 0 || sc == 0) goto invalid; su_per_object = osize / su; if (su_per_object == 0) goto invalid; dout("osize %u / su %u = su_per_object %u\n", osize, su, su_per_object); if ((su & ~PAGE_MASK) != 0) goto invalid; /* bl = *off / su; */ t = off; do_div(t, su); bl = t; dout("off %llu / su %u = bl %u\n", off, su, bl); stripeno = bl / sc; stripepos = bl % sc; objsetno = stripeno / su_per_object; *ono = objsetno * sc + stripepos; dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono); /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ t = off; su_offset = do_div(t, su); *oxoff = su_offset + (stripeno % su_per_object) * su; /* * Calculate the length of the extent being written to the selected * object. This is the minimum of the full length requested (len) or * the remainder of the current stripe being written to. */ *oxlen = min_t(u64, len, su - su_offset); dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); return 0; invalid: dout(" invalid layout\n"); *ono = 0; *oxoff = 0; *oxlen = 0; return -EINVAL; } EXPORT_SYMBOL(ceph_calc_file_object_mapping); /* * calculate an object layout (i.e. 
pgid) from an oid, * file_layout, and osdmap */ int ceph_calc_ceph_pg(struct ceph_pg *pg, const char *oid, struct ceph_osdmap *osdmap, uint64_t pool) { struct ceph_pg_pool_info *pool_info; BUG_ON(!osdmap); pool_info = __lookup_pg_pool(&osdmap->pg_pools, pool); if (!pool_info) return -EIO; pg->pool = pool; pg->seed = ceph_str_hash(pool_info->object_hash, oid, strlen(oid)); dout("%s '%s' pgid %lld.%x\n", __func__, oid, pg->pool, pg->seed); return 0; } EXPORT_SYMBOL(ceph_calc_ceph_pg); /* * Calculate raw osd vector for the given pgid. Return pointer to osd * array, or NULL on failure. */ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, int *osds, int *num) { struct ceph_pg_mapping *pg; struct ceph_pg_pool_info *pool; int ruleno; int r; u32 pps; pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool); if (!pool) return NULL; /* pg_temp? */ pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num, pool->pg_num_mask); pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); if (pg) { *num = pg->len; return pg->osds; } /* crush */ ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset, pool->type, pool->size); if (ruleno < 0) { pr_err("no crush rule pool %lld ruleset %d type %d size %d\n", pgid.pool, pool->crush_ruleset, pool->type, pool->size); return NULL; } if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) { /* hash pool id and seed sothat pool PGs do not overlap */ pps = crush_hash32_2(CRUSH_HASH_RJENKINS1, ceph_stable_mod(pgid.seed, pool->pgp_num, pool->pgp_num_mask), pgid.pool); } else { /* * legacy ehavior: add ps and pool together. this is * not a great approach because the PGs from each pool * will overlap on top of each other: 0.5 == 1.4 == * 2.3 == ... 
*/ pps = ceph_stable_mod(pgid.seed, pool->pgp_num, pool->pgp_num_mask) + (unsigned)pgid.pool; } r = crush_do_rule(osdmap->crush, ruleno, pps, osds, min_t(int, pool->size, *num), osdmap->osd_weight); if (r < 0) { pr_err("error %d from crush rule: pool %lld ruleset %d type %d" " size %d\n", r, pgid.pool, pool->crush_ruleset, pool->type, pool->size); return NULL; } *num = r; return osds; } /* * Return acting set for given pgid. */ int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, int *acting) { int rawosds[CEPH_PG_MAX_SIZE], *osds; int i, o, num = CEPH_PG_MAX_SIZE; osds = calc_pg_raw(osdmap, pgid, rawosds, &num); if (!osds) return -1; /* primary is first up osd */ o = 0; for (i = 0; i < num; i++) if (ceph_osd_is_up(osdmap, osds[i])) acting[o++] = osds[i]; return o; } /* * Return primary osd for given pgid, or -1 if none. */ int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid) { int rawosds[CEPH_PG_MAX_SIZE], *osds; int i, num = CEPH_PG_MAX_SIZE; osds = calc_pg_raw(osdmap, pgid, rawosds, &num); if (!osds) return -1; /* primary is first up osd */ for (i = 0; i < num; i++) if (ceph_osd_is_up(osdmap, osds[i])) return osds[i]; return -1; } EXPORT_SYMBOL(ceph_calc_pg_primary);
gpl-2.0
avareldalton85/rpi2-linux-rt
arch/sh/kernel/process_32.c
1900
5768
/* * arch/sh/kernel/process.c * * This file handles the architecture-dependent parts of process handling.. * * Copyright (C) 1995 Linus Torvalds * * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC * Copyright (C) 2002 - 2008 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/elfcore.h> #include <linux/kallsyms.h> #include <linux/fs.h> #include <linux/ftrace.h> #include <linux/hw_breakpoint.h> #include <linux/prefetch.h> #include <linux/stackprotector.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> #include <asm/fpu.h> #include <asm/syscalls.h> #include <asm/switch_to.h> void show_regs(struct pt_regs * regs) { printk("\n"); show_regs_print_info(KERN_DEFAULT); print_symbol("PC is at %s\n", instruction_pointer(regs)); print_symbol("PR is at %s\n", regs->pr); printk("PC : %08lx SP : %08lx SR : %08lx ", regs->pc, regs->regs[15], regs->sr); #ifdef CONFIG_MMU printk("TEA : %08x\n", __raw_readl(MMU_TEA)); #else printk("\n"); #endif printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", regs->regs[0],regs->regs[1], regs->regs[2],regs->regs[3]); printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", regs->regs[4],regs->regs[5], regs->regs[6],regs->regs[7]); printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n", regs->regs[8],regs->regs[9], regs->regs[10],regs->regs[11]); printk("R12 : %08lx R13 : %08lx R14 : %08lx\n", regs->regs[12],regs->regs[13], regs->regs[14]); printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n", regs->mach, regs->macl, regs->gbr, regs->pr); show_trace(NULL, (unsigned long *)regs->regs[15], regs); show_code(regs); } void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp) { regs->pr = 0; regs->sr = SR_FD; regs->pc 
= new_pc; regs->regs[15] = new_sp; free_thread_xstate(current); } EXPORT_SYMBOL(start_thread); /* * Free current thread data structures etc.. */ void exit_thread(void) { } void flush_thread(void) { struct task_struct *tsk = current; flush_ptrace_hw_breakpoint(tsk); #if defined(CONFIG_SH_FPU) /* Forget lazy FPU state */ clear_fpu(tsk, task_pt_regs(tsk)); clear_used_math(); #endif } void release_thread(struct task_struct *dead_task) { /* do nothing */ } /* Fill in the fpu structure for a core dump.. */ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) { int fpvalid = 0; #if defined(CONFIG_SH_FPU) struct task_struct *tsk = current; fpvalid = !!tsk_used_math(tsk); if (fpvalid) fpvalid = !fpregs_get(tsk, NULL, 0, sizeof(struct user_fpu_struct), fpu, NULL); #endif return fpvalid; } EXPORT_SYMBOL(dump_fpu); asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct task_struct *p) { struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs; #if defined(CONFIG_SH_DSP) struct task_struct *tsk = current; if (is_dsp_enabled(tsk)) { /* We can use the __save_dsp or just copy the struct: * __save_dsp(p); * p->thread.dsp_status.status |= SR_DSP */ p->thread.dsp_status = tsk->thread.dsp_status; } #endif memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); childregs = task_pt_regs(p); p->thread.sp = (unsigned long) childregs; if (unlikely(p->flags & PF_KTHREAD)) { memset(childregs, 0, sizeof(struct pt_regs)); p->thread.pc = (unsigned long) ret_from_kernel_thread; childregs->regs[4] = arg; childregs->regs[5] = usp; childregs->sr = SR_MD; #if defined(CONFIG_SH_FPU) childregs->sr |= SR_FD; #endif ti->addr_limit = KERNEL_DS; ti->status &= ~TS_USEDFPU; p->thread.fpu_counter = 0; return 0; } *childregs = *current_pt_regs(); if (usp) childregs->regs[15] = usp; ti->addr_limit = USER_DS; if (clone_flags & CLONE_SETTLS) childregs->gbr = 
childregs->regs[0]; childregs->regs[0] = 0; /* Set return value for child */ p->thread.pc = (unsigned long) ret_from_fork; return 0; } /* * switch_to(x,y) should switch tasks from x to y. * */ __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev, struct task_struct *next) { struct thread_struct *next_t = &next->thread; #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) __stack_chk_guard = next->stack_canary; #endif unlazy_fpu(prev, task_pt_regs(prev)); /* we're going to use this soon, after a few expensive things */ if (next->thread.fpu_counter > 5) prefetch(next_t->xstate); #ifdef CONFIG_MMU /* * Restore the kernel mode register * k7 (r7_bank1) */ asm volatile("ldc %0, r7_bank" : /* no output */ : "r" (task_thread_info(next))); #endif /* * If the task has used fpu the last 5 timeslices, just do a full * restore of the math state immediately to avoid the trap; the * chances of needing FPU soon are obviously high now */ if (next->thread.fpu_counter > 5) __fpu_state_restore(); return prev; } unsigned long get_wchan(struct task_struct *p) { unsigned long pc; if (!p || p == current || p->state == TASK_RUNNING) return 0; /* * The same comment as on the Alpha applies here, too ... */ pc = thread_saved_pc(p); #ifdef CONFIG_FRAME_POINTER if (in_sched_functions(pc)) { unsigned long schedule_frame = (unsigned long)p->thread.sp; return ((unsigned long *)schedule_frame)[21]; } #endif return pc; }
gpl-2.0
spleef/android_kernel_samsung_trlte
drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2156
84133
/****************************************************************************** * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> ******************************************************************************/ #undef RX_DONT_PASS_UL #undef DEBUG_EPROM #undef DEBUG_RX_VERBOSE #undef DUMMY_RX #undef DEBUG_ZERO_RX #undef DEBUG_RX_SKB #undef DEBUG_TX_FRAG #undef DEBUG_RX_FRAG #undef DEBUG_TX_FILLDESC #undef DEBUG_TX #undef DEBUG_IRQ #undef DEBUG_RX #undef DEBUG_RXALLOC #undef DEBUG_REGISTERS #undef DEBUG_RING #undef DEBUG_IRQ_TASKLET #undef DEBUG_TX_ALLOC #undef DEBUG_TX_DESC #include <linux/uaccess.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include "rtl_core.h" #include "r8192E_phy.h" #include "r8192E_phyreg.h" #include "r8190P_rtl8256.h" #include "r8192E_cmdpkt.h" #include "rtl_wx.h" #include "rtl_dm.h" #include "rtl_pm.h" int hwwep = 1; static int channels = 0x3fff; static char *ifname = "wlan%d"; static struct rtl819x_ops rtl819xp_ops = { .nic_type = NIC_8192E, .get_eeprom_size = rtl8192_get_eeprom_size, .init_adapter_variable = 
rtl8192_InitializeVariables, .initialize_adapter = rtl8192_adapter_start, .link_change = rtl8192_link_change, .tx_fill_descriptor = rtl8192_tx_fill_desc, .tx_fill_cmd_descriptor = rtl8192_tx_fill_cmd_desc, .rx_query_status_descriptor = rtl8192_rx_query_status_desc, .rx_command_packet_handler = NULL, .stop_adapter = rtl8192_halt_adapter, .update_ratr_table = rtl8192_update_ratr_table, .irq_enable = rtl8192_EnableInterrupt, .irq_disable = rtl8192_DisableInterrupt, .irq_clear = rtl8192_ClearInterrupt, .rx_enable = rtl8192_enable_rx, .tx_enable = rtl8192_enable_tx, .interrupt_recognized = rtl8192_interrupt_recognized, .TxCheckStuckHandler = rtl8192_HalTxCheckStuck, .RxCheckStuckHandler = rtl8192_HalRxCheckStuck, }; static struct pci_device_id rtl8192_pci_id_tbl[] = { {RTL_PCI_DEVICE(0x10ec, 0x8192, rtl819xp_ops)}, {RTL_PCI_DEVICE(0x07aa, 0x0044, rtl819xp_ops)}, {RTL_PCI_DEVICE(0x07aa, 0x0047, rtl819xp_ops)}, {} }; MODULE_DEVICE_TABLE(pci, rtl8192_pci_id_tbl); static int rtl8192_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void rtl8192_pci_disconnect(struct pci_dev *pdev); static struct pci_driver rtl8192_pci_driver = { .name = DRV_NAME, /* Driver name */ .id_table = rtl8192_pci_id_tbl, /* PCI_ID table */ .probe = rtl8192_pci_probe, /* probe fn */ .remove = rtl8192_pci_disconnect, /* remove fn */ .suspend = rtl8192E_suspend, /* PM suspend fn */ .resume = rtl8192E_resume, /* PM resume fn */ }; /**************************************************************************** -----------------------------IO STUFF------------------------- *****************************************************************************/ static bool PlatformIOCheckPageLegalAndGetRegMask(u32 u4bPage, u8 *pu1bPageMask) { bool bReturn = false; *pu1bPageMask = 0xfe; switch (u4bPage) { case 1: case 2: case 3: case 4: case 8: case 9: case 10: case 12: case 13: bReturn = true; *pu1bPageMask = 0xf0; break; default: bReturn = false; break; } return bReturn; } void 
write_nic_io_byte(struct net_device *dev, int x, u8 y) { u32 u4bPage = (x >> 8); u8 u1PageMask = 0; bool bIsLegalPage = false; if (u4bPage == 0) { outb(y&0xff, dev->base_addr + x); } else { bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage, &u1PageMask); if (bIsLegalPage) { u8 u1bPsr = read_nic_io_byte(dev, PSR); write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) | (u8)u4bPage)); write_nic_io_byte(dev, (x & 0xff), y); write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask)); } } } void write_nic_io_word(struct net_device *dev, int x, u16 y) { u32 u4bPage = (x >> 8); u8 u1PageMask = 0; bool bIsLegalPage = false; if (u4bPage == 0) { outw(y, dev->base_addr + x); } else { bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage, &u1PageMask); if (bIsLegalPage) { u8 u1bPsr = read_nic_io_byte(dev, PSR); write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) | (u8)u4bPage)); write_nic_io_word(dev, (x & 0xff), y); write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask)); } } } void write_nic_io_dword(struct net_device *dev, int x, u32 y) { u32 u4bPage = (x >> 8); u8 u1PageMask = 0; bool bIsLegalPage = false; if (u4bPage == 0) { outl(y, dev->base_addr + x); } else { bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage, &u1PageMask); if (bIsLegalPage) { u8 u1bPsr = read_nic_io_byte(dev, PSR); write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) | (u8)u4bPage)); write_nic_io_dword(dev, (x & 0xff), y); write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask)); } } } u8 read_nic_io_byte(struct net_device *dev, int x) { u32 u4bPage = (x >> 8); u8 u1PageMask = 0; bool bIsLegalPage = false; u8 Data = 0; if (u4bPage == 0) { return 0xff&inb(dev->base_addr + x); } else { bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage, &u1PageMask); if (bIsLegalPage) { u8 u1bPsr = read_nic_io_byte(dev, PSR); write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) | (u8)u4bPage)); Data = read_nic_io_byte(dev, (x & 0xff)); write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask)); } } return Data; } u16 
/* Paged 16-bit port read; returns 0 when the requested page is not legal. */
read_nic_io_word(struct net_device *dev, int x)
{
	u32 u4bPage = (x >> 8);
	u8 u1PageMask = 0;
	bool bIsLegalPage = false;
	u16 Data = 0;

	if (u4bPage == 0) {
		return inw(dev->base_addr + x);
	} else {
		bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
							&u1PageMask);
		if (bIsLegalPage) {
			u8 u1bPsr = read_nic_io_byte(dev, PSR);

			/* Select page, read register, restore old page. */
			write_nic_io_byte(dev, PSR,
					  ((u1bPsr & u1PageMask) |
					   (u8)u4bPage));
			Data = read_nic_io_word(dev, (x & 0xff));
			write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
		}
	}
	return Data;
}

/* Paged 32-bit port read; returns 0 when the requested page is not legal. */
u32 read_nic_io_dword(struct net_device *dev, int x)
{
	u32 u4bPage = (x >> 8);
	u8 u1PageMask = 0;
	bool bIsLegalPage = false;
	u32 Data = 0;

	if (u4bPage == 0) {
		return inl(dev->base_addr + x);
	} else {
		bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
							&u1PageMask);
		if (bIsLegalPage) {
			u8 u1bPsr = read_nic_io_byte(dev, PSR);

			write_nic_io_byte(dev, PSR,
					  ((u1bPsr & u1PageMask) |
					   (u8)u4bPage));
			Data = read_nic_io_dword(dev, (x & 0xff));
			write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
		}
	}
	return Data;
}

/*
 * Memory-mapped register accessors.
 * NOTE(review): dev->mem_start appears to hold the ioremapped register
 * base - confirm against the probe path.
 */
u8 read_nic_byte(struct net_device *dev, int x)
{
	return 0xff & readb((u8 __iomem *)dev->mem_start + x);
}

u32 read_nic_dword(struct net_device *dev, int x)
{
	return readl((u8 __iomem *)dev->mem_start + x);
}

u16 read_nic_word(struct net_device *dev, int x)
{
	return readw((u8 __iomem *)dev->mem_start + x);
}

/*
 * Each MMIO write is followed by a 20us delay.
 * NOTE(review): presumably gives the hardware time to latch the value -
 * verify against the vendor reference before changing.
 */
void write_nic_byte(struct net_device *dev, int x, u8 y)
{
	writeb(y, (u8 __iomem *)dev->mem_start + x);
	udelay(20);
}

void write_nic_dword(struct net_device *dev, int x, u32 y)
{
	writel(y, (u8 __iomem *)dev->mem_start + x);
	udelay(20);
}

void write_nic_word(struct net_device *dev, int x, u16 y)
{
	writew(y, (u8 __iomem *)dev->mem_start + x);
	udelay(20);
}

/****************************************************************************
   -----------------------------GENERAL FUNCTION-------------------------
*****************************************************************************/
bool MgntActSet_RF_State(struct net_device *dev, enum rt_rf_power_state
StateToSet, RT_RF_CHANGE_SOURCE ChangeSource, bool ProtectOrNot)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	bool bActionAllowed = false;
	bool bConnectBySSID = false;
	enum rt_rf_power_state rtState;
	u16 RFWaitCounter = 0;
	unsigned long flag;

	RT_TRACE((COMP_PS | COMP_RF), "===>MgntActSet_RF_State(): "
		 "StateToSet(%d)\n", StateToSet);

	/* The protected (serialized) path is always taken. */
	ProtectOrNot = false;

	if (!ProtectOrNot) {
		/*
		 * Serialize RF changes: wait (up to ~100ms) for any change
		 * already in flight, then claim RFChangeInProgress.
		 */
		while (true) {
			spin_lock_irqsave(&priv->rf_ps_lock, flag);
			if (priv->RFChangeInProgress) {
				spin_unlock_irqrestore(&priv->rf_ps_lock,
						       flag);
				RT_TRACE((COMP_PS | COMP_RF),
					 "MgntActSet_RF_State(): RF Change in "
					 "progress! Wait to set..StateToSet"
					 "(%d).\n", StateToSet);
				while (priv->RFChangeInProgress) {
					RFWaitCounter++;
					RT_TRACE((COMP_PS | COMP_RF),
						 "MgntActSet_RF_State(): Wait 1"
						 " ms (%d times)...\n",
						 RFWaitCounter);
					mdelay(1);
					if (RFWaitCounter > 100) {
						/* Fix: message typo was
						 * "logn"; now "long". */
						RT_TRACE(COMP_ERR,
							 "MgntActSet_"
							 "RF_State(): Wait too "
							 "long to set RF\n");
						return false;
					}
				}
			} else {
				priv->RFChangeInProgress = true;
				spin_unlock_irqrestore(&priv->rf_ps_lock,
						       flag);
				break;
			}
		}
	}
	rtState = priv->rtllib->eRFPowerState;
	switch (StateToSet) {
	case eRfOn:
		/* Clear this requester's off-reason; the radio may only be
		 * turned on once no reasons remain. */
		priv->rtllib->RfOffReason &= (~ChangeSource);
		if ((ChangeSource == RF_CHANGE_BY_HW) &&
		    (priv->bHwRadioOff == true))
			priv->bHwRadioOff = false;
		if (!priv->rtllib->RfOffReason) {
			priv->rtllib->RfOffReason = 0;
			bActionAllowed = true;
			if (rtState == eRfOff &&
			    ChangeSource >= RF_CHANGE_BY_HW)
				bConnectBySSID = true;
		} else {
			RT_TRACE((COMP_PS | COMP_RF),
				 "MgntActSet_RF_State - "
				 "eRfon reject pMgntInfo->RfOffReason= 0x%x,"
				 " ChangeSource=0x%X\n",
				 priv->rtllib->RfOffReason, ChangeSource);
		}
		break;
	case eRfOff:
		if ((priv->rtllib->iw_mode == IW_MODE_INFRA) ||
		    (priv->rtllib->iw_mode == IW_MODE_ADHOC)) {
			if ((priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) ||
			    (ChangeSource > RF_CHANGE_BY_IPS)) {
				/* Remember a linked state so the LED blink
				 * state can be restored on eRfOn. */
				if (ieee->state == RTLLIB_LINKED)
					priv->blinked_ingpio = true;
				else
					priv->blinked_ingpio = false;
				rtllib_MgntDisconnect(priv->rtllib,
						      disas_lv_ss);
			}
		}
if ((ChangeSource == RF_CHANGE_BY_HW) &&
		    (priv->bHwRadioOff == false))
			priv->bHwRadioOff = true;
		/* Record this requester's reason; turning off is always
		 * allowed. */
		priv->rtllib->RfOffReason |= ChangeSource;
		bActionAllowed = true;
		break;
	case eRfSleep:
		priv->rtllib->RfOffReason |= ChangeSource;
		bActionAllowed = true;
		break;
	default:
		break;
	}
	if (bActionAllowed) {
		RT_TRACE((COMP_PS | COMP_RF), "MgntActSet_RF_State(): Action is"
			 " allowed.... StateToSet(%d), RfOffReason(%#X)\n",
			 StateToSet, priv->rtllib->RfOffReason);
		/* Program the PHY, then kick off re-association if the radio
		 * came back on while we were linked. */
		PHY_SetRFPowerState(dev, StateToSet);
		if (StateToSet == eRfOn) {
			if (bConnectBySSID &&
			    (priv->blinked_ingpio == true)) {
				queue_delayed_work_rsl(ieee->wq,
					 &ieee->associate_procedure_wq, 0);
				priv->blinked_ingpio = false;
			}
		}
	} else {
		RT_TRACE((COMP_PS | COMP_RF), "MgntActSet_RF_State(): "
			 "Action is rejected.... StateToSet(%d), ChangeSource"
			 "(%#X), RfOffReason(%#X)\n", StateToSet, ChangeSource,
			 priv->rtllib->RfOffReason);
	}
	/* Release the RF-change claim taken at entry. */
	if (!ProtectOrNot) {
		spin_lock_irqsave(&priv->rf_ps_lock, flag);
		priv->RFChangeInProgress = false;
		spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
	}
	RT_TRACE((COMP_PS | COMP_RF), "<===MgntActSet_RF_State()\n");
	return bActionAllowed;
}

/* Return the number of skbs queued on the TX ring for @prio. */
static short rtl8192_get_nic_desc_num(struct net_device *dev, int prio)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];

	/* For now, we reserved two free descriptor as a safety boundary
	 * between the tail and the head
	 */
	if ((prio == MGNT_QUEUE) && (skb_queue_len(&ring->queue) > 10))
		RT_TRACE(COMP_DBG, "-----[%d]---------ring->idx=%d "
			 "queue_len=%d---------\n", prio, ring->idx,
			 skb_queue_len(&ring->queue));
	return skb_queue_len(&ring->queue);
}

/* Nonzero when the ring for @prio still has at least two free slots. */
static short rtl8192_check_nic_enough_desc(struct net_device *dev, int prio)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];

	if (ring->entries - skb_queue_len(&ring->queue) >= 2)
		return 1;
	return 0;
}

/* net_device TX watchdog: schedule a full adapter reset. */
void rtl8192_tx_timeout(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	schedule_work(&priv->reset_wq);
printk(KERN_INFO "TXTIMEOUT"); } void rtl8192_irq_enable(struct net_device *dev) { struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); priv->irq_enabled = 1; priv->ops->irq_enable(dev); } void rtl8192_irq_disable(struct net_device *dev) { struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); priv->ops->irq_disable(dev); priv->irq_enabled = 0; } void rtl8192_set_chan(struct net_device *dev, short ch) { struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __func__, ch); if (priv->chan_forced) return; priv->chan = ch; if (priv->rf_set_chan) priv->rf_set_chan(dev, priv->chan); } void rtl8192_update_cap(struct net_device *dev, u16 cap) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_network *net = &priv->rtllib->current_network; bool ShortPreamble; if (cap & WLAN_CAPABILITY_SHORT_PREAMBLE) { if (priv->dot11CurrentPreambleMode != PREAMBLE_SHORT) { ShortPreamble = true; priv->dot11CurrentPreambleMode = PREAMBLE_SHORT; RT_TRACE(COMP_DBG, "%s(): WLAN_CAPABILITY_SHORT_" "PREAMBLE\n", __func__); priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE, (unsigned char *)&ShortPreamble); } } else { if (priv->dot11CurrentPreambleMode != PREAMBLE_LONG) { ShortPreamble = false; priv->dot11CurrentPreambleMode = PREAMBLE_LONG; RT_TRACE(COMP_DBG, "%s(): WLAN_CAPABILITY_LONG_" "PREAMBLE\n", __func__); priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE, (unsigned char *)&ShortPreamble); } } if (net->mode & (IEEE_G|IEEE_N_24G)) { u8 slot_time_val; u8 CurSlotTime = priv->slot_time; if ((cap & WLAN_CAPABILITY_SHORT_SLOT_TIME) && (!priv->rtllib->pHTInfo->bCurrentRT2RTLongSlotTime)) { if (CurSlotTime != SHORT_SLOT_TIME) { slot_time_val = SHORT_SLOT_TIME; priv->rtllib->SetHwRegHandler(dev, HW_VAR_SLOT_TIME, &slot_time_val); } } else { if (CurSlotTime != NON_SHORT_SLOT_TIME) { slot_time_val = NON_SHORT_SLOT_TIME; priv->rtllib->SetHwRegHandler(dev, HW_VAR_SLOT_TIME, &slot_time_val); } } } } static 
/*
 * Default EDCA/QoS parameter set used when the AP supplies none.
 * NOTE(review): the per-row meanings (cw_min/cw_max/AIFS/...) are defined
 * by struct rtllib_qos_parameters, which is not visible here - confirm
 * against the rtllib header.
 */
struct rtllib_qos_parameters def_qos_parameters = {
	{3, 3, 3, 3},
	{7, 7, 7, 7},
	{2, 2, 2, 2},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

/*
 * Deferred work: refresh HT peer settings and capability-derived HW state
 * (preamble/slot time) from the current network.
 */
static void rtl8192_update_beacon(void *data)
{
	struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
				  update_beacon_wq.work);
	struct net_device *dev = priv->rtllib->dev;
	struct rtllib_device *ieee = priv->rtllib;
	struct rtllib_network *net = &ieee->current_network;

	if (ieee->pHTInfo->bCurrentHTSupport)
		HT_update_self_and_peer_setting(ieee, net);
	ieee->pHTInfo->bCurrentRT2RTLongSlotTime =
		 net->bssht.bdRT2RTLongSlotTime;
	ieee->pHTInfo->RT2RT_HT_Mode = net->bssht.RT2RT_HT_Mode;
	rtl8192_update_cap(dev, net->capability);
}

/* EDCA parameter register addresses, indexed by access category. */
int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};

/*
 * Deferred work: if still associated, program the per-AC EDCA parameters
 * into hardware (one SetHwRegHandler call per QoS queue).
 */
static void rtl8192_qos_activate(void *data)
{
	struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
				  qos_activate);
	struct net_device *dev = priv->rtllib->dev;
	int i;

	mutex_lock(&priv->mutex);
	if (priv->rtllib->state != RTLLIB_LINKED)
		goto success;
	RT_TRACE(COMP_QOS, "qos active process with associate response "
		 "received\n");

	for (i = 0; i < QOS_QUEUE_NUM; i++) {
		priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM, (u8 *)(&i));
	}

success:
	mutex_unlock(&priv->mutex);
}

/*
 * React to QoS information seen in a probe response / beacon while
 * associated: activate the network's parameters if they changed, or fall
 * back to def_qos_parameters when the network carries no QoS info.
 */
static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
					     int active_network,
					     struct rtllib_network *network)
{
	int ret = 0;
	u32 size = sizeof(struct rtllib_qos_parameters);

	if (priv->rtllib->state != RTLLIB_LINKED)
		return ret;

	if ((priv->rtllib->iw_mode != IW_MODE_INFRA))
		return ret;

	if (network->flags & NETWORK_HAS_QOS_MASK) {
		if (active_network &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
			network->qos_data.active = network->qos_data.supported;

		/* Re-apply only when the parameter count advanced. */
		if ((network->qos_data.active == 1) && (active_network == 1) &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
		    (network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			network->qos_data.old_param_count =
				 network->qos_data.param_count;
			priv->rtllib->wmm_acm =
network->qos_data.wmm_acm;
			queue_work_rsl(priv->priv_wq, &priv->qos_activate);
			RT_TRACE(COMP_QOS, "QoS parameters change call "
				 "qos_activate\n");
		}
	} else {
		/* Network carries no QoS info: install the defaults. */
		memcpy(&priv->rtllib->current_network.qos_data.parameters,
		       &def_qos_parameters, size);

		if ((network->qos_data.active == 1) && (active_network == 1)) {
			queue_work_rsl(priv->priv_wq, &priv->qos_activate);
			RT_TRACE(COMP_QOS, "QoS was disabled call qos_"
				 "activate\n");
		}
		network->qos_data.active = 0;
		network->qos_data.supported = 0;
	}

	return 0;
}

/*
 * rtllib beacon hook: process QoS info and schedule the beacon-derived
 * settings update.  Always returns 0.
 */
static int rtl8192_handle_beacon(struct net_device *dev,
				 struct rtllib_beacon *beacon,
				 struct rtllib_network *network)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	rtl8192_qos_handle_probe_response(priv, 1, network);

	queue_delayed_work_rsl(priv->priv_wq, &priv->update_beacon_wq, 0);
	return 0;
}

/*
 * Copy the association response's QoS parameters (or the defaults) into
 * the current network under the rtllib lock.  set_qos_param is consumed
 * after the unlock (next chunk) to trigger qos_activate.
 */
static int rtl8192_qos_association_resp(struct r8192_priv *priv,
					struct rtllib_network *network)
{
	int ret = 0;
	unsigned long flags;
	u32 size = sizeof(struct rtllib_qos_parameters);
	int set_qos_param = 0;

	if ((priv == NULL) || (network == NULL))
		return ret;

	if (priv->rtllib->state != RTLLIB_LINKED)
		return ret;

	if ((priv->rtllib->iw_mode != IW_MODE_INFRA))
		return ret;

	spin_lock_irqsave(&priv->rtllib->lock, flags);
	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
		memcpy(&priv->rtllib->current_network.qos_data.parameters,
		       &network->qos_data.parameters,
		       sizeof(struct rtllib_qos_parameters));
		priv->rtllib->current_network.qos_data.active = 1;
		priv->rtllib->wmm_acm = network->qos_data.wmm_acm;
		set_qos_param = 1;
		/* Remember which parameter revision is now active. */
		priv->rtllib->current_network.qos_data.old_param_count =
			 priv->rtllib->current_network.qos_data.param_count;
		priv->rtllib->current_network.qos_data.param_count =
			 network->qos_data.param_count;
	} else {
		memcpy(&priv->rtllib->current_network.qos_data.parameters,
		       &def_qos_parameters, size);
		priv->rtllib->current_network.qos_data.active = 0;
		priv->rtllib->current_network.qos_data.supported = 0;
		set_qos_param = 1;
	}

	spin_unlock_irqrestore(&priv->rtllib->lock, flags);
RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n", __func__,
		 network->flags,
		 priv->rtllib->current_network.qos_data.active);

	if (set_qos_param == 1) {
		/* Refresh EDCA turbo state and push parameters to HW. */
		dm_init_edca_turbo(priv->rtllib->dev);
		queue_work_rsl(priv->priv_wq, &priv->qos_activate);
	}
	return ret;
}

/* rtllib assoc-response hook: forward to the QoS handler above. */
static int rtl8192_handle_assoc_response(struct net_device *dev,
				 struct rtllib_assoc_response_frame *resp,
				 struct rtllib_network *network)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	rtl8192_qos_association_resp(priv, network);
	return 0;
}

/*
 * Build the next beacon frame: drop the previously queued beacon skb,
 * fetch a fresh one from rtllib, fill the first beacon-ring descriptor
 * and hand it to hardware (OWN = 1).
 */
static void rtl8192_prepare_beacon(struct r8192_priv *priv)
{
	struct net_device *dev = priv->rtllib->dev;
	struct sk_buff *pskb = NULL, *pnewskb = NULL;
	struct cb_desc *tcb_desc = NULL;
	struct rtl8192_tx_ring *ring = NULL;
	struct tx_desc *pdesc = NULL;

	ring = &priv->tx_ring[BEACON_QUEUE];
	pskb = __skb_dequeue(&ring->queue);
	kfree_skb(pskb);	/* kfree_skb() tolerates NULL */

	pnewskb = rtllib_get_beacon(priv->rtllib);
	if (!pnewskb)
		return;

	/* NOTE(review): cb + 8 mirrors the driver's cb_desc placement;
	 * data_rate = 2 presumably selects 1Mbps - confirm. */
	tcb_desc = (struct cb_desc *)(pnewskb->cb + 8);
	tcb_desc->queue_index = BEACON_QUEUE;
	tcb_desc->data_rate = 2;
	tcb_desc->RATRIndex = 7;
	tcb_desc->bTxDisableRateFallBack = 1;
	tcb_desc->bTxUseDriverAssingedRate = 1;
	skb_push(pnewskb, priv->rtllib->tx_headroom);

	pdesc = &ring->desc[0];
	priv->ops->tx_fill_descriptor(dev, pdesc, tcb_desc, pnewskb);
	__skb_queue_tail(&ring->queue, pnewskb);
	pdesc->OWN = 1;	/* hand the descriptor to the NIC */

	return;
}

/* rtllib hook: nothing to do on this hardware. */
static void rtl8192_stop_beacon(struct net_device *dev)
{
}

/*
 * Accumulate the basic-rate bitmap (RRSR_*) from the network's rate and
 * extended-rate lists into *rate_config.
 */
void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_network *net;
	u8 i = 0, basic_rate = 0;

	net = &priv->rtllib->current_network;

	for (i = 0; i < net->rates_len; i++) {
		basic_rate = net->rates[i] & 0x7f;
		switch (basic_rate) {
		case MGN_1M:
			*rate_config |= RRSR_1M;
			break;
		case MGN_2M:
			*rate_config |= RRSR_2M;
			break;
		case MGN_5_5M:
			*rate_config |= RRSR_5_5M;
			break;
		case MGN_11M:
			*rate_config |= RRSR_11M;
			break;
		case MGN_6M:
			*rate_config |= RRSR_6M;
			break;
		case MGN_9M:
			*rate_config |= RRSR_9M;
			break;
		case MGN_12M:
			*rate_config |= RRSR_12M;
break;
		case MGN_18M:
			*rate_config |= RRSR_18M;
			break;
		case MGN_24M:
			*rate_config |= RRSR_24M;
			break;
		case MGN_36M:
			*rate_config |= RRSR_36M;
			break;
		case MGN_48M:
			*rate_config |= RRSR_48M;
			break;
		case MGN_54M:
			*rate_config |= RRSR_54M;
			break;
		}
	}

	/* Same mapping for the extended-rates list. */
	for (i = 0; i < net->rates_ex_len; i++) {
		basic_rate = net->rates_ex[i] & 0x7f;
		switch (basic_rate) {
		case MGN_1M:
			*rate_config |= RRSR_1M;
			break;
		case MGN_2M:
			*rate_config |= RRSR_2M;
			break;
		case MGN_5_5M:
			*rate_config |= RRSR_5_5M;
			break;
		case MGN_11M:
			*rate_config |= RRSR_11M;
			break;
		case MGN_6M:
			*rate_config |= RRSR_6M;
			break;
		case MGN_9M:
			*rate_config |= RRSR_9M;
			break;
		case MGN_12M:
			*rate_config |= RRSR_12M;
			break;
		case MGN_18M:
			*rate_config |= RRSR_18M;
			break;
		case MGN_24M:
			*rate_config |= RRSR_24M;
			break;
		case MGN_36M:
			*rate_config |= RRSR_36M;
			break;
		case MGN_48M:
			*rate_config |= RRSR_48M;
			break;
		case MGN_54M:
			*rate_config |= RRSR_54M;
			break;
		}
	}
}

/*
 * Rebuild the HT operational rate sets from the regulatory HT rate set
 * when running in an 11n mode; clear them otherwise.
 */
static void rtl8192_refresh_supportrate(struct r8192_priv *priv)
{
	struct rtllib_device *ieee = priv->rtllib;

	if (ieee->mode == WIRELESS_MODE_N_24G ||
	    ieee->mode == WIRELESS_MODE_N_5G) {
		memcpy(ieee->Regdot11HTOperationalRateSet,
		       ieee->RegHTSuppRateSet, 16);
		memcpy(ieee->Regdot11TxHTOperationalRateSet,
		       ieee->RegHTSuppRateSet, 16);
	} else {
		memset(ieee->Regdot11HTOperationalRateSet, 0, 16);
	}
	return;
}

/* Wireless-mode capability bitmap derived from the RF chip type. */
static u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 ret = 0;

	switch (priv->rf_chip) {
	case RF_8225:
	case RF_8256:
	case RF_6052:
	case RF_PSEUDO_11N:
		ret = (WIRELESS_MODE_N_24G|WIRELESS_MODE_G | WIRELESS_MODE_B);
		break;
	case RF_8258:
		ret = (WIRELESS_MODE_A | WIRELESS_MODE_N_5G);
		break;
	default:
		ret = WIRELESS_MODE_B;
		break;
	}
	return ret;
}

/*
 * Select and apply a wireless mode.  AUTO (or an unsupported request)
 * falls back to the best mode the RF chip supports, preferring
 * N_24G > N_5G > A > G > B.
 */
void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 bSupportMode = rtl8192_getSupportedWireleeMode(dev);

	if ((wireless_mode == WIRELESS_MODE_AUTO) ||
	    ((wireless_mode & bSupportMode) == 0)) {
		if (bSupportMode &
WIRELESS_MODE_N_24G) {
			wireless_mode = WIRELESS_MODE_N_24G;
		} else if (bSupportMode & WIRELESS_MODE_N_5G) {
			wireless_mode = WIRELESS_MODE_N_5G;
		} else if ((bSupportMode & WIRELESS_MODE_A)) {
			wireless_mode = WIRELESS_MODE_A;
		} else if ((bSupportMode & WIRELESS_MODE_G)) {
			wireless_mode = WIRELESS_MODE_G;
		} else if ((bSupportMode & WIRELESS_MODE_B)) {
			wireless_mode = WIRELESS_MODE_B;
		} else {
			RT_TRACE(COMP_ERR, "%s(), No valid wireless mode "
				 "supported (%x)!!!\n", __func__, bSupportMode);
			wireless_mode = WIRELESS_MODE_B;
		}
	}

	/* B+G combined collapses to G. */
	if ((wireless_mode & (WIRELESS_MODE_B | WIRELESS_MODE_G)) ==
	    (WIRELESS_MODE_G | WIRELESS_MODE_B))
		wireless_mode = WIRELESS_MODE_G;

	priv->rtllib->mode = wireless_mode;

	ActUpdateChannelAccessSetting(dev, wireless_mode,
				      &priv->ChannelAccessSetting);

	/* HT is enabled exactly for the 11n modes. */
	if ((wireless_mode == WIRELESS_MODE_N_24G) ||
	    (wireless_mode == WIRELESS_MODE_N_5G)) {
		priv->rtllib->pHTInfo->bEnableHT = 1;
		RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 1\n",
			 __func__, wireless_mode);
	} else {
		priv->rtllib->pHTInfo->bEnableHT = 0;
		RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 0\n",
			 __func__, wireless_mode);
	}

	RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
	rtl8192_refresh_supportrate(priv);
}

/*
 * Bring the interface up: initialize the adapter, (re)start the softmac
 * protocol if not linked, and start/wake the TX queue.
 * Returns 0 on success, -1 when adapter initialization fails.
 */
static int _rtl8192_sta_up(struct net_device *dev, bool is_silent_reset)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
					(&(priv->rtllib->PowerSaveControl));
	bool init_status = true;

	priv->bDriverIsGoingToUnload = false;
	priv->bdisable_nic = false;

	priv->up = 1;
	priv->rtllib->ieee_up = 1;

	priv->up_first_time = 0;
	RT_TRACE(COMP_INIT, "Bringing up iface");
	priv->bfirst_init = true;
	init_status = priv->ops->initialize_adapter(dev);
	if (init_status != true) {
		RT_TRACE(COMP_ERR, "ERR!!! %s(): initialization is failed!\n",
			 __func__);
		priv->bfirst_init = false;
		return -1;
	}

	RT_TRACE(COMP_INIT, "start adapter finished\n");
	RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
	priv->bfirst_init = false;

	/* Kick the GPIO RF-kill poll timer if it is not yet running. */
	if (priv->polling_timer_on == 0)
		check_rfctrl_gpio_timer((unsigned long)dev);

	if (priv->rtllib->state != RTLLIB_LINKED)
		rtllib_softmac_start_protocol(priv->rtllib, 0);
	rtllib_reset_queue(priv->rtllib);
	watch_dog_timer_callback((unsigned long) dev);

	if (!netif_queue_stopped(dev))
		netif_start_queue(dev);
	else
		netif_wake_queue(dev);

	return 0;
}

/*
 * Tear the interface down: leave power-save states, stop the softmac,
 * cancel deferred work and timers, then claim the RF-change flag so the
 * adapter can be stopped (completed in the following chunk).
 */
static int rtl8192_sta_down(struct net_device *dev, bool shutdownrf)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	unsigned long flags = 0;
	u8 RFInProgressTimeOut = 0;

	if (priv->up == 0)
		return -1;

	if (priv->rtllib->rtllib_ips_leave != NULL)
		priv->rtllib->rtllib_ips_leave(dev);

	if (priv->rtllib->state == RTLLIB_LINKED)
		LeisurePSLeave(dev);

	priv->bDriverIsGoingToUnload = true;
	priv->up = 0;
	priv->rtllib->ieee_up = 0;
	priv->bfirst_after_down = 1;
	RT_TRACE(COMP_DOWN, "==========>%s()\n", __func__);
	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	priv->rtllib->wpa_ie_len = 0;
	kfree(priv->rtllib->wpa_ie);
	priv->rtllib->wpa_ie = NULL;
	CamResetAllEntry(dev);
	memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
	rtl8192_irq_disable(dev);

	del_timer_sync(&priv->watch_dog_timer);
	rtl8192_cancel_deferred_work(priv);
	cancel_delayed_work(&priv->rtllib->hw_wakeup_wq);

	rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
	spin_lock_irqsave(&priv->rf_ps_lock, flags);
	/* Wait (up to ~100ms) for any RF change in flight before claiming
	 * RFChangeInProgress ourselves. */
	while (priv->RFChangeInProgress) {
		spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
		if (RFInProgressTimeOut > 100) {
			spin_lock_irqsave(&priv->rf_ps_lock, flags);
			break;
		}
		RT_TRACE(COMP_DBG, "===>%s():RF is in progress, need to wait "
			 "until rf change is done.\n", __func__);
		mdelay(1);
		RFInProgressTimeOut++;
		spin_lock_irqsave(&priv->rf_ps_lock, flags);
	}
	priv->RFChangeInProgress = true;
	spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
priv->ops->stop_adapter(dev, false);
	spin_lock_irqsave(&priv->rf_ps_lock, flags);
	priv->RFChangeInProgress = false;
	spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
	udelay(100);
	/* Forget the current network (everything up to the list head). */
	memset(&priv->rtllib->current_network, 0,
	       offsetof(struct rtllib_network, list));
	RT_TRACE(COMP_DOWN, "<==========%s()\n", __func__);

	return 0;
}

/*
 * Wire the rtllib callback table to this driver's implementations and
 * to the chip-specific ops vector.
 */
static void rtl8192_init_priv_handler(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->rtllib->softmac_hard_start_xmit = rtl8192_hard_start_xmit;
	priv->rtllib->set_chan = rtl8192_set_chan;
	priv->rtllib->link_change = priv->ops->link_change;
	priv->rtllib->softmac_data_hard_start_xmit = rtl8192_hard_data_xmit;
	priv->rtllib->data_hard_stop = rtl8192_data_hard_stop;
	priv->rtllib->data_hard_resume = rtl8192_data_hard_resume;
	priv->rtllib->check_nic_enough_desc = rtl8192_check_nic_enough_desc;
	priv->rtllib->get_nic_desc_num = rtl8192_get_nic_desc_num;
	priv->rtllib->handle_assoc_response = rtl8192_handle_assoc_response;
	priv->rtllib->handle_beacon = rtl8192_handle_beacon;
	priv->rtllib->SetWirelessMode = rtl8192_SetWirelessMode;
	priv->rtllib->LeisurePSLeave = LeisurePSLeave;
	priv->rtllib->SetBWModeHandler = rtl8192_SetBWMode;
	priv->rf_set_chan = rtl8192_phy_SwChnl;

	priv->rtllib->start_send_beacons = rtl8192e_start_beacon;
	priv->rtllib->stop_send_beacons = rtl8192_stop_beacon;

	/* Power-save hooks. */
	priv->rtllib->sta_wake_up = rtl8192_hw_wakeup;
	priv->rtllib->enter_sleep_state = rtl8192_hw_to_sleep;
	priv->rtllib->ps_is_queue_empty = rtl8192_is_tx_queue_empty;

	priv->rtllib->GetNmodeSupportBySecCfg = rtl8192_GetNmodeSupportBySecCfg;
	priv->rtllib->GetHalfNmodeSupportByAPsHandler =
					 rtl8192_GetHalfNmodeSupportByAPs;

	priv->rtllib->SetHwRegHandler = rtl8192e_SetHwReg;
	priv->rtllib->AllowAllDestAddrHandler = rtl8192_AllowAllDestAddr;
	priv->rtllib->SetFwCmdHandler = NULL;
	priv->rtllib->InitialGainHandler = InitialGain819xPci;
	priv->rtllib->rtllib_ips_leave_wq = rtllib_ips_leave_wq;
	priv->rtllib->rtllib_ips_leave = rtllib_ips_leave;
priv->rtllib->LedControlHandler = NULL;
	priv->rtllib->UpdateBeaconInterruptHandler = NULL;

	priv->rtllib->ScanOperationBackupHandler = PHY_ScanOperationBackup8192;

	priv->rtllib->rtllib_rfkill_poll = NULL;
}

/*
 * Register-style constants (power save interval, PCI ASPM policy knobs).
 * NOTE(review): the numeric policy values' meanings are defined by the
 * consumers of these Reg* fields, not visible here.
 */
static void rtl8192_init_priv_constant(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
					&(priv->rtllib->PowerSaveControl);

	pPSC->RegMaxLPSAwakeIntvl = 5;

	priv->RegPciASPM = 2;

	priv->RegDevicePciASPMSetting = 0x03;

	priv->RegHostPciASPMSetting = 0x02;

	priv->RegHwSwRfOffD3 = 2;

	priv->RegSupportPciASPM = 2;
}

/* Reset all soft state to power-on defaults (continued in next chunks). */
static void rtl8192_init_priv_variable(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 i;

	priv->AcmMethod = eAcmWay2_SW;
	priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
	priv->rtllib->hwscan_sem_up = 1;
	priv->rtllib->status = 0;
	priv->H2CTxCmdSeq = 0;
	priv->bDisableFrameBursting = 0;
	priv->bDMInitialGainEnable = 1;
	priv->polling_timer_on = 0;
	priv->up_first_time = 1;
	priv->blinked_ingpio = false;
	priv->bDriverIsGoingToUnload = false;
	priv->being_init_adapter = false;
	priv->initialized_at_probe = false;
	priv->sw_radio_on = true;
	priv->bdisable_nic = false;
	priv->bfirst_init = false;
	priv->txringcount = 64;
	priv->rxbuffersize = 9100;	/* RX buffer size in bytes */
	priv->rxringcount = MAX_RX_COUNT;
	priv->irq_enabled = 0;
	priv->chan = 1;
	priv->RegWirelessMode = WIRELESS_MODE_AUTO;
	priv->RegChannelPlan = 0xf;
	priv->nrxAMPDU_size = 0;
	priv->nrxAMPDU_aggr_num = 0;
	priv->last_rxdesc_tsf_high = 0;
	priv->last_rxdesc_tsf_low = 0;
	priv->rtllib->mode = WIRELESS_MODE_AUTO;
	priv->rtllib->iw_mode = IW_MODE_INFRA;
	priv->rtllib->bNetPromiscuousMode = false;
	priv->rtllib->IntelPromiscuousModeInfo.bPromiscuousOn = false;
	priv->rtllib->IntelPromiscuousModeInfo.bFilterSourceStationFrame =
								 false;
	priv->rtllib->ieee_up = 0;
	priv->retry_rts = DEFAULT_RETRY_RTS;
	priv->retry_data = DEFAULT_RETRY_DATA;
	priv->rtllib->rts = DEFAULT_RTS_THRESHOLD;
	priv->rtllib->rate = 110;	/* NOTE(review): presumably 11Mbps
					 * in units of 100kbps - confirm */
	priv->rtllib->short_slot = 1;
	priv->promisc =
(dev->flags & IFF_PROMISC) ? 1 : 0;
	priv->bcck_in_ch14 = false;
	priv->bfsync_processing = false;
	priv->CCKPresentAttentuation = 0;
	priv->rfa_txpowertrackingindex = 0;
	priv->rfc_txpowertrackingindex = 0;
	priv->CckPwEnl = 6;
	priv->ScanDelay = 50;
	priv->ResetProgress = RESET_TYPE_NORESET;
	priv->bForcedSilentReset = 0;
	priv->bDisableNormalResetCheck = false;
	priv->force_reset = false;
	/* Clear the software CAM mirror and interrupt statistics. */
	memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);

	memset(&priv->InterruptLog, 0, sizeof(struct log_int_8190));
	priv->RxCounter = 0;
	priv->rtllib->wx_set_enc = 0;
	priv->bHwRadioOff = false;
	priv->RegRfOff = 0;
	priv->isRFOff = false;
	priv->bInPowerSaveMode = false;
	priv->rtllib->RfOffReason = 0;
	priv->RFChangeInProgress = false;
	priv->bHwRfOffAction = 0;
	priv->SetRFPowerStateInProgress = false;
	/* Power-save policy defaults: inactive PS and leisure PS on,
	 * firmware-controlled LPS off. */
	priv->rtllib->PowerSaveControl.bInactivePs = true;
	priv->rtllib->PowerSaveControl.bIPSModeBackup = false;
	priv->rtllib->PowerSaveControl.bLeisurePs = true;
	priv->rtllib->PowerSaveControl.bFwCtrlLPS = false;
	priv->rtllib->LPSDelayCnt = 0;
	priv->rtllib->sta_sleep = LPS_IS_WAKE;
	priv->rtllib->eRFPowerState = eRfOn;

	priv->txpower_checkcnt = 0;
	priv->thermal_readback_index = 0;
	priv->txpower_tracking_callback_cnt = 0;
	priv->ccktxpower_adjustcnt_ch14 = 0;
	priv->ccktxpower_adjustcnt_not_ch14 = 0;

	priv->rtllib->current_network.beacon_interval =
		 DEFAULT_BEACONINTERVAL;
	priv->rtllib->iw_mode = IW_MODE_INFRA;
	priv->rtllib->active_scan = 1;
	priv->rtllib->be_scan_inprogress = false;
	priv->rtllib->modulation =
		 RTLLIB_CCK_MODULATION | RTLLIB_OFDM_MODULATION;
	priv->rtllib->host_encrypt = 1;
	priv->rtllib->host_decrypt = 1;

	priv->rtllib->dot11PowerSaveMode = eActive;
	priv->rtllib->fts = DEFAULT_FRAG_THRESHOLD;
	priv->rtllib->MaxMssDensity = 0;
	priv->rtllib->MinSpaceCfg = 0;

	priv->card_type = PCI;

	priv->AcmControl = 0;
	priv->pFirmware = vzalloc(sizeof(struct rt_firmware));
	if (!priv->pFirmware)
		printk(KERN_ERR "rtl8192e: Unable to allocate space "
		       "for firmware\n");
skb_queue_head_init(&priv->rx_queue);
	skb_queue_head_init(&priv->skb_queue);

	/* Per-queue wait and aggregation skb lists. */
	for (i = 0; i < MAX_QUEUE_SIZE; i++)
		skb_queue_head_init(&priv->rtllib->skb_waitQ[i]);
	for (i = 0; i < MAX_QUEUE_SIZE; i++)
		skb_queue_head_init(&priv->rtllib->skb_aggQ[i]);
}

/* Initialize every lock, semaphore and mutex used by the driver. */
static void rtl8192_init_priv_lock(struct r8192_priv *priv)
{
	spin_lock_init(&priv->fw_scan_lock);
	spin_lock_init(&priv->tx_lock);
	spin_lock_init(&priv->irq_lock);
	spin_lock_init(&priv->irq_th_lock);
	spin_lock_init(&priv->rf_ps_lock);
	spin_lock_init(&priv->ps_lock);
	spin_lock_init(&priv->rf_lock);
	spin_lock_init(&priv->rt_h2c_lock);
	sema_init(&priv->wx_sem, 1);
	sema_init(&priv->rf_sem, 1);
	mutex_init(&priv->mutex);
}

/* Create the driver workqueue and register all work items and tasklets. */
static void rtl8192_init_priv_task(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	priv->priv_wq = create_workqueue(DRV_NAME);
	INIT_WORK_RSL(&priv->reset_wq, (void *)rtl8192_restart, dev);
	INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, (void *)IPSLeave_wq, dev);
	INIT_DELAYED_WORK_RSL(&priv->watch_dog_wq,
			      (void *)rtl819x_watchdog_wqcallback, dev);
	INIT_DELAYED_WORK_RSL(&priv->txpower_tracking_wq,
			      (void *)dm_txpower_trackingcallback, dev);
	INIT_DELAYED_WORK_RSL(&priv->rfpath_check_wq,
			      (void *)dm_rf_pathcheck_workitemcallback, dev);
	INIT_DELAYED_WORK_RSL(&priv->update_beacon_wq,
			      (void *)rtl8192_update_beacon, dev);
	INIT_WORK_RSL(&priv->qos_activate, (void *)rtl8192_qos_activate, dev);
	INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_wakeup_wq,
			      (void *) rtl8192_hw_wakeup_wq, dev);
	INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_sleep_wq,
			      (void *) rtl8192_hw_sleep_wq, dev);
	tasklet_init(&priv->irq_rx_tasklet,
		     (void(*)(unsigned long))rtl8192_irq_rx_tasklet,
		     (unsigned long)priv);
	tasklet_init(&priv->irq_tx_tasklet,
		     (void(*)(unsigned long))rtl8192_irq_tx_tasklet,
		     (unsigned long)priv);
	tasklet_init(&priv->irq_prepare_beacon_tasklet,
		     (void(*)(unsigned long))rtl8192_prepare_beacon,
		     (unsigned long)priv);
}

/*
 * Build the active-channel map from the configured channel plan
 * (continued in the next chunk).
 */
static short rtl8192_get_channel_map(struct net_device *dev)
{
	int i;
	struct r8192_priv *priv =
rtllib_priv(dev); if ((priv->rf_chip != RF_8225) && (priv->rf_chip != RF_8256) && (priv->rf_chip != RF_6052)) { RT_TRACE(COMP_ERR, "%s: unknown rf chip, can't set channel " "map\n", __func__); return -1; } if (priv->ChannelPlan >= COUNTRY_CODE_MAX) { printk(KERN_INFO "rtl819x_init:Error channel plan! Set to " "default.\n"); priv->ChannelPlan = COUNTRY_CODE_FCC; } RT_TRACE(COMP_INIT, "Channel plan is %d\n", priv->ChannelPlan); dot11d_init(priv->rtllib); Dot11d_Channelmap(priv->ChannelPlan, priv->rtllib); for (i = 1; i <= 11; i++) (priv->rtllib->active_channel_map)[i] = 1; (priv->rtllib->active_channel_map)[12] = 2; (priv->rtllib->active_channel_map)[13] = 2; return 0; } static short rtl8192_init(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); memset(&(priv->stats), 0, sizeof(struct rt_stats)); rtl8192_init_priv_handler(dev); rtl8192_init_priv_constant(dev); rtl8192_init_priv_variable(dev); rtl8192_init_priv_lock(priv); rtl8192_init_priv_task(dev); priv->ops->get_eeprom_size(dev); priv->ops->init_adapter_variable(dev); rtl8192_get_channel_map(dev); init_hal_dm(dev); init_timer(&priv->watch_dog_timer); setup_timer(&priv->watch_dog_timer, watch_dog_timer_callback, (unsigned long) dev); init_timer(&priv->gpio_polling_timer); setup_timer(&priv->gpio_polling_timer, check_rfctrl_gpio_timer, (unsigned long)dev); rtl8192_irq_disable(dev); if (request_irq(dev->irq, (void *)rtl8192_interrupt_rsl, IRQF_SHARED, dev->name, dev)) { printk(KERN_ERR "Error allocating IRQ %d", dev->irq); return -1; } else { priv->irq = dev->irq; RT_TRACE(COMP_INIT, "IRQ %d\n", dev->irq); } if (rtl8192_pci_initdescring(dev) != 0) { printk(KERN_ERR "Endopoints initialization failed"); free_irq(dev->irq, dev); return -1; } return 0; } /*************************************************************************** -------------------------------WATCHDOG STUFF--------------------------- ***************************************************************************/ short 
/*
 * Return 1 when every TX software queue (except the command and HCCA
 * queues) is empty, 0 otherwise.
 */
rtl8192_is_tx_queue_empty(struct net_device *dev)
{
	int i = 0;
	struct r8192_priv *priv = rtllib_priv(dev);

	for (i = 0; i <= MGNT_QUEUE; i++) {
		if ((i == TXCMD_QUEUE) || (i == HCCA_QUEUE))
			continue;
		if (skb_queue_len(&(&priv->tx_ring[i])->queue) > 0) {
			printk(KERN_INFO "===>tx queue is not empty:%d, %d\n",
			       i, skb_queue_len(&(&priv->tx_ring[i])->queue));
			return 0;
		}
	}
	return 1;
}

/*
 * Detect a stuck TX path: bump a stuck counter on the head skb of every
 * non-empty data ring, then ask the chip-specific handler whether the
 * firmware stopped transmitting.  Returns RESET_TYPE_SILENT when a silent
 * reset is warranted, RESET_TYPE_NORESET otherwise.
 */
static enum reset_type rtl819x_TxCheckStuck(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 QueueID;
	u8 ResetThreshold = NIC_SEND_HANG_THRESHOLD_POWERSAVE;
	bool bCheckFwTxCnt = false;
	struct rtl8192_tx_ring *ring = NULL;
	struct sk_buff *skb = NULL;
	struct cb_desc *tcb_desc = NULL;
	unsigned long flags = 0;

	/* The threshold depends on the current power-save mode.
	 * NOTE(review): ResetThreshold is set but not otherwise consumed
	 * in this function - possibly used by the ops handler path. */
	switch (priv->rtllib->ps) {
	case RTLLIB_PS_DISABLED:
		ResetThreshold = NIC_SEND_HANG_THRESHOLD_NORMAL;
		break;
	case (RTLLIB_PS_MBCAST|RTLLIB_PS_UNICAST):
		ResetThreshold = NIC_SEND_HANG_THRESHOLD_POWERSAVE;
		break;
	default:
		ResetThreshold = NIC_SEND_HANG_THRESHOLD_POWERSAVE;
		break;
	}
	spin_lock_irqsave(&priv->irq_th_lock, flags);
	for (QueueID = 0; QueueID < MAX_TX_QUEUE; QueueID++) {
		if (QueueID == TXCMD_QUEUE)
			continue;

		if (QueueID == BEACON_QUEUE)
			continue;

		ring = &priv->tx_ring[QueueID];

		if (skb_queue_len(&ring->queue) == 0) {
			continue;
		} else {
			skb = (&ring->queue)->next;
			tcb_desc = (struct cb_desc *)(skb->cb +
				    MAX_DEV_ADDR_SIZE);
			tcb_desc->nStuckCount++;
			bCheckFwTxCnt = true;
			if (tcb_desc->nStuckCount > 1)
				printk(KERN_INFO "%s: QueueID=%d tcb_desc->n"
				       "StuckCount=%d\n", __func__, QueueID,
				       tcb_desc->nStuckCount);
		}
	}
	spin_unlock_irqrestore(&priv->irq_th_lock, flags);

	if (bCheckFwTxCnt) {
		if (priv->ops->TxCheckStuckHandler(dev)) {
			RT_TRACE(COMP_RESET, "TxCheckStuck(): Fw indicates no"
				 " Tx condition!\n");
			return RESET_TYPE_SILENT;
		}
	}

	return RESET_TYPE_NORESET;
}

/* Ask the chip-specific handler whether the RX path is stuck. */
static enum reset_type rtl819x_RxCheckStuck(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->ops->RxCheckStuckHandler(dev)) {
		RT_TRACE(COMP_RESET, "RxStuck Condition\n");
		return
RESET_TYPE_SILENT;
	}
	return RESET_TYPE_NORESET;
}

/*
 * Combine the TX and RX stuck checks into one verdict.  TX is only
 * checked with the radio on; RX additionally requires an associated
 * infrastructure link.  NORMAL outranks SILENT outranks NORESET.
 */
static enum reset_type rtl819x_ifcheck_resetornot(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	enum reset_type TxResetType = RESET_TYPE_NORESET;
	enum reset_type RxResetType = RESET_TYPE_NORESET;
	enum rt_rf_power_state rfState;

	rfState = priv->rtllib->eRFPowerState;

	if (rfState == eRfOn)
		TxResetType = rtl819x_TxCheckStuck(dev);

	if (rfState == eRfOn &&
	    (priv->rtllib->iw_mode == IW_MODE_INFRA) &&
	    (priv->rtllib->state == RTLLIB_LINKED))
		RxResetType = rtl819x_RxCheckStuck(dev);

	if (TxResetType == RESET_TYPE_NORMAL ||
	    RxResetType == RESET_TYPE_NORMAL) {
		printk(KERN_INFO "%s(): TxResetType is %d, RxResetType is %d\n",
		       __func__, TxResetType, RxResetType);
		return RESET_TYPE_NORMAL;
	} else if (TxResetType == RESET_TYPE_SILENT ||
		   RxResetType == RESET_TYPE_SILENT) {
		printk(KERN_INFO "%s(): TxResetType is %d, RxResetType is %d\n",
		       __func__, TxResetType, RxResetType);
		return RESET_TYPE_SILENT;
	} else {
		return RESET_TYPE_NORESET;
	}
}

/* Mesh backup hook: intentionally empty on this hardware. */
static void rtl819x_silentreset_mesh_bk(struct net_device *dev, u8 IsPortal)
{
}

/*
 * Perform a "silent" reset: bring the interface down and back up without
 * notifying userspace, retrying up to three times (continues past this
 * chunk).
 */
static void rtl819x_ifsilentreset(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 reset_times = 0;
	int reset_status = 0;
	struct rtllib_device *ieee = priv->rtllib;
	unsigned long flag;

	u8 IsPortal = 0;

	if (priv->ResetProgress == RESET_TYPE_NORESET) {

		RT_TRACE(COMP_RESET, "=========>Reset progress!!\n");

		priv->ResetProgress = RESET_TYPE_SILENT;

		spin_lock_irqsave(&priv->rf_ps_lock, flag);
		if (priv->RFChangeInProgress) {
			/* An RF change is running; bail out to END. */
			spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
			goto END;
		}
		priv->RFChangeInProgress = true;
		priv->bResetInProgress = true;
		spin_unlock_irqrestore(&priv->rf_ps_lock, flag);

RESET_START:

		down(&priv->wx_sem);

		if (priv->rtllib->state == RTLLIB_LINKED)
			LeisurePSLeave(dev);

		if (IS_NIC_DOWN(priv)) {
			RT_TRACE(COMP_ERR, "%s():the driver is not up! 
" "return\n", __func__); up(&priv->wx_sem); return ; } priv->up = 0; RT_TRACE(COMP_RESET, "%s():======>start to down the driver\n", __func__); mdelay(1000); RT_TRACE(COMP_RESET, "%s():111111111111111111111111======>start" " to down the driver\n", __func__); if (!netif_queue_stopped(dev)) netif_stop_queue(dev); rtl8192_irq_disable(dev); del_timer_sync(&priv->watch_dog_timer); rtl8192_cancel_deferred_work(priv); deinit_hal_dm(dev); rtllib_stop_scan_syncro(ieee); if (ieee->state == RTLLIB_LINKED) { SEM_DOWN_IEEE_WX(&ieee->wx_sem); printk(KERN_INFO "ieee->state is RTLLIB_LINKED\n"); rtllib_stop_send_beacons(priv->rtllib); del_timer_sync(&ieee->associate_timer); cancel_delayed_work(&ieee->associate_retry_wq); rtllib_stop_scan(ieee); netif_carrier_off(dev); SEM_UP_IEEE_WX(&ieee->wx_sem); } else { printk(KERN_INFO "ieee->state is NOT LINKED\n"); rtllib_softmac_stop_protocol(priv->rtllib, 0 , true); } dm_backup_dynamic_mechanism_state(dev); up(&priv->wx_sem); RT_TRACE(COMP_RESET, "%s():<==========down process is " "finished\n", __func__); RT_TRACE(COMP_RESET, "%s():<===========up process start\n", __func__); reset_status = _rtl8192_up(dev, true); RT_TRACE(COMP_RESET, "%s():<===========up process is " "finished\n", __func__); if (reset_status == -1) { if (reset_times < 3) { reset_times++; goto RESET_START; } else { RT_TRACE(COMP_ERR, " ERR!!! 
%s(): Reset " "Failed!!\n", __func__); } } ieee->is_silent_reset = 1; spin_lock_irqsave(&priv->rf_ps_lock, flag); priv->RFChangeInProgress = false; spin_unlock_irqrestore(&priv->rf_ps_lock, flag); EnableHWSecurityConfig8192(dev); if (ieee->state == RTLLIB_LINKED && ieee->iw_mode == IW_MODE_INFRA) { ieee->set_chan(ieee->dev, ieee->current_network.channel); queue_work_rsl(ieee->wq, &ieee->associate_complete_wq); } else if (ieee->state == RTLLIB_LINKED && ieee->iw_mode == IW_MODE_ADHOC) { ieee->set_chan(ieee->dev, ieee->current_network.channel); ieee->link_change(ieee->dev); notify_wx_assoc_event(ieee); rtllib_start_send_beacons(ieee); if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); netif_carrier_on(ieee->dev); } else if (ieee->iw_mode == IW_MODE_MESH) { rtl819x_silentreset_mesh_bk(dev, IsPortal); } CamRestoreAllEntry(dev); dm_restore_dynamic_mechanism_state(dev); END: priv->ResetProgress = RESET_TYPE_NORESET; priv->reset_count++; priv->bForcedSilentReset = false; priv->bResetInProgress = false; write_nic_byte(dev, UFWP, 1); RT_TRACE(COMP_RESET, "Reset finished!! 
====>[%d]\n", priv->reset_count); } } static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum, u32 *TotalRxDataNum) { u16 SlotIndex; u8 i; *TotalRxBcnNum = 0; *TotalRxDataNum = 0; SlotIndex = (priv->rtllib->LinkDetectInfo.SlotIndex++) % (priv->rtllib->LinkDetectInfo.SlotNum); priv->rtllib->LinkDetectInfo.RxBcnNum[SlotIndex] = priv->rtllib->LinkDetectInfo.NumRecvBcnInPeriod; priv->rtllib->LinkDetectInfo.RxDataNum[SlotIndex] = priv->rtllib->LinkDetectInfo.NumRecvDataInPeriod; for (i = 0; i < priv->rtllib->LinkDetectInfo.SlotNum; i++) { *TotalRxBcnNum += priv->rtllib->LinkDetectInfo.RxBcnNum[i]; *TotalRxDataNum += priv->rtllib->LinkDetectInfo.RxDataNum[i]; } } void rtl819x_watchdog_wqcallback(void *data) { struct r8192_priv *priv = container_of_dwork_rsl(data, struct r8192_priv, watch_dog_wq); struct net_device *dev = priv->rtllib->dev; struct rtllib_device *ieee = priv->rtllib; enum reset_type ResetType = RESET_TYPE_NORESET; static u8 check_reset_cnt; unsigned long flags; struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) (&(priv->rtllib->PowerSaveControl)); bool bBusyTraffic = false; bool bHigherBusyTraffic = false; bool bHigherBusyRxTraffic = false; bool bEnterPS = false; if (IS_NIC_DOWN(priv) || (priv->bHwRadioOff == true)) return; if (priv->rtllib->state >= RTLLIB_LINKED) { if (priv->rtllib->CntAfterLink < 2) priv->rtllib->CntAfterLink++; } else { priv->rtllib->CntAfterLink = 0; } hal_dm_watchdog(dev); if (rtllib_act_scanning(priv->rtllib, false) == false) { if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state == RTLLIB_NOLINK) && (ieee->eRFPowerState == eRfOn) && !ieee->is_set_key && (!ieee->proto_stoppping) && !ieee->wx_set_enc) { if ((ieee->PowerSaveControl.ReturnPoint == IPS_CALLBACK_NONE) && (!ieee->bNetPromiscuousMode)) { RT_TRACE(COMP_PS, "====================>haha: " "IPSEnter()\n"); IPSEnter(dev); } } } if ((ieee->state == RTLLIB_LINKED) && (ieee->iw_mode == IW_MODE_INFRA) && (!ieee->bNetPromiscuousMode)) { if 
(ieee->LinkDetectInfo.NumRxOkInPeriod > 100 ||
		    ieee->LinkDetectInfo.NumTxOkInPeriod > 100)
			bBusyTraffic = true;

		/* Thresholds classify the last period's traffic level;
		 * values are frame counts per watchdog period. */
		if (ieee->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
		    ieee->LinkDetectInfo.NumTxOkInPeriod > 4000) {
			bHigherBusyTraffic = true;
			if (ieee->LinkDetectInfo.NumRxOkInPeriod > 5000)
				bHigherBusyRxTraffic = true;
			else
				bHigherBusyRxTraffic = false;
		}

		/* Enter leisure PS only when almost no unicast traffic. */
		if (((ieee->LinkDetectInfo.NumRxUnicastOkInPeriod +
		    ieee->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
		    (ieee->LinkDetectInfo.NumRxUnicastOkInPeriod > 2))
			bEnterPS = false;
		else
			bEnterPS = true;

		/* Short beacon intervals make PS wakeups too costly. */
		if (ieee->current_network.beacon_interval < 95)
			bEnterPS = false;

		if (bEnterPS)
			LeisurePSEnter(dev);
		else
			LeisurePSLeave(dev);

	} else {
		RT_TRACE(COMP_LPS, "====>no link LPS leave\n");
		LeisurePSLeave(dev);
	}

	/* Reset the per-period counters and publish the traffic flags. */
	ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
	ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
	ieee->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
	ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic;

	ieee->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic;
	ieee->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic;

	/* AP-loss detection: if no beacons nor data arrived over the whole
	 * link-detect window, assume the AP went away and start roaming. */
	if (ieee->state == RTLLIB_LINKED && ieee->iw_mode == IW_MODE_INFRA) {
		u32 TotalRxBcnNum = 0;
		u32 TotalRxDataNum = 0;

		rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);

		if ((TotalRxBcnNum+TotalRxDataNum) == 0)
			priv->check_roaming_cnt++;
		else
			priv->check_roaming_cnt = 0;

		if (priv->check_roaming_cnt > 0) {
			if (ieee->eRFPowerState == eRfOff)
				RT_TRACE(COMP_ERR, "========>%s()\n", __func__);

			printk(KERN_INFO "===>%s(): AP is power off, chan:%d,"
			       " connect another one\n", __func__, priv->chan);

			ieee->state = RTLLIB_ASSOCIATING;

			RemovePeerTS(priv->rtllib,
				     priv->rtllib->current_network.bssid);
			ieee->is_roaming = true;
			ieee->is_set_key = false;
			ieee->link_change(dev);
			if (ieee->LedControlHandler)
				ieee->LedControlHandler(ieee->dev,
							LED_CTL_START_TO_LINK);

			notify_wx_assoc_event(ieee);

			/* For open/WEP networks re-associate immediately;
			 * CCMP/TKIP re-association is driven by supplicant. */
			if (!(ieee->rtllib_ap_sec_type(ieee) &
			    (SEC_ALG_CCMP|SEC_ALG_TKIP)))
				queue_delayed_work_rsl(ieee->wq,
					&ieee->associate_procedure_wq, 0);
priv->check_roaming_cnt = 0;
		}
		ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
		ieee->LinkDetectInfo.NumRecvDataInPeriod = 0;
	}

	/* Stuck-check runs only every few periods and never while roaming
	 * or while an RF change / SW RF process is in flight. */
	spin_lock_irqsave(&priv->tx_lock, flags);
	if ((check_reset_cnt++ >= 3) && (!ieee->is_roaming) &&
	    (!priv->RFChangeInProgress) && (!pPSC->bSwRfProcessing)) {
		ResetType = rtl819x_ifcheck_resetornot(dev);
		check_reset_cnt = 3;
	}
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	if (!priv->bDisableNormalResetCheck && ResetType == RESET_TYPE_NORMAL) {
		priv->ResetProgress = RESET_TYPE_NORMAL;
		RT_TRACE(COMP_RESET, "%s(): NOMAL RESET\n", __func__);
		return;
	}

	if (((priv->force_reset) || (!priv->bDisableNormalResetCheck &&
	      ResetType == RESET_TYPE_SILENT)))
		rtl819x_ifsilentreset(dev);

	priv->force_reset = false;
	priv->bForcedSilentReset = false;
	priv->bResetInProgress = false;
	RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
}

/* Timer handler: kick the watchdog work item and re-arm the timer. */
void watch_dog_timer_callback(unsigned long data)
{
	struct r8192_priv *priv = rtllib_priv((struct net_device *)data);

	queue_delayed_work_rsl(priv->priv_wq, &priv->watch_dog_wq, 0);
	mod_timer(&priv->watch_dog_timer, jiffies +
		  MSECS(RTLLIB_WATCH_DOG_TIME));
}

/****************************************************************************
 ---------------------------- NIC TX/RX STUFF---------------------------
*****************************************************************************/
/* Enable the receive path via the HAL. */
void rtl8192_rx_enable(struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);

	priv->ops->rx_enable(dev);
}

/* Enable the transmit path via the HAL and flush the softmac queues. */
void rtl8192_tx_enable(struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);

	priv->ops->tx_enable(dev);

	rtllib_reset_queue(priv->rtllib);
}

/*
 * Release every RX ring: unmap and free each buffered skb, then free the
 * coherent descriptor array.  (Continues on the next chunk.)
 */
static void rtl8192_free_rx_ring(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int i, rx_queue_idx;

	for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) {
		for (i = 0; i < priv->rxringcount; i++) {
			struct sk_buff *skb = priv->rx_buf[rx_queue_idx][i];

			if (!skb)
				continue;
pci_unmap_single(priv->pdev, *((dma_addr_t *)skb->cb), priv->rxbuffersize, PCI_DMA_FROMDEVICE); kfree_skb(skb); } pci_free_consistent(priv->pdev, sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount, priv->rx_ring[rx_queue_idx], priv->rx_ring_dma[rx_queue_idx]); priv->rx_ring[rx_queue_idx] = NULL; } } static void rtl8192_free_tx_ring(struct net_device *dev, unsigned int prio) { struct r8192_priv *priv = rtllib_priv(dev); struct rtl8192_tx_ring *ring = &priv->tx_ring[prio]; while (skb_queue_len(&ring->queue)) { struct tx_desc *entry = &ring->desc[ring->idx]; struct sk_buff *skb = __skb_dequeue(&ring->queue); pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr), skb->len, PCI_DMA_TODEVICE); kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; } pci_free_consistent(priv->pdev, sizeof(*ring->desc)*ring->entries, ring->desc, ring->dma); ring->desc = NULL; } void rtl8192_data_hard_stop(struct net_device *dev) { } void rtl8192_data_hard_resume(struct net_device *dev) { } void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rate) { struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); int ret; struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); u8 queue_index = tcb_desc->queue_index; if ((priv->rtllib->eRFPowerState == eRfOff) || IS_NIC_DOWN(priv) || priv->bResetInProgress) { kfree_skb(skb); return; } assert(queue_index != TXCMD_QUEUE); memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); skb_push(skb, priv->rtllib->tx_headroom); ret = rtl8192_tx(dev, skb); if (ret != 0) { kfree_skb(skb); }; if (queue_index != MGNT_QUEUE) { priv->rtllib->stats.tx_bytes += (skb->len - priv->rtllib->tx_headroom); priv->rtllib->stats.tx_packets++; } return; } int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); int ret; struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); u8 queue_index = 
tcb_desc->queue_index;

	if (queue_index != TXCMD_QUEUE) {
		if ((priv->rtllib->eRFPowerState == eRfOff) ||
		     IS_NIC_DOWN(priv) || priv->bResetInProgress) {
			kfree_skb(skb);
			return 0;
		}
	}

	/* Stash the net_device pointer in cb[] for later retrieval. */
	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
	if (queue_index == TXCMD_QUEUE) {
		rtl8192_tx_cmd(dev, skb);
		ret = 0;
		return ret;
	} else {
		/* Force driver-assigned rate, no fallback, FW duration. */
		tcb_desc->RATRIndex = 7;
		tcb_desc->bTxDisableRateFallBack = 1;
		tcb_desc->bTxUseDriverAssingedRate = 1;
		tcb_desc->bTxEnableFwCalcDur = 1;
		skb_push(skb, priv->rtllib->tx_headroom);
		ret = rtl8192_tx(dev, skb);
		if (ret != 0) {
			kfree_skb(skb);
		};
	}

	return ret;
}

/*
 * Reclaim completed TX descriptors for one ring: free each transmitted
 * skb and advance the ring index.  The beacon ring is special: its
 * single descriptor is recycled without checking OWN or advancing idx.
 * Caller holds irq_th_lock (called from the interrupt handler).
 */
static void rtl8192_tx_isr(struct net_device *dev, int prio)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb;

		if (prio != BEACON_QUEUE) {
			/* OWN still set: hardware hasn't sent it yet. */
			if (entry->OWN)
				return;
			ring->idx = (ring->idx + 1) % ring->entries;
		}

		skb = __skb_dequeue(&ring->queue);
		pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr),
				 skb->len, PCI_DMA_TODEVICE);

		kfree_skb(skb);
	}
	/* Ring drained a bit — let the resume tasklet push waiting skbs. */
	if (prio != BEACON_QUEUE)
		tasklet_schedule(&priv->irq_tx_tasklet);
}

/*
 * Queue a command packet on the TXCMD ring; the HAL fills the command
 * descriptor.  Protected by irq_th_lock against the interrupt path.
 */
void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtl8192_tx_ring *ring;
	struct tx_desc_cmd *entry;
	unsigned int idx;
	struct cb_desc *tcb_desc;
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_th_lock, flags);
	ring = &priv->tx_ring[TXCMD_QUEUE];
	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;

	entry = (struct tx_desc_cmd *) &ring->desc[idx];

	tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);

	priv->ops->tx_fill_cmd_descriptor(dev, entry, tcb_desc, skb);

	__skb_queue_tail(&ring->queue, skb);
	spin_unlock_irqrestore(&priv->irq_th_lock, flags);

	return;
}

/*
 * Place one frame on its TX ring and poll the hardware.  Returns 0 when
 * the skb was queued (ring owns it) or a non-zero length when it was
 * rejected (caller must free).  (Continues on the next chunk.)
 */
short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtl8192_tx_ring *ring;
	unsigned long flags;
	struct
cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
				    MAX_DEV_ADDR_SIZE);
	struct tx_desc *pdesc = NULL;
	struct rtllib_hdr_1addr *header = NULL;
	u16 fc = 0, type = 0, stype = 0;
	bool multi_addr = false, broad_addr = false, uni_addr = false;
	u8 *pda_addr = NULL;
	int idx;
	u32 fwinfo_size = 0;

	if (priv->bdisable_nic) {
		/* Non-zero return tells the caller the skb was NOT queued. */
		RT_TRACE(COMP_ERR, "%s: ERR!! Nic is disabled! Can't tx packet"
			 " len=%d qidx=%d!!!\n", __func__, skb->len,
			 tcb_desc->queue_index);
		return skb->len;
	}

	priv->rtllib->bAwakePktSent = true;

	fwinfo_size = sizeof(struct tx_fwinfo_8190pci);

	/* The 802.11 header sits after the firmware info prefix. */
	header = (struct rtllib_hdr_1addr *)(((u8 *)skb->data) + fwinfo_size);
	fc = header->frame_ctl;
	type = WLAN_FC_GET_TYPE(fc);
	stype = WLAN_FC_GET_STYPE(fc);
	pda_addr = header->addr1;

	if (is_broadcast_ether_addr(pda_addr))
		broad_addr = true;
	else if (is_multicast_ether_addr(pda_addr))
		multi_addr = true;
	else
		uni_addr = true;

	/* Account payload bytes (excluding the fwinfo prefix) per class. */
	if (uni_addr)
		priv->stats.txbytesunicast += skb->len - fwinfo_size;
	else if (multi_addr)
		priv->stats.txbytesmulticast += skb->len - fwinfo_size;
	else
		priv->stats.txbytesbroadcast += skb->len - fwinfo_size;

	spin_lock_irqsave(&priv->irq_th_lock, flags);
	ring = &priv->tx_ring[tcb_desc->queue_index];
	if (tcb_desc->queue_index != BEACON_QUEUE)
		idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
	else
		idx = 0;	/* beacon ring reuses descriptor 0 */

	pdesc = &ring->desc[idx];
	if ((pdesc->OWN == 1) && (tcb_desc->queue_index != BEACON_QUEUE)) {
		/* Descriptor still owned by hardware: ring is full. */
		RT_TRACE(COMP_ERR, "No more TX desc@%d, ring->idx = %d, idx = "
			 "%d, skblen = 0x%x queuelen=%d",
			 tcb_desc->queue_index, ring->idx, idx, skb->len,
			 skb_queue_len(&ring->queue));
		spin_unlock_irqrestore(&priv->irq_th_lock, flags);
		return skb->len;
	}

	if (type == RTLLIB_FTYPE_DATA) {
		if (priv->rtllib->LedControlHandler)
			priv->rtllib->LedControlHandler(dev, LED_CTL_TX);
	}
	priv->ops->tx_fill_descriptor(dev, pdesc, tcb_desc, skb);
	__skb_queue_tail(&ring->queue, skb);
	/* Hand the descriptor to hardware only after it is fully filled. */
	pdesc->OWN = 1;
	spin_unlock_irqrestore(&priv->irq_th_lock, flags);
	dev->trans_start = jiffies;

	/* Poll the hardware for this queue. */
	write_nic_word(dev, TPPoll, 0x01 <<
tcb_desc->queue_index); return 0; } static short rtl8192_alloc_rx_desc_ring(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); struct rx_desc *entry = NULL; int i, rx_queue_idx; for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) { priv->rx_ring[rx_queue_idx] = pci_alloc_consistent(priv->pdev, sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount, &priv->rx_ring_dma[rx_queue_idx]); if (!priv->rx_ring[rx_queue_idx] || (unsigned long)priv->rx_ring[rx_queue_idx] & 0xFF) { RT_TRACE(COMP_ERR, "Cannot allocate RX ring\n"); return -ENOMEM; } memset(priv->rx_ring[rx_queue_idx], 0, sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount); priv->rx_idx[rx_queue_idx] = 0; for (i = 0; i < priv->rxringcount; i++) { struct sk_buff *skb = dev_alloc_skb(priv->rxbuffersize); dma_addr_t *mapping; entry = &priv->rx_ring[rx_queue_idx][i]; if (!skb) return 0; skb->dev = dev; priv->rx_buf[rx_queue_idx][i] = skb; mapping = (dma_addr_t *)skb->cb; *mapping = pci_map_single(priv->pdev, skb_tail_pointer_rsl(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(priv->pdev, *mapping)) { dev_kfree_skb_any(skb); return -1; } entry->BufferAddress = cpu_to_le32(*mapping); entry->Length = priv->rxbuffersize; entry->OWN = 1; } if(entry) entry->EOR = 1; } return 0; } static int rtl8192_alloc_tx_desc_ring(struct net_device *dev, unsigned int prio, unsigned int entries) { struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); struct tx_desc *ring; dma_addr_t dma; int i; ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); if (!ring || (unsigned long)ring & 0xFF) { RT_TRACE(COMP_ERR, "Cannot allocate TX ring (prio = %d)\n", prio); return -ENOMEM; } memset(ring, 0, sizeof(*ring)*entries); priv->tx_ring[prio].desc = ring; priv->tx_ring[prio].dma = dma; priv->tx_ring[prio].idx = 0; priv->tx_ring[prio].entries = entries; skb_queue_head_init(&priv->tx_ring[prio].queue); for (i = 0; i < entries; i++) ring[i].NextDescAddress 
= cpu_to_le32((u32)dma + ((i + 1) % entries) * sizeof(*ring)); return 0; } short rtl8192_pci_initdescring(struct net_device *dev) { u32 ret; int i; struct r8192_priv *priv = rtllib_priv(dev); ret = rtl8192_alloc_rx_desc_ring(dev); if (ret) return ret; for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) { ret = rtl8192_alloc_tx_desc_ring(dev, i, priv->txringcount); if (ret) goto err_free_rings; } return 0; err_free_rings: rtl8192_free_rx_ring(dev); for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) if (priv->tx_ring[i].desc) rtl8192_free_tx_ring(dev, i); return 1; } void rtl8192_pci_resetdescring(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); int i, rx_queue_idx; unsigned long flags = 0; for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) { if (priv->rx_ring[rx_queue_idx]) { struct rx_desc *entry = NULL; for (i = 0; i < priv->rxringcount; i++) { entry = &priv->rx_ring[rx_queue_idx][i]; entry->OWN = 1; } priv->rx_idx[rx_queue_idx] = 0; } } spin_lock_irqsave(&priv->irq_th_lock, flags); for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) { if (priv->tx_ring[i].desc) { struct rtl8192_tx_ring *ring = &priv->tx_ring[i]; while (skb_queue_len(&ring->queue)) { struct tx_desc *entry = &ring->desc[ring->idx]; struct sk_buff *skb = __skb_dequeue(&ring->queue); pci_unmap_single(priv->pdev, le32_to_cpu(entry->TxBuffAddr), skb->len, PCI_DMA_TODEVICE); kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; } ring->idx = 0; } } spin_unlock_irqrestore(&priv->irq_th_lock, flags); } void rtl819x_UpdateRxPktTimeStamp(struct net_device *dev, struct rtllib_rx_stats *stats) { struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); if (stats->bIsAMPDU && !stats->bFirstMPDU) stats->mac_time = priv->LastRxDescTSF; else priv->LastRxDescTSF = stats->mac_time; } long rtl819x_translate_todbm(struct r8192_priv *priv, u8 signal_strength_index) { long signal_power; signal_power = (long)((signal_strength_index + 1) >> 1); signal_power -= 95; return signal_power; } void 
rtl819x_update_rxsignalstatistics8190pci(
	struct r8192_priv *priv,
	struct rtllib_rx_stats *pprevious_stats
	)
{
	/* Exponential smoothing of received signal power (5/6 old, 1/6 new)
	 * with a small +/-5 bias toward the direction of change. */
	int weighting = 0;

	if (priv->stats.recv_signal_power == 0)
		priv->stats.recv_signal_power =
					 pprevious_stats->RecvSignalPower;

	if (pprevious_stats->RecvSignalPower > priv->stats.recv_signal_power)
		weighting = 5;
	else if (pprevious_stats->RecvSignalPower <
		 priv->stats.recv_signal_power)
		weighting = (-5);
	priv->stats.recv_signal_power = (priv->stats.recv_signal_power * 5 +
					pprevious_stats->RecvSignalPower +
					weighting) / 6;
}

/* CCK RX path-selection hook; intentionally empty for this chip. */
void rtl819x_process_cck_rxpathsel(struct r8192_priv *priv,
				   struct rtllib_rx_stats *pprevious_stats)
{
}

/* Map an antenna power reading (dBm, expected negative) to 0..100%.
 * NOTE(review): plain `char` is unsigned on some architectures, which
 * would break the negative comparisons here — confirm target archs. */
u8 rtl819x_query_rxpwrpercentage(char antpower)
{
	if ((antpower <= -100) || (antpower >= 20))
		return 0;
	else if (antpower >= 0)
		return 100;
	else
		return 100 + antpower;
}	/* QueryRxPwrPercentage */

/* Map an EVM value in dB (expected 0..-33) to a 0..100 percentage.
 * NOTE(review): same `char` signedness caveat as above. */
u8 rtl819x_evm_dbtopercentage(
	char value
	)
{
	char ret_val;

	ret_val = value;

	if (ret_val >= 0)
		ret_val = 0;
	if (ret_val <= -33)
		ret_val = -33;
	ret_val = 0 - ret_val;
	ret_val *= 3;
	if (ret_val == 99)
		ret_val = 100;
	return ret_val;
}

/* Copy the A-MPDU bookkeeping flags from one rx_stats to another. */
void rtl8192_record_rxdesc_forlateruse(
	struct rtllib_rx_stats *psrc_stats,
	struct rtllib_rx_stats *ptarget_stats
	)
{
	ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
	ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
}

/*
 * RX tasklet worker: drain the MPDU ring.  For each descriptor the
 * hardware has released (OWN clear), hand the frame to rtllib_rx(),
 * replace the buffer with a fresh mapped skb and return the descriptor
 * to hardware.  (Continues on the next chunk.)
 */
static void rtl8192_rx_normal(struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	struct rtllib_hdr_1addr *rtllib_hdr = NULL;
	bool unicast_packet = false;
	bool bLedBlinking = true;
	u16 fc = 0, type = 0;
	u32 skb_len = 0;
	int rx_queue_idx = RX_MPDU_QUEUE;

	struct rtllib_rx_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
		.freq = RTLLIB_24GHZ_BAND,
	};
	unsigned int count = priv->rxringcount;

	stats.nic_type = NIC_8192E;

	while (count--) {
		struct rx_desc *pdesc = &priv->rx_ring[rx_queue_idx]
					[priv->rx_idx[rx_queue_idx]];
		struct sk_buff *skb = priv->rx_buf[rx_queue_idx]
				      [priv->rx_idx[rx_queue_idx]];

		if (pdesc->OWN) {
			/* Still owned by hardware: nothing more to reap. */
			return;
		} else {
			struct sk_buff *new_skb;

			if
(!priv->ops->rx_query_status_descriptor(dev, &stats,
			pdesc, skb))
				goto done;
			new_skb = dev_alloc_skb(priv->rxbuffersize);
			/* if allocation of new skb failed - drop current
			 * packet and reuse skb */
			if (unlikely(!new_skb))
				goto done;

			pci_unmap_single(priv->pdev,
					*((dma_addr_t *)skb->cb),
					priv->rxbuffersize,
					PCI_DMA_FROMDEVICE);

			skb_put(skb, pdesc->Length);
			/* Skip the driver-info prefix and shift padding. */
			skb_reserve(skb, stats.RxDrvInfoSize +
				    stats.RxBufShift);
			skb_trim(skb, skb->len - 4/*sCrcLng*/);
			rtllib_hdr = (struct rtllib_hdr_1addr *)skb->data;
			if (!is_multicast_ether_addr(rtllib_hdr->addr1)) {
				/* unicast packet */
				unicast_packet = true;
			}
			fc = le16_to_cpu(rtllib_hdr->frame_ctl);
			type = WLAN_FC_GET_TYPE(fc);
			if (type == RTLLIB_FTYPE_MGMT)
				bLedBlinking = false;

			if (bLedBlinking)
				if (priv->rtllib->LedControlHandler)
					priv->rtllib->LedControlHandler(dev,
								LED_CTL_RX);

			if (stats.bCRC) {
				if (type != RTLLIB_FTYPE_MGMT)
					priv->stats.rxdatacrcerr++;
				else
					priv->stats.rxmgmtcrcerr++;
			}

			/* rtllib_rx() consumes the skb on success; cache the
			 * length for the byte counter beforehand. */
			skb_len = skb->len;

			if (!rtllib_rx(priv->rtllib, skb, &stats)) {
				dev_kfree_skb_any(skb);
			} else {
				priv->stats.rxok++;
				if (unicast_packet)
					priv->stats.rxbytesunicast += skb_len;
			}

			/* Install the replacement buffer and map it. */
			skb = new_skb;
			skb->dev = dev;

			priv->rx_buf[rx_queue_idx][priv->rx_idx[rx_queue_idx]] =
				skb;
			*((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev,
						    skb_tail_pointer_rsl(skb),
						    priv->rxbuffersize,
						    PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(priv->pdev,
						  *((dma_addr_t *)skb->cb))) {
				dev_kfree_skb_any(skb);
				return;
			}
		}
done:
		/* Return the descriptor to hardware. */
		pdesc->BufferAddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
		pdesc->OWN = 1;
		pdesc->Length = priv->rxbuffersize;
		if (priv->rx_idx[rx_queue_idx] == priv->rxringcount-1)
			pdesc->EOR = 1;
		priv->rx_idx[rx_queue_idx] = (priv->rx_idx[rx_queue_idx] + 1) %
					      priv->rxringcount;
	}
}

/* Command-ring RX; nothing to do on this NIC. */
static void rtl8192_rx_cmd(struct net_device *dev)
{
}

/*
 * TX tasklet worker: for each data queue, move skbs from the softmac
 * wait queue onto the hardware ring while descriptors are available.
 * (Continues on the next chunk.)
 */
static void rtl8192_tx_resume(struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	struct sk_buff *skb;
	int queue_index;

	for (queue_index =
BK_QUEUE; queue_index < MAX_QUEUE_SIZE; queue_index++) {
		while ((!skb_queue_empty(&ieee->skb_waitQ[queue_index])) &&
		(priv->rtllib->check_nic_enough_desc(dev, queue_index) > 0)) {
			skb = skb_dequeue(&ieee->skb_waitQ[queue_index]);
			ieee->softmac_data_hard_start_xmit(skb, dev, 0);
		}
	}
}

/* Tasklet entry point: resume TX after descriptors were reclaimed. */
void rtl8192_irq_tx_tasklet(struct r8192_priv *priv)
{
	rtl8192_tx_resume(priv->rtllib->dev);
}

/* Tasklet entry point: drain RX rings and re-enable the RDU interrupt. */
void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
{
	rtl8192_rx_normal(priv->rtllib->dev);

	if (MAX_RX_QUEUE > 1)
		rtl8192_rx_cmd(priv->rtllib->dev);

	/* Re-arm "rx descriptor unavailable" now that buffers are free. */
	write_nic_dword(priv->rtllib->dev, INTA_MASK,
			read_nic_dword(priv->rtllib->dev, INTA_MASK) | IMR_RDU);
}

/****************************************************************************
 ---------------------------- NIC START/CLOSE STUFF---------------------------
*****************************************************************************/
/* Cancel all deferred work items before tearing the interface down. */
void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
{
	cancel_delayed_work(&priv->watch_dog_wq);
	cancel_delayed_work(&priv->update_beacon_wq);
	cancel_delayed_work(&priv->rtllib->hw_sleep_wq);
	cancel_work_sync(&priv->reset_wq);
	cancel_work_sync(&priv->qos_activate);
}

/* Bring the station interface up; is_silent_reset suppresses user-visible
 * side effects during a silent reset.  Returns 0 or -1. */
int _rtl8192_up(struct net_device *dev, bool is_silent_reset)
{
	if (_rtl8192_sta_up(dev, is_silent_reset) == -1)
		return -1;
	return 0;
}

/* net_device open hook, serialized by wx_sem. */
static int rtl8192_open(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;

	down(&priv->wx_sem);
	ret = rtl8192_up(dev);
	up(&priv->wx_sem);
	return ret;
}

/* Bring the interface up unless it already is. */
int rtl8192_up(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->up == 1)
		return -1;
	return _rtl8192_up(dev, false);
}

/* net_device stop hook: abort any softmac-external scan, then go down. */
static int rtl8192_close(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;

	if ((rtllib_act_scanning(priv->rtllib, false)) &&
		!(priv->rtllib->softmac_features & IEEE_SOFTMAC_SCAN)) {
		rtllib_stop_scan(priv->rtllib);
	}

	down(&priv->wx_sem);

	ret = rtl8192_down(dev, true);

	up(&priv->wx_sem);

	return ret;
}

/* Tear the station interface down; shutdownrf also powers off the RF.
 * (Body continues on the next chunk.) */
int rtl8192_down(struct net_device *dev, bool shutdownrf)
{
if (rtl8192_sta_down(dev, shutdownrf) == -1)
		return -1;

	return 0;
}

/* Hard-restart the adapter: stop protocol and hardware, then re-up. */
void rtl8192_commit(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->up == 0)
		return;
	rtllib_softmac_stop_protocol(priv->rtllib, 0 , true);
	rtl8192_irq_disable(dev);
	priv->ops->stop_adapter(dev, true);
	_rtl8192_up(dev, false);
}

/* reset_wq work item: commit under the wx semaphore. */
void rtl8192_restart(void *data)
{
	struct r8192_priv *priv = container_of_work_rsl(data,
				  struct r8192_priv, reset_wq);
	struct net_device *dev = priv->rtllib->dev;

	down(&priv->wx_sem);

	rtl8192_commit(dev);

	up(&priv->wx_sem);
}

/* net_device set_rx_mode hook: only tracks the promiscuous flag. */
static void r8192_set_multicast(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	short promisc;

	promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
	priv->promisc = promisc;
}

/* net_device set_mac_address hook: copy the address and schedule a
 * reset so the hardware picks it up. */
static int r8192_set_mac_adr(struct net_device *dev, void *mac)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct sockaddr *addr = mac;

	down(&priv->wx_sem);

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	schedule_work(&priv->reset_wq);
	up(&priv->wx_sem);
	return 0;
}

/* based on ipw2200 driver */
/*
 * Private ioctl for wpa_supplicant: copies an ieee_param from userspace,
 * programs pairwise/group keys into the CAM for SET_ENCRYPTION commands,
 * then forwards the request to rtllib.  (Continues on the next chunk.)
 */
static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	struct iwreq *wrq = (struct iwreq *)rq;
	int ret = -1;
	struct rtllib_device *ieee = priv->rtllib;
	u32 key[4];
	u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	u8 zero_addr[6] = {0};
	struct iw_point *p = &wrq->u.data;
	struct ieee_param *ipw = NULL;

	down(&priv->wx_sem);

	switch (cmd) {
	case RTL_IOCTL_WPA_SUPPLICANT:
		/* Validate the user buffer before copying it in. */
		if (p->length < sizeof(struct ieee_param) || !p->pointer) {
			ret = -EINVAL;
			goto out;
		}

		ipw = kmalloc(p->length, GFP_KERNEL);
		if (ipw == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		if (copy_from_user(ipw, p->pointer, p->length)) {
			kfree(ipw);
			ret = -EFAULT;
			goto out;
		}

		if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION) {
			if (ipw->u.crypt.set_tx) {
				if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
					ieee->pairwise_key_type = KEY_TYPE_CCMP;
				else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
					ieee->pairwise_key_type
= KEY_TYPE_TKIP;
				else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
					if (ipw->u.crypt.key_len == 13)
						ieee->pairwise_key_type =
							 KEY_TYPE_WEP104;
					else if (ipw->u.crypt.key_len == 5)
						ieee->pairwise_key_type =
							 KEY_TYPE_WEP40;
				} else {
					ieee->pairwise_key_type = KEY_TYPE_NA;
				}

				if (ieee->pairwise_key_type) {
					/* All-zero AP address is taken to
					 * mean ad-hoc operation. */
					if (memcmp(ieee->ap_mac_addr,
						   zero_addr, 6) == 0)
						ieee->iw_mode = IW_MODE_ADHOC;
					memcpy((u8 *)key, ipw->u.crypt.key, 16);
					EnableHWSecurityConfig8192(dev);
					/* CAM entry 4 holds the pairwise key
					 * in infrastructure mode. */
					set_swcam(dev, 4, ipw->u.crypt.idx,
						  ieee->pairwise_key_type,
						  (u8 *)ieee->ap_mac_addr,
						  0, key, 0);
					setKey(dev, 4, ipw->u.crypt.idx,
					       ieee->pairwise_key_type,
					       (u8 *)ieee->ap_mac_addr, 0, key);
					if (ieee->iw_mode == IW_MODE_ADHOC) {
						set_swcam(dev, ipw->u.crypt.idx,
							ipw->u.crypt.idx,
							ieee->pairwise_key_type,
							(u8 *)ieee->ap_mac_addr,
							0, key, 0);
						setKey(dev, ipw->u.crypt.idx,
						       ipw->u.crypt.idx,
						       ieee->pairwise_key_type,
						       (u8 *)ieee->ap_mac_addr,
						       0, key);
					}
				}
				/* NOTE(review): 0x173 write under CCMP+HT
				 * looks like a chip-specific workaround;
				 * meaning of the register not visible here. */
				if ((ieee->pairwise_key_type == KEY_TYPE_CCMP)
				     && ieee->pHTInfo->bCurrentHTSupport) {
					write_nic_byte(dev, 0x173, 1);
				}
			} else {
				/* Group (broadcast) key path. */
				memcpy((u8 *)key, ipw->u.crypt.key, 16);
				if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
					ieee->group_key_type = KEY_TYPE_CCMP;
				else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
					ieee->group_key_type = KEY_TYPE_TKIP;
				else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
					if (ipw->u.crypt.key_len == 13)
						ieee->group_key_type =
							 KEY_TYPE_WEP104;
					else if (ipw->u.crypt.key_len == 5)
						ieee->group_key_type =
							 KEY_TYPE_WEP40;
				} else
					ieee->group_key_type = KEY_TYPE_NA;

				if (ieee->group_key_type) {
					set_swcam(dev, ipw->u.crypt.idx,
						  ipw->u.crypt.idx,
						  ieee->group_key_type,
						  broadcast_addr, 0, key, 0);
					setKey(dev, ipw->u.crypt.idx,
					       ipw->u.crypt.idx,
					       ieee->group_key_type,
					       broadcast_addr, 0, key);
				}
			}
		}

		ret = rtllib_wpa_supplicant_ioctl(priv->rtllib, &wrq->u.data,
						  0);
		kfree(ipw);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

out:
	up(&priv->wx_sem);

	return ret;
}

/*
 * Top-half interrupt handler: acknowledges and dispatches all pending
 * interrupt causes under irq_th_lock.  (Continues on the next chunks.)
 */
irqreturn_type rtl8192_interrupt(int irq, void *netdev, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)
netdev; struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); unsigned long flags; u32 inta; u32 intb; intb = 0; if (priv->irq_enabled == 0) goto done; spin_lock_irqsave(&priv->irq_th_lock, flags); priv->ops->interrupt_recognized(dev, &inta, &intb); priv->stats.shints++; if (!inta) { spin_unlock_irqrestore(&priv->irq_th_lock, flags); goto done; } if (inta == 0xffff) { spin_unlock_irqrestore(&priv->irq_th_lock, flags); goto done; } priv->stats.ints++; if (!netif_running(dev)) { spin_unlock_irqrestore(&priv->irq_th_lock, flags); goto done; } if (inta & IMR_TBDOK) { RT_TRACE(COMP_INTR, "beacon ok interrupt!\n"); priv->stats.txbeaconokint++; } if (inta & IMR_TBDER) { RT_TRACE(COMP_INTR, "beacon ok interrupt!\n"); priv->stats.txbeaconerr++; } if (inta & IMR_BDOK) RT_TRACE(COMP_INTR, "beacon interrupt!\n"); if (inta & IMR_MGNTDOK) { RT_TRACE(COMP_INTR, "Manage ok interrupt!\n"); priv->stats.txmanageokint++; rtl8192_tx_isr(dev, MGNT_QUEUE); spin_unlock_irqrestore(&priv->irq_th_lock, flags); if (priv->rtllib->ack_tx_to_ieee) { if (rtl8192_is_tx_queue_empty(dev)) { priv->rtllib->ack_tx_to_ieee = 0; rtllib_ps_tx_ack(priv->rtllib, 1); } } spin_lock_irqsave(&priv->irq_th_lock, flags); } if (inta & IMR_COMDOK) { priv->stats.txcmdpktokint++; rtl8192_tx_isr(dev, TXCMD_QUEUE); } if (inta & IMR_HIGHDOK) rtl8192_tx_isr(dev, HIGH_QUEUE); if (inta & IMR_ROK) { priv->stats.rxint++; priv->InterruptLog.nIMR_ROK++; tasklet_schedule(&priv->irq_rx_tasklet); } if (inta & IMR_BcnInt) { RT_TRACE(COMP_INTR, "prepare beacon for interrupt!\n"); tasklet_schedule(&priv->irq_prepare_beacon_tasklet); } if (inta & IMR_RDU) { RT_TRACE(COMP_INTR, "rx descriptor unavailable!\n"); priv->stats.rxrdu++; write_nic_dword(dev, INTA_MASK, read_nic_dword(dev, INTA_MASK) & ~IMR_RDU); tasklet_schedule(&priv->irq_rx_tasklet); } if (inta & IMR_RXFOVW) { RT_TRACE(COMP_INTR, "rx overflow !\n"); priv->stats.rxoverflow++; tasklet_schedule(&priv->irq_rx_tasklet); } if (inta & IMR_TXFOVW) 
priv->stats.txoverflow++;

	if (inta & IMR_BKDOK) {
		RT_TRACE(COMP_INTR, "BK Tx OK interrupt!\n");
		priv->stats.txbkokint++;
		priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
		rtl8192_tx_isr(dev, BK_QUEUE);
	}

	if (inta & IMR_BEDOK) {
		RT_TRACE(COMP_INTR, "BE TX OK interrupt!\n");
		priv->stats.txbeokint++;
		priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
		rtl8192_tx_isr(dev, BE_QUEUE);
	}

	if (inta & IMR_VIDOK) {
		RT_TRACE(COMP_INTR, "VI TX OK interrupt!\n");
		priv->stats.txviokint++;
		priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
		rtl8192_tx_isr(dev, VI_QUEUE);
	}

	if (inta & IMR_VODOK) {
		priv->stats.txvookint++;
		RT_TRACE(COMP_INTR, "Vo TX OK interrupt!\n");
		priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
		rtl8192_tx_isr(dev, VO_QUEUE);
	}

	spin_unlock_irqrestore(&priv->irq_th_lock, flags);

done:

	return IRQ_HANDLED;
}

/****************************************************************************
	---------------------------- PCI_STUFF---------------------------
*****************************************************************************/
static const struct net_device_ops rtl8192_netdev_ops = {
	.ndo_open = rtl8192_open,
	.ndo_stop = rtl8192_close,
	.ndo_tx_timeout = rtl8192_tx_timeout,
	.ndo_do_ioctl = rtl8192_ioctl,
	.ndo_set_rx_mode = r8192_set_multicast,
	.ndo_set_mac_address = r8192_set_mac_adr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_start_xmit = rtllib_xmit,
};

/*
 * PCI probe: enable the device, set up 32-bit DMA, allocate the rtllib
 * net_device, map BAR 1 and register the interface.  id->driver_data
 * carries the chip-specific ops table.  (Continues on the next chunk.)
 */
static int rtl8192_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	unsigned long ioaddr = 0;
	struct net_device *dev = NULL;
	struct r8192_priv *priv = NULL;
	struct rtl819x_ops *ops = (struct rtl819x_ops *)(id->driver_data);
	unsigned long pmem_start, pmem_len, pmem_flags;
	int err = -ENOMEM;
	bool bdma64 = false;
	u8 revision_id;

	RT_TRACE(COMP_INIT, "Configuring chip resources");

	if (pci_enable_device(pdev)) {
		RT_TRACE(COMP_ERR, "Failed to enable PCI device");
		return -EIO;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		if
(pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_INFO "Unable to obtain 32bit DMA for consistent allocations\n"); goto err_pci_disable; } } dev = alloc_rtllib(sizeof(struct r8192_priv)); if (!dev) goto err_pci_disable; err = -ENODEV; if (bdma64) dev->features |= NETIF_F_HIGHDMA; pci_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); priv = rtllib_priv(dev); priv->rtllib = (struct rtllib_device *)netdev_priv_rsl(dev); priv->pdev = pdev; priv->rtllib->pdev = pdev; if ((pdev->subsystem_vendor == PCI_VENDOR_ID_DLINK) && (pdev->subsystem_device == 0x3304)) priv->rtllib->bSupportRemoteWakeUp = 1; else priv->rtllib->bSupportRemoteWakeUp = 0; pmem_start = pci_resource_start(pdev, 1); pmem_len = pci_resource_len(pdev, 1); pmem_flags = pci_resource_flags(pdev, 1); if (!(pmem_flags & IORESOURCE_MEM)) { RT_TRACE(COMP_ERR, "region #1 not a MMIO resource, aborting"); goto err_rel_rtllib; } printk(KERN_INFO "Memory mapped space start: 0x%08lx\n", pmem_start); if (!request_mem_region(pmem_start, pmem_len, DRV_NAME)) { RT_TRACE(COMP_ERR, "request_mem_region failed!"); goto err_rel_rtllib; } ioaddr = (unsigned long)ioremap_nocache(pmem_start, pmem_len); if (ioaddr == (unsigned long)NULL) { RT_TRACE(COMP_ERR, "ioremap failed!"); goto err_rel_mem; } dev->mem_start = ioaddr; dev->mem_end = ioaddr + pci_resource_len(pdev, 0); pci_read_config_byte(pdev, 0x08, &revision_id); /* If the revisionid is 0x10, the device uses rtl8192se. */ if (pdev->device == 0x8192 && revision_id == 0x10) goto err_rel_mem; priv->ops = ops; if (rtl8192_pci_findadapter(pdev, dev) == false) goto err_rel_mem; dev->irq = pdev->irq; priv->irq = 0; dev->netdev_ops = &rtl8192_netdev_ops; dev->wireless_handlers = (struct iw_handler_def *) &r8192_wx_handlers_def; dev->ethtool_ops = &rtl819x_ethtool_ops; dev->type = ARPHRD_ETHER; dev->watchdog_timeo = HZ * 3; if (dev_alloc_name(dev, ifname) < 0) { RT_TRACE(COMP_INIT, "Oops: devname already taken! 
Trying " "wlan%%d...\n"); dev_alloc_name(dev, ifname); } RT_TRACE(COMP_INIT, "Driver probe completed1\n"); if (rtl8192_init(dev) != 0) { RT_TRACE(COMP_ERR, "Initialization failed"); goto err_free_irq; } netif_carrier_off(dev); netif_stop_queue(dev); if (register_netdev(dev)) goto err_free_irq; RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name); if (priv->polling_timer_on == 0) check_rfctrl_gpio_timer((unsigned long)dev); RT_TRACE(COMP_INIT, "Driver probe completed\n"); return 0; err_free_irq: free_irq(dev->irq, dev); priv->irq = 0; err_rel_mem: release_mem_region(pmem_start, pmem_len); err_rel_rtllib: free_rtllib(dev); DMESG("wlan driver load failed\n"); pci_set_drvdata(pdev, NULL); err_pci_disable: pci_disable_device(pdev); return err; } static void rtl8192_pci_disconnect(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct r8192_priv *priv ; u32 i; if (dev) { unregister_netdev(dev); priv = rtllib_priv(dev); del_timer_sync(&priv->gpio_polling_timer); cancel_delayed_work(&priv->gpio_change_rf_wq); priv->polling_timer_on = 0; rtl8192_down(dev, true); deinit_hal_dm(dev); if (priv->pFirmware) { vfree(priv->pFirmware); priv->pFirmware = NULL; } destroy_workqueue(priv->priv_wq); rtl8192_free_rx_ring(dev); for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) rtl8192_free_tx_ring(dev, i); if (priv->irq) { printk(KERN_INFO "Freeing irq %d\n", dev->irq); free_irq(dev->irq, dev); priv->irq = 0; } free_rtllib(dev); kfree(priv->scan_cmd); if (dev->mem_start != 0) { iounmap((void __iomem *)dev->mem_start); release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); } } else { priv = rtllib_priv(dev); } pci_disable_device(pdev); RT_TRACE(COMP_DOWN, "wlan driver removed\n"); } bool NicIFEnableNIC(struct net_device *dev) { bool init_status = true; struct r8192_priv *priv = rtllib_priv(dev); struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) (&(priv->rtllib->PowerSaveControl)); if (IS_NIC_DOWN(priv)) { RT_TRACE(COMP_ERR, "ERR!!! 
%s(): Driver is already down!\n", __func__); priv->bdisable_nic = false; return RT_STATUS_FAILURE; } RT_TRACE(COMP_PS, "===========>%s()\n", __func__); priv->bfirst_init = true; init_status = priv->ops->initialize_adapter(dev); if (init_status != true) { RT_TRACE(COMP_ERR, "ERR!!! %s(): initialization is failed!\n", __func__); priv->bdisable_nic = false; return -1; } RT_TRACE(COMP_INIT, "start adapter finished\n"); RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC); priv->bfirst_init = false; rtl8192_irq_enable(dev); priv->bdisable_nic = false; RT_TRACE(COMP_PS, "<===========%s()\n", __func__); return init_status; } bool NicIFDisableNIC(struct net_device *dev) { bool status = true; struct r8192_priv *priv = rtllib_priv(dev); u8 tmp_state = 0; RT_TRACE(COMP_PS, "=========>%s()\n", __func__); priv->bdisable_nic = true; tmp_state = priv->rtllib->state; rtllib_softmac_stop_protocol(priv->rtllib, 0, false); priv->rtllib->state = tmp_state; rtl8192_cancel_deferred_work(priv); rtl8192_irq_disable(dev); priv->ops->stop_adapter(dev, false); RT_TRACE(COMP_PS, "<=========%s()\n", __func__); return status; } static int __init rtl8192_pci_module_init(void) { printk(KERN_INFO "\nLinux kernel driver for RTL8192E WLAN cards\n"); printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan Driver\n"); if (0 != pci_register_driver(&rtl8192_pci_driver)) { DMESG("No device found"); /*pci_unregister_driver (&rtl8192_pci_driver);*/ return -ENODEV; } return 0; } static void __exit rtl8192_pci_module_exit(void) { pci_unregister_driver(&rtl8192_pci_driver); RT_TRACE(COMP_DOWN, "Exiting"); } void check_rfctrl_gpio_timer(unsigned long data) { struct r8192_priv *priv = rtllib_priv((struct net_device *)data); priv->polling_timer_on = 1; queue_delayed_work_rsl(priv->priv_wq, &priv->gpio_change_rf_wq, 0); mod_timer(&priv->gpio_polling_timer, jiffies + MSECS(RTLLIB_WATCH_DOG_TIME)); } /*************************************************************************** ------------------- module init / exit 
stubs ---------------- ****************************************************************************/ module_init(rtl8192_pci_module_init); module_exit(rtl8192_pci_module_exit); MODULE_DESCRIPTION("Linux driver for Realtek RTL819x WiFi cards"); MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(RTL8192E_BOOT_IMG_FW); MODULE_FIRMWARE(RTL8192E_MAIN_IMG_FW); MODULE_FIRMWARE(RTL8192E_DATA_IMG_FW); module_param(ifname, charp, S_IRUGO|S_IWUSR); module_param(hwwep, int, S_IRUGO|S_IWUSR); module_param(channels, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ifname, " Net interface name, wlan%d=default"); MODULE_PARM_DESC(hwwep, " Try to use hardware WEP support(default use hw. set 0 to use software security)"); MODULE_PARM_DESC(channels, " Channel bitmask for specific locales. NYI");
gpl-2.0
NamelessRom/android_kernel_yu_msm8916
drivers/power/max8925_power.c
2156
15110
/* * Battery driver for Maxim MAX8925 * * Copyright (c) 2009-2010 Marvell International Ltd. * Haojian Zhuang <haojian.zhuang@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/mfd/max8925.h> /* registers in GPM */ #define MAX8925_OUT5VEN 0x54 #define MAX8925_OUT3VEN 0x58 #define MAX8925_CHG_CNTL1 0x7c /* bits definition */ #define MAX8925_CHG_STAT_VSYSLOW (1 << 0) #define MAX8925_CHG_STAT_MODE_MASK (3 << 2) #define MAX8925_CHG_STAT_EN_MASK (1 << 4) #define MAX8925_CHG_MBDET (1 << 1) #define MAX8925_CHG_AC_RANGE_MASK (3 << 6) /* registers in ADC */ #define MAX8925_ADC_RES_CNFG1 0x06 #define MAX8925_ADC_AVG_CNFG1 0x07 #define MAX8925_ADC_ACQ_CNFG1 0x08 #define MAX8925_ADC_ACQ_CNFG2 0x09 /* 2 bytes registers in below. MSB is 1st, LSB is 2nd. 
*/ #define MAX8925_ADC_AUX2 0x62 #define MAX8925_ADC_VCHG 0x64 #define MAX8925_ADC_VBBATT 0x66 #define MAX8925_ADC_VMBATT 0x68 #define MAX8925_ADC_ISNS 0x6a #define MAX8925_ADC_THM 0x6c #define MAX8925_ADC_TDIE 0x6e #define MAX8925_CMD_AUX2 0xc8 #define MAX8925_CMD_VCHG 0xd0 #define MAX8925_CMD_VBBATT 0xd8 #define MAX8925_CMD_VMBATT 0xe0 #define MAX8925_CMD_ISNS 0xe8 #define MAX8925_CMD_THM 0xf0 #define MAX8925_CMD_TDIE 0xf8 enum { MEASURE_AUX2, MEASURE_VCHG, MEASURE_VBBATT, MEASURE_VMBATT, MEASURE_ISNS, MEASURE_THM, MEASURE_TDIE, MEASURE_MAX, }; struct max8925_power_info { struct max8925_chip *chip; struct i2c_client *gpm; struct i2c_client *adc; struct power_supply ac; struct power_supply usb; struct power_supply battery; int irq_base; unsigned ac_online:1; unsigned usb_online:1; unsigned bat_online:1; unsigned chg_mode:2; unsigned batt_detect:1; /* detecing MB by ID pin */ unsigned topoff_threshold:2; unsigned fast_charge:3; unsigned no_temp_support:1; unsigned no_insert_detect:1; int (*set_charger) (int); }; static int __set_charger(struct max8925_power_info *info, int enable) { struct max8925_chip *chip = info->chip; if (enable) { /* enable charger in platform */ if (info->set_charger) info->set_charger(1); /* enable charger */ max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 1 << 7, 0); } else { /* disable charge */ max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 1 << 7, 1 << 7); if (info->set_charger) info->set_charger(0); } dev_dbg(chip->dev, "%s\n", (enable) ? 
"Enable charger" : "Disable charger"); return 0; } static irqreturn_t max8925_charger_handler(int irq, void *data) { struct max8925_power_info *info = (struct max8925_power_info *)data; struct max8925_chip *chip = info->chip; switch (irq - chip->irq_base) { case MAX8925_IRQ_VCHG_DC_R: info->ac_online = 1; __set_charger(info, 1); dev_dbg(chip->dev, "Adapter inserted\n"); break; case MAX8925_IRQ_VCHG_DC_F: info->ac_online = 0; __set_charger(info, 0); dev_dbg(chip->dev, "Adapter removed\n"); break; case MAX8925_IRQ_VCHG_THM_OK_F: /* Battery is not ready yet */ dev_dbg(chip->dev, "Battery temperature is out of range\n"); case MAX8925_IRQ_VCHG_DC_OVP: dev_dbg(chip->dev, "Error detection\n"); __set_charger(info, 0); break; case MAX8925_IRQ_VCHG_THM_OK_R: /* Battery is ready now */ dev_dbg(chip->dev, "Battery temperature is in range\n"); break; case MAX8925_IRQ_VCHG_SYSLOW_R: /* VSYS is low */ dev_info(chip->dev, "Sys power is too low\n"); break; case MAX8925_IRQ_VCHG_SYSLOW_F: dev_dbg(chip->dev, "Sys power is above low threshold\n"); break; case MAX8925_IRQ_VCHG_DONE: __set_charger(info, 0); dev_dbg(chip->dev, "Charging is done\n"); break; case MAX8925_IRQ_VCHG_TOPOFF: dev_dbg(chip->dev, "Charging in top-off mode\n"); break; case MAX8925_IRQ_VCHG_TMR_FAULT: __set_charger(info, 0); dev_dbg(chip->dev, "Safe timer is expired\n"); break; case MAX8925_IRQ_VCHG_RST: __set_charger(info, 0); dev_dbg(chip->dev, "Charger is reset\n"); break; } return IRQ_HANDLED; } static int start_measure(struct max8925_power_info *info, int type) { unsigned char buf[2] = {0, 0}; int meas_cmd; int meas_reg = 0, ret; switch (type) { case MEASURE_VCHG: meas_cmd = MAX8925_CMD_VCHG; meas_reg = MAX8925_ADC_VCHG; break; case MEASURE_VBBATT: meas_cmd = MAX8925_CMD_VBBATT; meas_reg = MAX8925_ADC_VBBATT; break; case MEASURE_VMBATT: meas_cmd = MAX8925_CMD_VMBATT; meas_reg = MAX8925_ADC_VMBATT; break; case MEASURE_ISNS: meas_cmd = MAX8925_CMD_ISNS; meas_reg = MAX8925_ADC_ISNS; break; default: return 
-EINVAL; } max8925_reg_write(info->adc, meas_cmd, 0); max8925_bulk_read(info->adc, meas_reg, 2, buf); ret = ((buf[0]<<8) | buf[1]) >> 4; return ret; } static int max8925_ac_get_prop(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct max8925_power_info *info = dev_get_drvdata(psy->dev->parent); int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = info->ac_online; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: if (info->ac_online) { ret = start_measure(info, MEASURE_VCHG); if (ret >= 0) { val->intval = ret * 2000; /* unit is uV */ goto out; } } ret = -ENODATA; break; default: ret = -ENODEV; break; } out: return ret; } static enum power_supply_property max8925_ac_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_VOLTAGE_NOW, }; static int max8925_usb_get_prop(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct max8925_power_info *info = dev_get_drvdata(psy->dev->parent); int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = info->usb_online; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: if (info->usb_online) { ret = start_measure(info, MEASURE_VCHG); if (ret >= 0) { val->intval = ret * 2000; /* unit is uV */ goto out; } } ret = -ENODATA; break; default: ret = -ENODEV; break; } out: return ret; } static enum power_supply_property max8925_usb_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_VOLTAGE_NOW, }; static int max8925_bat_get_prop(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct max8925_power_info *info = dev_get_drvdata(psy->dev->parent); int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = info->bat_online; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: if (info->bat_online) { ret = start_measure(info, MEASURE_VMBATT); if (ret >= 0) { val->intval = ret * 2000; /* unit is uV */ ret = 0; break; } } ret = -ENODATA; break; case POWER_SUPPLY_PROP_CURRENT_NOW: 
if (info->bat_online) { ret = start_measure(info, MEASURE_ISNS); if (ret >= 0) { /* assume r_sns is 0.02 */ ret = ((ret * 6250) - 3125) /* uA */; val->intval = 0; if (ret > 0) val->intval = ret; /* unit is mA */ ret = 0; break; } } ret = -ENODATA; break; case POWER_SUPPLY_PROP_CHARGE_TYPE: if (!info->bat_online) { ret = -ENODATA; break; } ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS); ret = (ret & MAX8925_CHG_STAT_MODE_MASK) >> 2; switch (ret) { case 1: val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; break; case 0: case 2: val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; break; case 3: val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE; break; } ret = 0; break; case POWER_SUPPLY_PROP_STATUS: if (!info->bat_online) { ret = -ENODATA; break; } ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS); if (info->usb_online || info->ac_online) { val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; if (ret & MAX8925_CHG_STAT_EN_MASK) val->intval = POWER_SUPPLY_STATUS_CHARGING; } else val->intval = POWER_SUPPLY_STATUS_DISCHARGING; ret = 0; break; default: ret = -ENODEV; break; } return ret; } static enum power_supply_property max8925_battery_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_STATUS, }; #define REQUEST_IRQ(_irq, _name) \ do { \ ret = request_threaded_irq(chip->irq_base + _irq, NULL, \ max8925_charger_handler, \ IRQF_ONESHOT, _name, info); \ if (ret) \ dev_err(chip->dev, "Failed to request IRQ #%d: %d\n", \ _irq, ret); \ } while (0) static int max8925_init_charger(struct max8925_chip *chip, struct max8925_power_info *info) { int ret; REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_OVP, "ac-ovp"); if (!info->no_insert_detect) { REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_F, "ac-remove"); REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_R, "ac-insert"); } if (!info->no_temp_support) { REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_R, "batt-temp-in-range"); REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_F, "batt-temp-out-range"); } 
REQUEST_IRQ(MAX8925_IRQ_VCHG_SYSLOW_F, "vsys-high"); REQUEST_IRQ(MAX8925_IRQ_VCHG_SYSLOW_R, "vsys-low"); REQUEST_IRQ(MAX8925_IRQ_VCHG_RST, "charger-reset"); REQUEST_IRQ(MAX8925_IRQ_VCHG_DONE, "charger-done"); REQUEST_IRQ(MAX8925_IRQ_VCHG_TOPOFF, "charger-topoff"); REQUEST_IRQ(MAX8925_IRQ_VCHG_TMR_FAULT, "charger-timer-expire"); info->usb_online = 0; info->bat_online = 0; /* check for power - can miss interrupt at boot time */ if (start_measure(info, MEASURE_VCHG) * 2000 > 500000) info->ac_online = 1; else info->ac_online = 0; ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS); if (ret >= 0) { /* * If battery detection is enabled, ID pin of battery is * connected to MBDET pin of MAX8925. It could be used to * detect battery presence. * Otherwise, we have to assume that battery is always on. */ if (info->batt_detect) info->bat_online = (ret & MAX8925_CHG_MBDET) ? 0 : 1; else info->bat_online = 1; if (ret & MAX8925_CHG_AC_RANGE_MASK) info->ac_online = 1; else info->ac_online = 0; } /* disable charge */ max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 1 << 7, 1 << 7); /* set charging current in charge topoff mode */ max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 3 << 5, info->topoff_threshold << 5); /* set charing current in fast charge mode */ max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 7, info->fast_charge); return 0; } static int max8925_deinit_charger(struct max8925_power_info *info) { struct max8925_chip *chip = info->chip; int irq; irq = chip->irq_base + MAX8925_IRQ_VCHG_DC_OVP; for (; irq <= chip->irq_base + MAX8925_IRQ_VCHG_TMR_FAULT; irq++) free_irq(irq, info); return 0; } #ifdef CONFIG_OF static struct max8925_power_pdata * max8925_power_dt_init(struct platform_device *pdev) { struct device_node *nproot = pdev->dev.parent->of_node; struct device_node *np; int batt_detect; int topoff_threshold; int fast_charge; int no_temp_support; int no_insert_detect; struct max8925_power_pdata *pdata; if (!nproot) return pdev->dev.platform_data; np = 
of_find_node_by_name(nproot, "charger"); if (!np) { dev_err(&pdev->dev, "failed to find charger node\n"); return NULL; } pdata = devm_kzalloc(&pdev->dev, sizeof(struct max8925_power_pdata), GFP_KERNEL); of_property_read_u32(np, "topoff-threshold", &topoff_threshold); of_property_read_u32(np, "batt-detect", &batt_detect); of_property_read_u32(np, "fast-charge", &fast_charge); of_property_read_u32(np, "no-insert-detect", &no_insert_detect); of_property_read_u32(np, "no-temp-support", &no_temp_support); pdata->batt_detect = batt_detect; pdata->fast_charge = fast_charge; pdata->topoff_threshold = topoff_threshold; pdata->no_insert_detect = no_insert_detect; pdata->no_temp_support = no_temp_support; return pdata; } #else static struct max8925_power_pdata * max8925_power_dt_init(struct platform_device *pdev) { return pdev->dev.platform_data; } #endif static int max8925_power_probe(struct platform_device *pdev) { struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); struct max8925_power_pdata *pdata = NULL; struct max8925_power_info *info; int ret; pdata = max8925_power_dt_init(pdev); if (!pdata) { dev_err(&pdev->dev, "platform data isn't assigned to " "power supply\n"); return -EINVAL; } info = devm_kzalloc(&pdev->dev, sizeof(struct max8925_power_info), GFP_KERNEL); if (!info) return -ENOMEM; info->chip = chip; info->gpm = chip->i2c; info->adc = chip->adc; platform_set_drvdata(pdev, info); info->ac.name = "max8925-ac"; info->ac.type = POWER_SUPPLY_TYPE_MAINS; info->ac.properties = max8925_ac_props; info->ac.num_properties = ARRAY_SIZE(max8925_ac_props); info->ac.get_property = max8925_ac_get_prop; info->ac.supplied_to = pdata->supplied_to; info->ac.num_supplicants = pdata->num_supplicants; ret = power_supply_register(&pdev->dev, &info->ac); if (ret) goto out; info->ac.dev->parent = &pdev->dev; info->usb.name = "max8925-usb"; info->usb.type = POWER_SUPPLY_TYPE_USB; info->usb.properties = max8925_usb_props; info->usb.num_properties = ARRAY_SIZE(max8925_usb_props); 
info->usb.get_property = max8925_usb_get_prop; info->usb.supplied_to = pdata->supplied_to; info->usb.num_supplicants = pdata->num_supplicants; ret = power_supply_register(&pdev->dev, &info->usb); if (ret) goto out_usb; info->usb.dev->parent = &pdev->dev; info->battery.name = "max8925-battery"; info->battery.type = POWER_SUPPLY_TYPE_BATTERY; info->battery.properties = max8925_battery_props; info->battery.num_properties = ARRAY_SIZE(max8925_battery_props); info->battery.get_property = max8925_bat_get_prop; ret = power_supply_register(&pdev->dev, &info->battery); if (ret) goto out_battery; info->battery.dev->parent = &pdev->dev; info->batt_detect = pdata->batt_detect; info->topoff_threshold = pdata->topoff_threshold; info->fast_charge = pdata->fast_charge; info->set_charger = pdata->set_charger; info->no_temp_support = pdata->no_temp_support; info->no_insert_detect = pdata->no_insert_detect; max8925_init_charger(chip, info); return 0; out_battery: power_supply_unregister(&info->battery); out_usb: power_supply_unregister(&info->ac); out: return ret; } static int max8925_power_remove(struct platform_device *pdev) { struct max8925_power_info *info = platform_get_drvdata(pdev); if (info) { power_supply_unregister(&info->ac); power_supply_unregister(&info->usb); power_supply_unregister(&info->battery); max8925_deinit_charger(info); } return 0; } static struct platform_driver max8925_power_driver = { .probe = max8925_power_probe, .remove = max8925_power_remove, .driver = { .name = "max8925-power", }, }; module_platform_driver(max8925_power_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Power supply driver for MAX8925"); MODULE_ALIAS("platform:max8925-power");
gpl-2.0
crimsonthunder/kernel_samsung_trlte
drivers/iio/accel/hid-sensor-accel-3d.c
2156
11352
/*
 * HID Sensors Driver
 * Copyright (c) 2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"

/*Format: HID-SENSOR-usage_id_in_hex*/
/*Usage ID from spec for Accelerometer-3D: 0x200073*/
#define DRIVER_NAME "HID-SENSOR-200073"

/* Scan indices for the three axes; also index accel[]/accel_val[]. */
enum accel_3d_channel {
	CHANNEL_SCAN_INDEX_X,
	CHANNEL_SCAN_INDEX_Y,
	CHANNEL_SCAN_INDEX_Z,
	ACCEL_3D_CHANNEL_MAX,
};

/* Per-device state stored in iio_priv(indio_dev). */
struct accel_3d_state {
	struct hid_sensor_hub_callbacks callbacks;	/* sensor-hub event hooks */
	struct hid_sensor_common common_attributes;	/* shared HID sensor attrs */
	struct hid_sensor_hub_attribute_info accel[ACCEL_3D_CHANNEL_MAX];
	u32 accel_val[ACCEL_3D_CHANNEL_MAX];		/* latest sample, X/Y/Z */
};

/* HID usage id per scan index; order must match enum accel_3d_channel. */
static const u32 accel_3d_addresses[ACCEL_3D_CHANNEL_MAX] = {
	HID_USAGE_SENSOR_ACCEL_X_AXIS,
	HID_USAGE_SENSOR_ACCEL_Y_AXIS,
	HID_USAGE_SENSOR_ACCEL_Z_AXIS
};

/* Channel definitions */
static const struct iio_chan_spec accel_3d_channels[] = {
	{
		.type = IIO_ACCEL,
		.modified = 1,
		.channel2 = IIO_MOD_X,
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
		BIT(IIO_CHAN_INFO_SCALE) |
		BIT(IIO_CHAN_INFO_SAMP_FREQ) |
		BIT(IIO_CHAN_INFO_HYSTERESIS),
		.scan_index = CHANNEL_SCAN_INDEX_X,
	}, {
		.type = IIO_ACCEL,
		.modified = 1,
		.channel2 = IIO_MOD_Y,
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
		BIT(IIO_CHAN_INFO_SCALE) |
		BIT(IIO_CHAN_INFO_SAMP_FREQ) |
		BIT(IIO_CHAN_INFO_HYSTERESIS),
		.scan_index = CHANNEL_SCAN_INDEX_Y,
	}, {
		.type = IIO_ACCEL,
		.modified = 1,
		.channel2 = IIO_MOD_Z,
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
		BIT(IIO_CHAN_INFO_SCALE) |
		BIT(IIO_CHAN_INFO_SAMP_FREQ) |
		BIT(IIO_CHAN_INFO_HYSTERESIS),
		.scan_index = CHANNEL_SCAN_INDEX_Z,
	}
};

/* Adjust channel real bits based on report descriptor */
static void accel_3d_adjust_channel_bit_mask(struct iio_chan_spec *channels,
						int channel, int size)
{
	channels[channel].scan_type.sign = 's';
	/* Real storage bits will change based on the report desc. */
	channels[channel].scan_type.realbits = size * 8;
	/* Maximum size of a sample to capture is u32 */
	channels[channel].scan_type.storagebits = sizeof(u32) * 8;
}

/* Channel read_raw handler */
static int accel_3d_read_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int *val, int *val2,
			      long mask)
{
	struct accel_3d_state *accel_state = iio_priv(indio_dev);
	int report_id = -1;
	u32 address;
	int ret;
	int ret_type;

	*val = 0;
	*val2 = 0;
	switch (mask) {
	case 0:
		/*
		 * mask 0 is the raw-value read in this kernel's IIO ABI
		 * (presumably IIO_CHAN_INFO_RAW == 0 here -- confirm
		 * against the tree's <linux/iio/types.h> before changing).
		 */
		report_id = accel_state->accel[chan->scan_index].report_id;
		address = accel_3d_addresses[chan->scan_index];
		if (report_id >= 0)
			*val = sensor_hub_input_attr_get_raw_value(
				accel_state->common_attributes.hsdev,
				HID_USAGE_SENSOR_ACCEL_3D, address,
				report_id);
		else {
			*val = 0;
			return -EINVAL;
		}
		ret_type = IIO_VAL_INT;
		break;
	case IIO_CHAN_INFO_SCALE:
		/* scale/offset are shared by type; X-axis entry is used */
		*val = accel_state->accel[CHANNEL_SCAN_INDEX_X].units;
		ret_type = IIO_VAL_INT;
		break;
	case IIO_CHAN_INFO_OFFSET:
		*val = hid_sensor_convert_exponent(
			accel_state->accel[CHANNEL_SCAN_INDEX_X].unit_expo);
		ret_type = IIO_VAL_INT;
		break;
	case IIO_CHAN_INFO_SAMP_FREQ:
		/*
		 * NOTE(review): ret from the helper is discarded and
		 * IIO_VAL_INT_PLUS_MICRO returned unconditionally, so a
		 * helper failure is not reported to the caller.
		 */
		ret = hid_sensor_read_samp_freq_value(
			&accel_state->common_attributes, val, val2);
		ret_type = IIO_VAL_INT_PLUS_MICRO;
		break;
	case IIO_CHAN_INFO_HYSTERESIS:
		/* NOTE(review): same discarded-ret pattern as SAMP_FREQ */
		ret = hid_sensor_read_raw_hyst_value(
			&accel_state->common_attributes, val, val2);
		ret_type = IIO_VAL_INT_PLUS_MICRO;
		break;
	default:
		ret_type = -EINVAL;
		break;
	}

	return ret_type;
}

/* Channel write_raw handler */
static int accel_3d_write_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan,
			       int val,
			       int val2,
			       long mask)
{
	struct accel_3d_state *accel_state = iio_priv(indio_dev);
	int ret = 0;

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		ret = hid_sensor_write_samp_freq_value(
				&accel_state->common_attributes, val, val2);
		break;
	case IIO_CHAN_INFO_HYSTERESIS:
		ret = hid_sensor_write_raw_hyst_value(
				&accel_state->common_attributes, val, val2);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/* All writable attributes take an integer-plus-micro fixed-point value. */
static int accel_3d_write_raw_get_fmt(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan,
			       long mask)
{
	return IIO_VAL_INT_PLUS_MICRO;
}

static const struct iio_info accel_3d_info = {
	.driver_module = THIS_MODULE,
	.read_raw = &accel_3d_read_raw,
	.write_raw = &accel_3d_write_raw,
	.write_raw_get_fmt = &accel_3d_write_raw_get_fmt,
};

/* Function to push data to buffer */
static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
{
	dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
	/*
	 * len is unused: iio_push_to_buffers() in this kernel takes no
	 * length, the scan size is fixed by the enabled channel mask.
	 */
	iio_push_to_buffers(indio_dev, (u8 *)data);
}

/* Callback handler to send event after all samples are received
 * and captured; pushes the cached X/Y/Z triple only once a trigger
 * consumer has flagged data_ready.
 */
static int accel_3d_proc_event(struct hid_sensor_hub_device *hsdev,
				unsigned usage_id,
				void *priv)
{
	struct iio_dev *indio_dev = platform_get_drvdata(priv);
	struct accel_3d_state *accel_state = iio_priv(indio_dev);

	dev_dbg(&indio_dev->dev, "accel_3d_proc_event [%d]\n",
				accel_state->common_attributes.data_ready);
	if (accel_state->common_attributes.data_ready)
		hid_sensor_push_data(indio_dev,
				(u8 *)accel_state->accel_val,
				sizeof(accel_state->accel_val));

	return 0;
}

/* Capture samples in local storage; one callback per axis, the value is
 * stashed in accel_val[] until accel_3d_proc_event() pushes the triple.
 * Returns -EINVAL for usage ids this driver does not handle.
 */
static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
				unsigned usage_id,
				size_t raw_len, char *raw_data,
				void *priv)
{
	struct iio_dev *indio_dev = platform_get_drvdata(priv);
	struct accel_3d_state *accel_state = iio_priv(indio_dev);
	int offset;
	int ret = -EINVAL;

	switch (usage_id) {
	case HID_USAGE_SENSOR_ACCEL_X_AXIS:
	case HID_USAGE_SENSOR_ACCEL_Y_AXIS:
	case HID_USAGE_SENSOR_ACCEL_Z_AXIS:
		/* usage ids are consecutive, so the delta is the axis index */
		offset = usage_id - HID_USAGE_SENSOR_ACCEL_X_AXIS;
		accel_state->accel_val[CHANNEL_SCAN_INDEX_X + offset] =
						*(u32 *)raw_data;
		ret = 0;
	break;
	default:
		break;
	}

	return ret;
}

/* Parse report which is specific to an usage id: pull per-axis attribute
 * info from the report descriptor and size each channel accordingly.
 */
static int accel_3d_parse_report(struct platform_device *pdev,
				struct hid_sensor_hub_device *hsdev,
				struct iio_chan_spec *channels,
				unsigned usage_id,
				struct accel_3d_state *st)
{
	int ret;
	int i;

	for (i = 0; i <= CHANNEL_SCAN_INDEX_Z; ++i) {
		ret = sensor_hub_input_get_attribute_info(hsdev,
				HID_INPUT_REPORT,
				usage_id,
				HID_USAGE_SENSOR_ACCEL_X_AXIS + i,
				&st->accel[CHANNEL_SCAN_INDEX_X + i]);
		if (ret < 0)
			break;
		accel_3d_adjust_channel_bit_mask(channels,
				CHANNEL_SCAN_INDEX_X + i,
				st->accel[CHANNEL_SCAN_INDEX_X + i].size);
	}
	dev_dbg(&pdev->dev, "accel_3d %x:%x, %x:%x, %x:%x\n",
			st->accel[0].index,
			st->accel[0].report_id,
			st->accel[1].index, st->accel[1].report_id,
			st->accel[2].index, st->accel[2].report_id);

	return ret;
}

/* Function to initialize the processing for usage id: allocate the IIO
 * device, parse attributes/report, wire up triggered buffer + trigger,
 * register the device, then hook the sensor-hub callbacks (done last so
 * no event fires before the device is fully set up).
 */
static int hid_accel_3d_probe(struct platform_device *pdev)
{
	int ret = 0;
	static const char *name = "accel_3d";
	struct iio_dev *indio_dev;
	struct accel_3d_state *accel_state;
	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
	struct iio_chan_spec *channels;

	indio_dev = iio_device_alloc(sizeof(struct accel_3d_state));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	platform_set_drvdata(pdev, indio_dev);

	accel_state = iio_priv(indio_dev);
	accel_state->common_attributes.hsdev = hsdev;
	accel_state->common_attributes.pdev = pdev;

	ret = hid_sensor_parse_common_attributes(hsdev,
					HID_USAGE_SENSOR_ACCEL_3D,
					&accel_state->common_attributes);
	if (ret) {
		dev_err(&pdev->dev, "failed to setup common attributes\n");
		goto error_free_dev;
	}

	/* copy the template so per-device report sizes can be applied */
	channels = kmemdup(accel_3d_channels, sizeof(accel_3d_channels),
			   GFP_KERNEL);
	if (!channels) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "failed to duplicate channels\n");
		goto error_free_dev;
	}

	ret = accel_3d_parse_report(pdev, hsdev, channels,
					HID_USAGE_SENSOR_ACCEL_3D, accel_state);
	if (ret) {
		dev_err(&pdev->dev, "failed to setup attributes\n");
		goto error_free_dev_mem;
	}

	indio_dev->channels = channels;
	indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
	indio_dev->dev.parent = &pdev->dev;
	indio_dev->info = &accel_3d_info;
	indio_dev->name = name;
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
		NULL, NULL);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
		goto error_free_dev_mem;
	}
	accel_state->common_attributes.data_ready = false;
	ret = hid_sensor_setup_trigger(indio_dev, name,
					&accel_state->common_attributes);
	if (ret < 0) {
		dev_err(&pdev->dev, "trigger setup failed\n");
		goto error_unreg_buffer_funcs;
	}

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(&pdev->dev, "device register failed\n");
		goto error_remove_trigger;
	}

	accel_state->callbacks.send_event = accel_3d_proc_event;
	accel_state->callbacks.capture_sample = accel_3d_capture_sample;
	accel_state->callbacks.pdev = pdev;
	ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D,
					&accel_state->callbacks);
	if (ret < 0) {
		dev_err(&pdev->dev, "callback reg failed\n");
		goto error_iio_unreg;
	}

	return ret;

error_iio_unreg:
	iio_device_unregister(indio_dev);
error_remove_trigger:
	hid_sensor_remove_trigger(indio_dev);
error_unreg_buffer_funcs:
	iio_triggered_buffer_cleanup(indio_dev);
error_free_dev_mem:
	kfree(indio_dev->channels);
error_free_dev:
	iio_device_free(indio_dev);
error_ret:
	return ret;
}

/* Function to deinitialize the processing for usage id: exact reverse
 * order of the probe setup.
 */
static int hid_accel_3d_remove(struct platform_device *pdev)
{
	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);

	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D);
	iio_device_unregister(indio_dev);
	hid_sensor_remove_trigger(indio_dev);
	iio_triggered_buffer_cleanup(indio_dev);
	kfree(indio_dev->channels);
	iio_device_free(indio_dev);

	return 0;
}

static struct platform_driver hid_accel_3d_platform_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
	.probe		= hid_accel_3d_probe,
	.remove		= hid_accel_3d_remove,
};
module_platform_driver(hid_accel_3d_platform_driver);

MODULE_DESCRIPTION("HID Sensor Accel 3D");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
MODULE_LICENSE("GPL");
gpl-2.0
crpalmer/android_kernel_sony_tetra
sound/usb/6fire/midi.c
2412
5204
/*
 * Linux driver for TerraTec DMX 6Fire USB
 *
 * Rawmidi driver
 *
 * Author:	Torsten Schenk <torsten.schenk@zoho.com>
 * Created:	Jan 01, 2011
 * Copyright:	(C) Torsten Schenk
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <sound/rawmidi.h>

#include "midi.h"
#include "chip.h"
#include "comm.h"

enum {
	MIDI_BUFSIZE = 64
};

/*
 * URB completion handler for MIDI output: if more rawmidi data is
 * pending, build the next packet (4-byte device header + payload) and
 * resubmit; otherwise mark the output stream idle.
 */
static void usb6fire_midi_out_handler(struct urb *urb)
{
	struct midi_runtime *rt = urb->context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&rt->out_lock, flags);

	if (rt->out) {
		ret = snd_rawmidi_transmit(rt->out, rt->out_buffer + 4,
				MIDI_BUFSIZE - 4);
		if (ret > 0) { /* more data available, send next packet */
			rt->out_buffer[1] = ret + 2;
			rt->out_buffer[3] = rt->out_serial++;
			urb->transfer_buffer_length = ret + 4;

			ret = usb_submit_urb(urb, GFP_ATOMIC);
			if (ret < 0)
				snd_printk(KERN_ERR PREFIX "midi out urb "
						"submit failed: %d\n", ret);
		} else /* no more data to transmit */
			rt->out = NULL;
	}
	spin_unlock_irqrestore(&rt->out_lock, flags);
}

/* Deliver bytes received from the device to the active input substream. */
static void usb6fire_midi_in_received(struct midi_runtime *rt, u8 *data,
		int length)
{
	unsigned long flags;

	spin_lock_irqsave(&rt->in_lock, flags);
	if (rt->in)
		snd_rawmidi_receive(rt->in, data, length);
	spin_unlock_irqrestore(&rt->in_lock, flags);
}

static int usb6fire_midi_out_open(struct snd_rawmidi_substream *alsa_sub)
{
	return 0;
}

static int usb6fire_midi_out_close(struct snd_rawmidi_substream *alsa_sub)
{
	return 0;
}

/*
 * Start or stop MIDI output. On start, submit the first URB; subsequent
 * packets are chained from the completion handler above.
 */
static void usb6fire_midi_out_trigger(struct snd_rawmidi_substream *alsa_sub,
		int up)
{
	struct midi_runtime *rt = alsa_sub->rmidi->private_data;
	struct urb *urb = &rt->out_urb;
	/*
	 * Must be a plain int: snd_rawmidi_transmit() and usb_submit_urb()
	 * both return int, and the original __s8 silently truncated their
	 * results (fixed upstream the same way).
	 */
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&rt->out_lock, flags);
	if (up) { /* start transfer */
		if (rt->out) { /* we are already transmitting so just return */
			spin_unlock_irqrestore(&rt->out_lock, flags);
			return;
		}

		ret = snd_rawmidi_transmit(alsa_sub, rt->out_buffer + 4,
				MIDI_BUFSIZE - 4);
		if (ret > 0) {
			rt->out_buffer[1] = ret + 2;
			rt->out_buffer[3] = rt->out_serial++;
			urb->transfer_buffer_length = ret + 4;

			ret = usb_submit_urb(urb, GFP_ATOMIC);
			if (ret < 0)
				snd_printk(KERN_ERR PREFIX "midi out urb "
						"submit failed: %d\n", ret);
			else
				rt->out = alsa_sub;
		}
	} else if (rt->out == alsa_sub)
		rt->out = NULL;
	spin_unlock_irqrestore(&rt->out_lock, flags);
}

/* Busy-wait (up to ~1 s in 10 ms steps) for the output chain to drain. */
static void usb6fire_midi_out_drain(struct snd_rawmidi_substream *alsa_sub)
{
	struct midi_runtime *rt = alsa_sub->rmidi->private_data;
	int retry = 0;

	while (rt->out && retry++ < 100)
		msleep(10);
}

static int usb6fire_midi_in_open(struct snd_rawmidi_substream *alsa_sub)
{
	return 0;
}

static int usb6fire_midi_in_close(struct snd_rawmidi_substream *alsa_sub)
{
	return 0;
}

static void usb6fire_midi_in_trigger(struct snd_rawmidi_substream *alsa_sub,
		int up)
{
	struct midi_runtime *rt = alsa_sub->rmidi->private_data;
	unsigned long flags;

	spin_lock_irqsave(&rt->in_lock, flags);
	if (up)
		rt->in = alsa_sub;
	else
		rt->in = NULL;
	spin_unlock_irqrestore(&rt->in_lock, flags);
}

static struct snd_rawmidi_ops out_ops = {
	.open = usb6fire_midi_out_open,
	.close = usb6fire_midi_out_close,
	.trigger = usb6fire_midi_out_trigger,
	.drain = usb6fire_midi_out_drain
};

static struct snd_rawmidi_ops in_ops = {
	.open = usb6fire_midi_in_open,
	.close = usb6fire_midi_in_close,
	.trigger = usb6fire_midi_in_trigger
};

/*
 * Allocate the midi runtime, prepare the output URB/packet header and
 * register the rawmidi device. Returns 0 on success, negative errno
 * otherwise; on failure all partial allocations are released.
 */
int usb6fire_midi_init(struct sfire_chip *chip)
{
	int ret;
	struct midi_runtime *rt = kzalloc(sizeof(struct midi_runtime),
			GFP_KERNEL);
	struct comm_runtime *comm_rt = chip->comm;

	if (!rt)
		return -ENOMEM;

	rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
	if (!rt->out_buffer) {
		kfree(rt);
		return -ENOMEM;
	}

	rt->chip = chip;
	rt->in_received = usb6fire_midi_in_received;
	rt->out_buffer[0] = 0x80; /* 'send midi' command */
	rt->out_buffer[1] = 0x00; /* size of data */
	rt->out_buffer[2] = 0x00; /* always 0 */
	spin_lock_init(&rt->in_lock);
	spin_lock_init(&rt->out_lock);

	comm_rt->init_urb(comm_rt, &rt->out_urb, rt->out_buffer, rt,
			usb6fire_midi_out_handler);

	ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
	if (ret < 0) {
		kfree(rt->out_buffer);
		kfree(rt);
		snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
		return ret;
	}
	rt->instance->private_data = rt;
	strcpy(rt->instance->name, "DMX6FireUSB MIDI");
	rt->instance->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
			SNDRV_RAWMIDI_INFO_INPUT |
			SNDRV_RAWMIDI_INFO_DUPLEX;
	snd_rawmidi_set_ops(rt->instance, SNDRV_RAWMIDI_STREAM_OUTPUT,
			&out_ops);
	snd_rawmidi_set_ops(rt->instance, SNDRV_RAWMIDI_STREAM_INPUT,
			&in_ops);

	chip->midi = rt;
	return 0;
}

/* Poison the output URB so no further completions can run. */
void usb6fire_midi_abort(struct sfire_chip *chip)
{
	struct midi_runtime *rt = chip->midi;

	if (rt)
		usb_poison_urb(&rt->out_urb);
}

/*
 * Free the midi runtime. Tolerate a NULL chip->midi (init failed or was
 * never run) for consistency with usb6fire_midi_abort() above.
 */
void usb6fire_midi_destroy(struct sfire_chip *chip)
{
	struct midi_runtime *rt = chip->midi;

	if (!rt)
		return;
	kfree(rt->out_buffer);
	kfree(rt);
	chip->midi = NULL;
}
gpl-2.0
brinlyaus/sturdy-eureka
drivers/media/usb/stk1160/stk1160-i2c.c
4460
6274
/* * STK1160 driver * * Copyright (C) 2012 Ezequiel Garcia * <elezegarcia--a.t--gmail.com> * * Based on Easycap driver by R.M. Thomas * Copyright (C) 2010 R.M. Thomas * <rmthomas--a.t--sciolus.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/usb.h> #include <linux/i2c.h> #include "stk1160.h" #include "stk1160-reg.h" static unsigned int i2c_debug; module_param(i2c_debug, int, 0644); MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]"); #define dprintk_i2c(fmt, args...) \ do { \ if (i2c_debug) \ printk(KERN_DEBUG fmt, ##args); \ } while (0) static int stk1160_i2c_busy_wait(struct stk1160 *dev, u8 wait_bit_mask) { unsigned long end; u8 flag; /* Wait until read/write finish bit is set */ end = jiffies + msecs_to_jiffies(STK1160_I2C_TIMEOUT); while (time_is_after_jiffies(end)) { stk1160_read_reg(dev, STK1160_SICTL+1, &flag); /* read/write done? 
*/ if (flag & wait_bit_mask) goto done; usleep_range(10 * USEC_PER_MSEC, 20 * USEC_PER_MSEC); } return -ETIMEDOUT; done: return 0; } static int stk1160_i2c_write_reg(struct stk1160 *dev, u8 addr, u8 reg, u8 value) { int rc; /* Set serial device address */ rc = stk1160_write_reg(dev, STK1160_SICTL_SDA, addr); if (rc < 0) return rc; /* Set i2c device register sub-address */ rc = stk1160_write_reg(dev, STK1160_SBUSW_WA, reg); if (rc < 0) return rc; /* Set i2c device register value */ rc = stk1160_write_reg(dev, STK1160_SBUSW_WD, value); if (rc < 0) return rc; /* Start write now */ rc = stk1160_write_reg(dev, STK1160_SICTL, 0x01); if (rc < 0) return rc; rc = stk1160_i2c_busy_wait(dev, 0x04); if (rc < 0) return rc; return 0; } static int stk1160_i2c_read_reg(struct stk1160 *dev, u8 addr, u8 reg, u8 *value) { int rc; /* Set serial device address */ rc = stk1160_write_reg(dev, STK1160_SICTL_SDA, addr); if (rc < 0) return rc; /* Set i2c device register sub-address */ rc = stk1160_write_reg(dev, STK1160_SBUSR_RA, reg); if (rc < 0) return rc; /* Start read now */ rc = stk1160_write_reg(dev, STK1160_SICTL, 0x20); if (rc < 0) return rc; rc = stk1160_i2c_busy_wait(dev, 0x01); if (rc < 0) return rc; rc = stk1160_read_reg(dev, STK1160_SBUSR_RD, value); if (rc < 0) return rc; return 0; } /* * stk1160_i2c_check_for_device() * check if there is a i2c_device at the supplied address */ static int stk1160_i2c_check_for_device(struct stk1160 *dev, unsigned char addr) { int rc; /* Set serial device address */ rc = stk1160_write_reg(dev, STK1160_SICTL_SDA, addr); if (rc < 0) return rc; /* Set device sub-address, we'll chip version reg */ rc = stk1160_write_reg(dev, STK1160_SBUSR_RA, 0x00); if (rc < 0) return rc; /* Start read now */ rc = stk1160_write_reg(dev, STK1160_SICTL, 0x20); if (rc < 0) return rc; rc = stk1160_i2c_busy_wait(dev, 0x01); if (rc < 0) return -ENODEV; return 0; } /* * stk1160_i2c_xfer() * the main i2c transfer function */ static int stk1160_i2c_xfer(struct i2c_adapter 
*i2c_adap, struct i2c_msg msgs[], int num) { struct stk1160 *dev = i2c_adap->algo_data; int addr, rc, i; for (i = 0; i < num; i++) { addr = msgs[i].addr << 1; dprintk_i2c("%s: addr=%x", __func__, addr); if (!msgs[i].len) { /* no len: check only for device presence */ rc = stk1160_i2c_check_for_device(dev, addr); if (rc < 0) { dprintk_i2c(" no device\n"); return rc; } } else if (msgs[i].flags & I2C_M_RD) { /* read request without preceding register selection */ dprintk_i2c(" subaddr not selected"); rc = -EOPNOTSUPP; goto err; } else if (i + 1 < num && msgs[i].len <= 2 && (msgs[i + 1].flags & I2C_M_RD) && msgs[i].addr == msgs[i + 1].addr) { if (msgs[i].len != 1 || msgs[i + 1].len != 1) { dprintk_i2c(" len not supported"); rc = -EOPNOTSUPP; goto err; } dprintk_i2c(" subaddr=%x", msgs[i].buf[0]); rc = stk1160_i2c_read_reg(dev, addr, msgs[i].buf[0], msgs[i + 1].buf); dprintk_i2c(" read=%x", *msgs[i + 1].buf); /* consumed two msgs, so we skip one of them */ i++; } else { if (msgs[i].len != 2) { dprintk_i2c(" len not supported"); rc = -EOPNOTSUPP; goto err; } dprintk_i2c(" subaddr=%x write=%x", msgs[i].buf[0], msgs[i].buf[1]); rc = stk1160_i2c_write_reg(dev, addr, msgs[i].buf[0], msgs[i].buf[1]); } if (rc < 0) goto err; dprintk_i2c(" OK\n"); } return num; err: dprintk_i2c(" ERROR: %d\n", rc); return num; } /* * functionality(), what da heck is this? 
*/ static u32 functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm algo = { .master_xfer = stk1160_i2c_xfer, .functionality = functionality, }; static struct i2c_adapter adap_template = { .owner = THIS_MODULE, .name = "stk1160", .algo = &algo, }; static struct i2c_client client_template = { .name = "stk1160 internal", }; /* * stk1160_i2c_register() * register i2c bus */ int stk1160_i2c_register(struct stk1160 *dev) { int rc; dev->i2c_adap = adap_template; dev->i2c_adap.dev.parent = dev->dev; strcpy(dev->i2c_adap.name, "stk1160"); dev->i2c_adap.algo_data = dev; i2c_set_adapdata(&dev->i2c_adap, &dev->v4l2_dev); rc = i2c_add_adapter(&dev->i2c_adap); if (rc < 0) { stk1160_err("cannot add i2c adapter (%d)\n", rc); return rc; } dev->i2c_client = client_template; dev->i2c_client.adapter = &dev->i2c_adap; /* Set i2c clock divider device address */ stk1160_write_reg(dev, STK1160_SICTL_CD, 0x0f); /* ??? */ stk1160_write_reg(dev, STK1160_ASIC + 3, 0x00); return 0; } /* * stk1160_i2c_unregister() * unregister i2c_bus */ int stk1160_i2c_unregister(struct stk1160 *dev) { i2c_del_adapter(&dev->i2c_adap); return 0; }
gpl-2.0
aditisstillalive/android_kernel_lge_hammerhead
drivers/net/wireless/rtlwifi/rtl8192de/sw.c
4972
13219
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../core.h" #include "../pci.h" #include "reg.h" #include "def.h" #include "phy.h" #include "dm.h" #include "hw.h" #include "sw.h" #include "trx.h" #include "led.h" #include <linux/module.h> static void rtl92d_init_aspm_vars(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); /*close ASPM for AMD defaultly */ rtlpci->const_amdpci_aspm = 0; /* * ASPM PS mode. * 0 - Disable ASPM, * 1 - Enable ASPM without Clock Req, * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. * set defult to RTL8192CE:3 RTL8192E:2 * */ rtlpci->const_pci_aspm = 3; /*Setting for PCI-E device */ rtlpci->const_devicepci_aspm_setting = 0x03; /*Setting for PCI-E bridge */ rtlpci->const_hostpci_aspm_setting = 0x02; /* * In Hw/Sw Radio Off situation. 
* 0 - Default, * 1 - From ASPM setting without low Mac Pwr, * 2 - From ASPM setting with low Mac Pwr, * 3 - Bus D3 * set default to RTL8192CE:0 RTL8192SE:2 */ rtlpci->const_hwsw_rfoff_d3 = 0; /* * This setting works for those device with * backdoor ASPM setting such as EPHY setting. * 0 - Not support ASPM, * 1 - Support ASPM, * 2 - According to chipset. */ rtlpci->const_support_pciaspm = 1; } static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) { int err; u8 tid; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); rtlpriv->dm.dm_initialgain_enable = true; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = false; rtlpriv->dm.thermalvalue = 0; rtlpriv->dm.useramask = true; /* dual mac */ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) rtlpriv->phy.current_channel = 36; else rtlpriv->phy.current_channel = 1; if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) { rtlpriv->rtlhal.disable_amsdu_8k = true; /* No long RX - reduce fragmentation */ rtlpci->rxbuffersize = 4096; } rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13); rtlpci->receive_config = ( RCR_APPFCS | RCR_AMF | RCR_ADF | RCR_APP_MIC | RCR_APP_ICV | RCR_AICV | RCR_ACRC32 | RCR_AB | RCR_AM | RCR_APM | RCR_APP_PHYST_RXFF | RCR_HTC_LOC_CTRL ); rtlpci->irq_mask[0] = (u32) ( IMR_ROK | IMR_VODOK | IMR_VIDOK | IMR_BEDOK | IMR_BKDOK | IMR_MGNTDOK | IMR_HIGHDOK | IMR_BDOK | IMR_RDU | IMR_RXFOVW ); rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD); /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; if (!rtlpriv->psc.inactiveps) pr_info("Power Save off (module option)\n"); if (!rtlpriv->psc.fwctrl_lps) pr_info("FW Power Save off (module option)\n"); rtlpriv->psc.reg_fwctrl_lps = 3; 
rtlpriv->psc.reg_max_lps_awakeintvl = 5; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 */ rtl92d_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 2) rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* for early mode */ rtlpriv->rtlhal.earlymode_enable = true; for (tid = 0; tid < 8; tid++) skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]); /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for fw\n"); return 1; } rtlpriv->max_fw_size = 0x8000; pr_info("Driver for Realtek RTL8192DE WLAN interface\n"); pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name); /* request fw */ err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to request firmware!\n"); return 1; } return 0; } static void rtl92d_deinit_sw_vars(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tid; if (rtlpriv->rtlhal.pfirmware) { vfree(rtlpriv->rtlhal.pfirmware); rtlpriv->rtlhal.pfirmware = NULL; } for (tid = 0; tid < 8; tid++) skb_queue_purge(&rtlpriv->mac80211.skb_waitq[tid]); } static struct rtl_hal_ops rtl8192de_hal_ops = { .init_sw_vars = rtl92d_init_sw_vars, .deinit_sw_vars = rtl92d_deinit_sw_vars, .read_eeprom_info = rtl92de_read_eeprom_info, .interrupt_recognized = rtl92de_interrupt_recognized, .hw_init = rtl92de_hw_init, .hw_disable = rtl92de_card_disable, .hw_suspend = rtl92de_suspend, .hw_resume = rtl92de_resume, .enable_interrupt = rtl92de_enable_interrupt, .disable_interrupt = rtl92de_disable_interrupt, .set_network_type = rtl92de_set_network_type, .set_chk_bssid = rtl92de_set_check_bssid, .set_qos = rtl92de_set_qos, .set_bcn_reg = 
rtl92de_set_beacon_related_registers, .set_bcn_intv = rtl92de_set_beacon_interval, .update_interrupt_mask = rtl92de_update_interrupt_mask, .get_hw_reg = rtl92de_get_hw_reg, .set_hw_reg = rtl92de_set_hw_reg, .update_rate_tbl = rtl92de_update_hal_rate_tbl, .fill_tx_desc = rtl92de_tx_fill_desc, .fill_tx_cmddesc = rtl92de_tx_fill_cmddesc, .query_rx_desc = rtl92de_rx_query_desc, .set_channel_access = rtl92de_update_channel_access_setting, .radio_onoff_checking = rtl92de_gpio_radio_on_off_checking, .set_bw_mode = rtl92d_phy_set_bw_mode, .switch_channel = rtl92d_phy_sw_chnl, .dm_watchdog = rtl92d_dm_watchdog, .scan_operation_backup = rtl92d_phy_scan_operation_backup, .set_rf_power_state = rtl92d_phy_set_rf_power_state, .led_control = rtl92de_led_control, .set_desc = rtl92de_set_desc, .get_desc = rtl92de_get_desc, .tx_polling = rtl92de_tx_polling, .enable_hw_sec = rtl92de_enable_hw_security_config, .set_key = rtl92de_set_key, .init_sw_leds = rtl92de_init_sw_leds, .get_bbreg = rtl92d_phy_query_bb_reg, .set_bbreg = rtl92d_phy_set_bb_reg, .get_rfreg = rtl92d_phy_query_rf_reg, .set_rfreg = rtl92d_phy_set_rf_reg, .linked_set_reg = rtl92d_linked_set_reg, }; static struct rtl_mod_params rtl92de_mod_params = { .sw_crypto = false, .inactiveps = true, .swctrl_lps = true, .fwctrl_lps = false, .debug = DBG_EMERG, }; static struct rtl_hal_cfg rtl92de_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8192de", .fw_name = "rtlwifi/rtl8192defw.bin", .ops = &rtl8192de_hal_ops, .mod_params = &rtl92de_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, .maps[SYS_CLK] = REG_SYS_CLKR, .maps[MAC_RCR_AM] = RCR_AM, .maps[MAC_RCR_AB] = RCR_AB, .maps[MAC_RCR_ACRC32] = RCR_ACRC32, .maps[MAC_RCR_ACF] = RCR_ACF, .maps[MAC_RCR_AAP] = RCR_AAP, .maps[EFUSE_TEST] = REG_EFUSE_TEST, .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, .maps[EFUSE_CLK] = 0, /* just for 92se */ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL, .maps[EFUSE_PWC_EV12V] = PWC_EV12V, .maps[EFUSE_FEN_ELDR] = 
FEN_ELDR, .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN, .maps[EFUSE_ANA8M] = 0, /* just for 92se */ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, .maps[RWCAM] = REG_CAMCMD, .maps[WCAMI] = REG_CAMWRITE, .maps[RCAMO] = REG_CAMREAD, .maps[CAMDBG] = REG_CAMDBG, .maps[SECR] = REG_SECCFG, .maps[SEC_CAM_NONE] = CAM_NONE, .maps[SEC_CAM_WEP40] = CAM_WEP40, .maps[SEC_CAM_TKIP] = CAM_TKIP, .maps[SEC_CAM_AES] = CAM_AES, .maps[SEC_CAM_WEP104] = CAM_WEP104, .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6, .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5, .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4, .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4, .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2, .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1, .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, .maps[RTL_IMR_BcnInt] = IMR_BcnInt, .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, .maps[RTL_IMR_RDU] = IMR_RDU, .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, .maps[RTL_IMR_BDOK] = IMR_BDOK, .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK, .maps[RTL_IMR_TBDER] = IMR_TBDER, .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK, .maps[RTL_IMR_TBDOK] = IMR_TBDOK, .maps[RTL_IMR_BKDOK] = IMR_BKDOK, .maps[RTL_IMR_BEDOK] = IMR_BEDOK, .maps[RTL_IMR_VIDOK] = IMR_VIDOK, .maps[RTL_IMR_VODOK] = IMR_VODOK, .maps[RTL_IMR_ROK] = IMR_ROK, .maps[RTL_IBSS_INT_MASKS] = (IMR_BcnInt | IMR_TBDOK | IMR_TBDER), .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M, .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M, .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M, .maps[RTL_RC_CCK_RATE11M] = 
DESC92_RATE11M, .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M, .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M, .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M, .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M, .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M, .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M, .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M, .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M, .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7, .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, }; static struct pci_device_id rtl92de_pci_ids[] __devinitdata = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8193, rtl92de_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x002B, rtl92de_hal_cfg)}, {}, }; MODULE_DEVICE_TABLE(pci, rtl92de_pci_ids); MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 8192DE 802.11n Dual Mac PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8192defw.bin"); module_param_named(swenc, rtl92de_mod_params.sw_crypto, bool, 0444); module_param_named(debug, rtl92de_mod_params.debug, int, 0444); module_param_named(ips, rtl92de_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); static const struct dev_pm_ops rtlwifi_pm_ops = { .suspend = rtl_pci_suspend, .resume = rtl_pci_resume, .freeze = rtl_pci_suspend, .thaw = rtl_pci_resume, .poweroff = rtl_pci_suspend, .restore = rtl_pci_resume, }; static struct pci_driver rtl92de_driver = { .name = 
KBUILD_MODNAME, .id_table = rtl92de_pci_ids, .probe = rtl_pci_probe, .remove = rtl_pci_disconnect, .driver.pm = &rtlwifi_pm_ops, }; /* add global spin lock to solve the problem that * Dul mac register operation on the same time */ spinlock_t globalmutex_power; spinlock_t globalmutex_for_fwdownload; spinlock_t globalmutex_for_power_and_efuse; static int __init rtl92de_module_init(void) { int ret = 0; spin_lock_init(&globalmutex_power); spin_lock_init(&globalmutex_for_fwdownload); spin_lock_init(&globalmutex_for_power_and_efuse); ret = pci_register_driver(&rtl92de_driver); if (ret) RT_ASSERT(false, "No device found\n"); return ret; } static void __exit rtl92de_module_exit(void) { pci_unregister_driver(&rtl92de_driver); } module_init(rtl92de_module_init); module_exit(rtl92de_module_exit);
gpl-2.0
619619/T805-Basicrom-Kernel
arch/arm/mach-pxa/zylonite_pxa300.c
7276
6339
/* * linux/arch/arm/mach-pxa/zylonite_pxa300.c * * PXA300/PXA310 specific support code for the * PXA3xx Development Platform (aka Zylonite) * * Copyright (C) 2007 Marvell Internation Ltd. * 2007-08-21: eric miao <eric.miao@marvell.com> * initial version * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/i2c/pxa-i2c.h> #include <linux/i2c/pca953x.h> #include <linux/gpio.h> #include <mach/pxa300.h> #include <mach/zylonite.h> #include "generic.h" /* PXA300/PXA310 common configurations */ static mfp_cfg_t common_mfp_cfg[] __initdata = { /* LCD */ GPIO54_LCD_LDD_0, GPIO55_LCD_LDD_1, GPIO56_LCD_LDD_2, GPIO57_LCD_LDD_3, GPIO58_LCD_LDD_4, GPIO59_LCD_LDD_5, GPIO60_LCD_LDD_6, GPIO61_LCD_LDD_7, GPIO62_LCD_LDD_8, GPIO63_LCD_LDD_9, GPIO64_LCD_LDD_10, GPIO65_LCD_LDD_11, GPIO66_LCD_LDD_12, GPIO67_LCD_LDD_13, GPIO68_LCD_LDD_14, GPIO69_LCD_LDD_15, GPIO70_LCD_LDD_16, GPIO71_LCD_LDD_17, GPIO72_LCD_FCLK, GPIO73_LCD_LCLK, GPIO74_LCD_PCLK, GPIO75_LCD_BIAS, GPIO76_LCD_VSYNC, GPIO127_LCD_CS_N, GPIO20_PWM3_OUT, /* backlight */ /* BTUART */ GPIO111_UART2_RTS, GPIO112_UART2_RXD | MFP_LPM_EDGE_FALL, GPIO113_UART2_TXD, GPIO114_UART2_CTS | MFP_LPM_EDGE_BOTH, /* STUART */ GPIO109_UART3_TXD, GPIO110_UART3_RXD | MFP_LPM_EDGE_FALL, /* AC97 */ GPIO23_AC97_nACRESET, GPIO24_AC97_SYSCLK, GPIO29_AC97_BITCLK, GPIO25_AC97_SDATA_IN_0, GPIO27_AC97_SDATA_OUT, GPIO28_AC97_SYNC, GPIO17_GPIO, /* SDATA_IN_1 but unused - configure to GPIO */ /* SSP3 */ GPIO91_SSP3_SCLK, GPIO92_SSP3_FRM, GPIO93_SSP3_TXD, GPIO94_SSP3_RXD, /* WM9713 IRQ */ GPIO26_GPIO, /* Keypad */ GPIO107_KP_DKIN_0 | MFP_LPM_EDGE_BOTH, GPIO108_KP_DKIN_1 | MFP_LPM_EDGE_BOTH, GPIO115_KP_MKIN_0 | MFP_LPM_EDGE_BOTH, GPIO116_KP_MKIN_1 | MFP_LPM_EDGE_BOTH, GPIO117_KP_MKIN_2 | MFP_LPM_EDGE_BOTH, 
GPIO118_KP_MKIN_3 | MFP_LPM_EDGE_BOTH, GPIO119_KP_MKIN_4 | MFP_LPM_EDGE_BOTH, GPIO120_KP_MKIN_5 | MFP_LPM_EDGE_BOTH, GPIO2_2_KP_MKIN_6 | MFP_LPM_EDGE_BOTH, GPIO3_2_KP_MKIN_7 | MFP_LPM_EDGE_BOTH, GPIO121_KP_MKOUT_0, GPIO122_KP_MKOUT_1, GPIO123_KP_MKOUT_2, GPIO124_KP_MKOUT_3, GPIO125_KP_MKOUT_4, GPIO4_2_KP_MKOUT_5, GPIO5_2_KP_MKOUT_6, GPIO6_2_KP_MKOUT_7, /* MMC1 */ GPIO3_MMC1_DAT0, GPIO4_MMC1_DAT1 | MFP_LPM_EDGE_BOTH, GPIO5_MMC1_DAT2, GPIO6_MMC1_DAT3, GPIO7_MMC1_CLK, GPIO8_MMC1_CMD, /* CMD0 for slot 0 */ GPIO15_GPIO, /* CMD1 default as GPIO for slot 0 */ /* MMC2 */ GPIO9_MMC2_DAT0, GPIO10_MMC2_DAT1 | MFP_LPM_EDGE_BOTH, GPIO11_MMC2_DAT2, GPIO12_MMC2_DAT3, GPIO13_MMC2_CLK, GPIO14_MMC2_CMD, /* USB Host */ GPIO0_2_USBH_PEN, GPIO1_2_USBH_PWR, /* Standard I2C */ GPIO21_I2C_SCL, GPIO22_I2C_SDA, /* GPIO */ GPIO18_GPIO | MFP_PULL_HIGH, /* GPIO Expander #0 INT_N */ GPIO19_GPIO | MFP_PULL_HIGH, /* GPIO Expander #1 INT_N */ }; static mfp_cfg_t pxa300_mfp_cfg[] __initdata = { /* FFUART */ GPIO30_UART1_RXD | MFP_LPM_EDGE_FALL, GPIO31_UART1_TXD, GPIO32_UART1_CTS, GPIO37_UART1_RTS, GPIO33_UART1_DCD, GPIO34_UART1_DSR | MFP_LPM_EDGE_FALL, GPIO35_UART1_RI, GPIO36_UART1_DTR, /* Ethernet */ GPIO2_nCS3, GPIO99_GPIO, }; static mfp_cfg_t pxa310_mfp_cfg[] __initdata = { /* FFUART */ GPIO99_UART1_RXD | MFP_LPM_EDGE_FALL, GPIO100_UART1_TXD, GPIO101_UART1_CTS, GPIO106_UART1_RTS, /* Ethernet */ GPIO2_nCS3, GPIO102_GPIO, /* MMC3 */ GPIO7_2_MMC3_DAT0, GPIO8_2_MMC3_DAT1 | MFP_LPM_EDGE_BOTH, GPIO9_2_MMC3_DAT2, GPIO10_2_MMC3_DAT3, GPIO103_MMC3_CLK, GPIO105_MMC3_CMD, }; #define NUM_LCD_DETECT_PINS 7 static int lcd_detect_pins[] __initdata = { MFP_PIN_GPIO71, /* LCD_LDD_17 - ORIENT */ MFP_PIN_GPIO70, /* LCD_LDD_16 - LCDID[5] */ MFP_PIN_GPIO75, /* LCD_BIAS - LCDID[4] */ MFP_PIN_GPIO73, /* LCD_LCLK - LCDID[3] */ MFP_PIN_GPIO72, /* LCD_FCLK - LCDID[2] */ MFP_PIN_GPIO127,/* LCD_CS_N - LCDID[1] */ MFP_PIN_GPIO76, /* LCD_VSYNC - LCDID[0] */ }; static void __init zylonite_detect_lcd_panel(void) { unsigned 
long mfpr_save[NUM_LCD_DETECT_PINS]; int i, gpio, id = 0; /* save the original MFP settings of these pins and configure * them as GPIO Input, DS01X, Pull Neither, Edge Clear */ for (i = 0; i < NUM_LCD_DETECT_PINS; i++) { mfpr_save[i] = pxa3xx_mfp_read(lcd_detect_pins[i]); pxa3xx_mfp_write(lcd_detect_pins[i], 0x8440); } for (i = 0; i < NUM_LCD_DETECT_PINS; i++) { id = id << 1; gpio = mfp_to_gpio(lcd_detect_pins[i]); gpio_request(gpio, "LCD_ID_PINS"); gpio_direction_input(gpio); if (gpio_get_value(gpio)) id = id | 0x1; gpio_free(gpio); } /* lcd id, flush out bit 1 */ lcd_id = id & 0x3d; /* lcd orientation, portrait or landscape */ lcd_orientation = (id >> 6) & 0x1; /* restore the original MFP settings */ for (i = 0; i < NUM_LCD_DETECT_PINS; i++) pxa3xx_mfp_write(lcd_detect_pins[i], mfpr_save[i]); } #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static struct pca953x_platform_data gpio_exp[] = { [0] = { .gpio_base = 128, }, [1] = { .gpio_base = 144, }, }; static struct i2c_board_info zylonite_i2c_board_info[] = { { .type = "pca9539", .addr = 0x74, .platform_data = &gpio_exp[0], .irq = PXA_GPIO_TO_IRQ(18), }, { .type = "pca9539", .addr = 0x75, .platform_data = &gpio_exp[1], .irq = PXA_GPIO_TO_IRQ(19), }, }; static void __init zylonite_init_i2c(void) { pxa_set_i2c_info(NULL); i2c_register_board_info(0, ARRAY_AND_SIZE(zylonite_i2c_board_info)); } #else static inline void zylonite_init_i2c(void) {} #endif void __init zylonite_pxa300_init(void) { if (cpu_is_pxa300() || cpu_is_pxa310()) { /* initialize MFP */ pxa3xx_mfp_config(ARRAY_AND_SIZE(common_mfp_cfg)); /* detect LCD panel */ zylonite_detect_lcd_panel(); /* WM9713 IRQ */ wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO26); zylonite_init_i2c(); } if (cpu_is_pxa300()) { pxa3xx_mfp_config(ARRAY_AND_SIZE(pxa300_mfp_cfg)); gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO99); } if (cpu_is_pxa310()) { pxa3xx_mfp_config(ARRAY_AND_SIZE(pxa310_mfp_cfg)); gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO102); } /* GPIOs for Debug LEDs */ 
gpio_debug_led1 = EXT_GPIO(25); gpio_debug_led2 = EXT_GPIO(26); }
gpl-2.0
FrozenCow/msm
arch/mips/cobalt/reset.c
9580
1068
/*
 * Cobalt Reset operations
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 1996, 1997 by Ralf Baechle
 * Copyright (C) 2001 by Liam Davies (ldavies@agile.tv)
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/leds.h>

#include <asm/processor.h>

#include <cobalt.h>

/* Memory-mapped reset latch; writing RESET to it reboots the board. */
#define RESET_PORT	((void __iomem *)CKSEG1ADDR(0x1c000000))
#define RESET		0x0f

DEFINE_LED_TRIGGER(power_off_led_trigger);

/*
 * Register the "power-off" LED trigger so cobalt_machine_halt() can
 * drive the front-panel LED through it.
 */
static int __init ledtrig_power_off_init(void)
{
	led_trigger_register_simple("power-off", &power_off_led_trigger);
	return 0;
}
device_initcall(ledtrig_power_off_init);

/*
 * Halt: signal the power-off LED trigger, then spin forever with
 * interrupts disabled, using the CPU's wait hook when one is set.
 */
void cobalt_machine_halt(void)
{
	/*
	 * turn on power off LED on RaQ
	 */
	led_trigger_event(power_off_led_trigger, LED_FULL);

	local_irq_disable();
	while (1) {
		if (cpu_wait)
			cpu_wait();
	}
}

/* Restart by poking the hardware reset latch; falls back to halt. */
void cobalt_machine_restart(char *command)
{
	writeb(RESET, RESET_PORT);

	/* we should never get here */
	cobalt_machine_halt();
}
gpl-2.0
Alucard24/Dorimanx-SG2-I9100-Kernel
arch/blackfin/mach-bf537/ints-priority.c
11116
6743
/* * Copyright 2005-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. * * Set up the interrupt priorities */ #include <linux/module.h> #include <linux/irq.h> #include <asm/blackfin.h> #include <asm/irq_handler.h> #include <asm/bfin5xx_spi.h> #include <asm/bfin_sport.h> #include <asm/bfin_can.h> #include <asm/bfin_dma.h> #include <asm/dpmc.h> void __init program_IAR(void) { /* Program the IAR0 Register with the configured priority */ bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) | ((CONFIG_IRQ_DMA_ERROR - 7) << IRQ_DMA_ERROR_POS) | ((CONFIG_IRQ_ERROR - 7) << IRQ_ERROR_POS) | ((CONFIG_IRQ_RTC - 7) << IRQ_RTC_POS) | ((CONFIG_IRQ_PPI - 7) << IRQ_PPI_POS) | ((CONFIG_IRQ_SPORT0_RX - 7) << IRQ_SPORT0_RX_POS) | ((CONFIG_IRQ_SPORT0_TX - 7) << IRQ_SPORT0_TX_POS) | ((CONFIG_IRQ_SPORT1_RX - 7) << IRQ_SPORT1_RX_POS)); bfin_write_SIC_IAR1(((CONFIG_IRQ_SPORT1_TX - 7) << IRQ_SPORT1_TX_POS) | ((CONFIG_IRQ_TWI - 7) << IRQ_TWI_POS) | ((CONFIG_IRQ_SPI - 7) << IRQ_SPI_POS) | ((CONFIG_IRQ_UART0_RX - 7) << IRQ_UART0_RX_POS) | ((CONFIG_IRQ_UART0_TX - 7) << IRQ_UART0_TX_POS) | ((CONFIG_IRQ_UART1_RX - 7) << IRQ_UART1_RX_POS) | ((CONFIG_IRQ_UART1_TX - 7) << IRQ_UART1_TX_POS) | ((CONFIG_IRQ_CAN_RX - 7) << IRQ_CAN_RX_POS)); bfin_write_SIC_IAR2(((CONFIG_IRQ_CAN_TX - 7) << IRQ_CAN_TX_POS) | ((CONFIG_IRQ_MAC_RX - 7) << IRQ_MAC_RX_POS) | ((CONFIG_IRQ_MAC_TX - 7) << IRQ_MAC_TX_POS) | ((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) | ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) | ((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) | ((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) | ((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS)); bfin_write_SIC_IAR3(((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) | ((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) | ((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS) | ((CONFIG_IRQ_PROG_INTA - 7) << IRQ_PROG_INTA_POS) | ((CONFIG_IRQ_PORTG_INTB - 7) << IRQ_PORTG_INTB_POS) | ((CONFIG_IRQ_MEM_DMA0 - 7) << IRQ_MEM_DMA0_POS) | ((CONFIG_IRQ_MEM_DMA1 - 7) << 
IRQ_MEM_DMA1_POS) | ((CONFIG_IRQ_WATCH - 7) << IRQ_WATCH_POS)); SSYNC(); } #define SPI_ERR_MASK (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE) /* SPI_STAT */ #define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF) /* SPORT_STAT */ #define PPI_ERR_MASK (0xFFFF & ~FLD) /* PPI_STATUS */ #define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE) /* EMAC_SYSTAT */ #define UART_ERR_MASK (0x6) /* UART_IIR */ #define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF) /* CAN_GIF */ static int error_int_mask; static void bf537_generic_error_mask_irq(struct irq_data *d) { error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR)); if (!error_int_mask) bfin_internal_mask_irq(IRQ_GENERIC_ERROR); } static void bf537_generic_error_unmask_irq(struct irq_data *d) { bfin_internal_unmask_irq(IRQ_GENERIC_ERROR); error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR); } static struct irq_chip bf537_generic_error_irqchip = { .name = "ERROR", .irq_ack = bfin_ack_noop, .irq_mask_ack = bf537_generic_error_mask_irq, .irq_mask = bf537_generic_error_mask_irq, .irq_unmask = bf537_generic_error_unmask_irq, }; static void bf537_demux_error_irq(unsigned int int_err_irq, struct irq_desc *inta_desc) { int irq = 0; #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK) irq = IRQ_MAC_ERROR; else #endif if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK) irq = IRQ_SPORT0_ERROR; else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK) irq = IRQ_SPORT1_ERROR; else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK) irq = IRQ_PPI_ERROR; else if (bfin_read_CAN_GIF() & CAN_ERR_MASK) irq = IRQ_CAN_ERROR; else if (bfin_read_SPI_STAT() & SPI_ERR_MASK) irq = IRQ_SPI_ERROR; else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK) irq = IRQ_UART0_ERROR; else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK) irq = IRQ_UART1_ERROR; if (irq) { if (error_int_mask & (1L << (irq - 
IRQ_PPI_ERROR))) bfin_handle_irq(irq); else { switch (irq) { case IRQ_PPI_ERROR: bfin_write_PPI_STATUS(PPI_ERR_MASK); break; #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) case IRQ_MAC_ERROR: bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK); break; #endif case IRQ_SPORT0_ERROR: bfin_write_SPORT0_STAT(SPORT_ERR_MASK); break; case IRQ_SPORT1_ERROR: bfin_write_SPORT1_STAT(SPORT_ERR_MASK); break; case IRQ_CAN_ERROR: bfin_write_CAN_GIS(CAN_ERR_MASK); break; case IRQ_SPI_ERROR: bfin_write_SPI_STAT(SPI_ERR_MASK); break; default: break; } pr_debug("IRQ %d:" " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n", irq); } } else pr_err("%s: IRQ ?: PERIPHERAL ERROR INTERRUPT ASSERTED BUT NO SOURCE FOUND\n", __func__); } #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) static int mac_rx_int_mask; static void bf537_mac_rx_mask_irq(struct irq_data *d) { mac_rx_int_mask &= ~(1L << (d->irq - IRQ_MAC_RX)); if (!mac_rx_int_mask) bfin_internal_mask_irq(IRQ_PH_INTA_MAC_RX); } static void bf537_mac_rx_unmask_irq(struct irq_data *d) { bfin_internal_unmask_irq(IRQ_PH_INTA_MAC_RX); mac_rx_int_mask |= 1L << (d->irq - IRQ_MAC_RX); } static struct irq_chip bf537_mac_rx_irqchip = { .name = "ERROR", .irq_ack = bfin_ack_noop, .irq_mask_ack = bf537_mac_rx_mask_irq, .irq_mask = bf537_mac_rx_mask_irq, .irq_unmask = bf537_mac_rx_unmask_irq, }; static void bf537_demux_mac_rx_irq(unsigned int int_irq, struct irq_desc *desc) { if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR)) bfin_handle_irq(IRQ_MAC_RX); else bfin_demux_gpio_irq(int_irq, desc); } #endif void __init init_mach_irq(void) { int irq; #if defined(CONFIG_BF537) || defined(CONFIG_BF536) /* Clear EMAC Interrupt Status bits so we can demux it later */ bfin_write_EMAC_SYSTAT(-1); #endif irq_set_chained_handler(IRQ_GENERIC_ERROR, bf537_demux_error_irq); for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) irq_set_chip_and_handler(irq, &bf537_generic_error_irqchip, handle_level_irq); #if defined(CONFIG_BFIN_MAC) || 
defined(CONFIG_BFIN_MAC_MODULE) irq_set_chained_handler(IRQ_PH_INTA_MAC_RX, bf537_demux_mac_rx_irq); irq_set_chip_and_handler(IRQ_MAC_RX, &bf537_mac_rx_irqchip, handle_level_irq); irq_set_chip_and_handler(IRQ_PORTH_INTA, &bf537_mac_rx_irqchip, handle_level_irq); irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq); #endif }
gpl-2.0
MinimalOS/android_kernel_xiaomi_armani
sound/aoa/soundbus/core.c
12140
5085
/* * soundbus * * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * * GPL v2, can be found in COPYING. */ #include <linux/module.h> #include "soundbus.h" MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Apple Soundbus"); struct soundbus_dev *soundbus_dev_get(struct soundbus_dev *dev) { struct device *tmp; if (!dev) return NULL; tmp = get_device(&dev->ofdev.dev); if (tmp) return to_soundbus_device(tmp); else return NULL; } EXPORT_SYMBOL_GPL(soundbus_dev_get); void soundbus_dev_put(struct soundbus_dev *dev) { if (dev) put_device(&dev->ofdev.dev); } EXPORT_SYMBOL_GPL(soundbus_dev_put); static int soundbus_probe(struct device *dev) { int error = -ENODEV; struct soundbus_driver *drv; struct soundbus_dev *soundbus_dev; drv = to_soundbus_driver(dev->driver); soundbus_dev = to_soundbus_device(dev); if (!drv->probe) return error; soundbus_dev_get(soundbus_dev); error = drv->probe(soundbus_dev); if (error) soundbus_dev_put(soundbus_dev); return error; } static int soundbus_uevent(struct device *dev, struct kobj_uevent_env *env) { struct soundbus_dev * soundbus_dev; struct platform_device * of; const char *compat; int retval = 0; int cplen, seen = 0; if (!dev) return -ENODEV; soundbus_dev = to_soundbus_device(dev); if (!soundbus_dev) return -ENODEV; of = &soundbus_dev->ofdev; /* stuff we want to pass to /sbin/hotplug */ retval = add_uevent_var(env, "OF_NAME=%s", of->dev.of_node->name); if (retval) return retval; retval = add_uevent_var(env, "OF_TYPE=%s", of->dev.of_node->type); if (retval) return retval; /* Since the compatible field can contain pretty much anything * it's not really legal to split it out with commas. We split it * up using a number of environment variables instead. 
*/ compat = of_get_property(of->dev.of_node, "compatible", &cplen); while (compat && cplen > 0) { int tmp = env->buflen; retval = add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat); if (retval) return retval; compat += env->buflen - tmp; cplen -= env->buflen - tmp; seen += 1; } retval = add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen); if (retval) return retval; retval = add_uevent_var(env, "MODALIAS=%s", soundbus_dev->modalias); return retval; } static int soundbus_device_remove(struct device *dev) { struct soundbus_dev * soundbus_dev = to_soundbus_device(dev); struct soundbus_driver * drv = to_soundbus_driver(dev->driver); if (dev->driver && drv->remove) drv->remove(soundbus_dev); soundbus_dev_put(soundbus_dev); return 0; } static void soundbus_device_shutdown(struct device *dev) { struct soundbus_dev * soundbus_dev = to_soundbus_device(dev); struct soundbus_driver * drv = to_soundbus_driver(dev->driver); if (dev->driver && drv->shutdown) drv->shutdown(soundbus_dev); } #ifdef CONFIG_PM static int soundbus_device_suspend(struct device *dev, pm_message_t state) { struct soundbus_dev * soundbus_dev = to_soundbus_device(dev); struct soundbus_driver * drv = to_soundbus_driver(dev->driver); if (dev->driver && drv->suspend) return drv->suspend(soundbus_dev, state); return 0; } static int soundbus_device_resume(struct device * dev) { struct soundbus_dev * soundbus_dev = to_soundbus_device(dev); struct soundbus_driver * drv = to_soundbus_driver(dev->driver); if (dev->driver && drv->resume) return drv->resume(soundbus_dev); return 0; } #endif /* CONFIG_PM */ static struct bus_type soundbus_bus_type = { .name = "aoa-soundbus", .probe = soundbus_probe, .uevent = soundbus_uevent, .remove = soundbus_device_remove, .shutdown = soundbus_device_shutdown, #ifdef CONFIG_PM .suspend = soundbus_device_suspend, .resume = soundbus_device_resume, #endif .dev_attrs = soundbus_dev_attrs, }; int soundbus_add_one(struct soundbus_dev *dev) { static int devcount; /* sanity checks */ if 
(!dev->attach_codec || !dev->ofdev.dev.of_node || dev->pcmname || dev->pcmid != -1) { printk(KERN_ERR "soundbus: adding device failed sanity check!\n"); return -EINVAL; } dev_set_name(&dev->ofdev.dev, "soundbus:%x", ++devcount); dev->ofdev.dev.bus = &soundbus_bus_type; return of_device_register(&dev->ofdev); } EXPORT_SYMBOL_GPL(soundbus_add_one); void soundbus_remove_one(struct soundbus_dev *dev) { of_device_unregister(&dev->ofdev); } EXPORT_SYMBOL_GPL(soundbus_remove_one); int soundbus_register_driver(struct soundbus_driver *drv) { /* initialize common driver fields */ drv->driver.name = drv->name; drv->driver.bus = &soundbus_bus_type; /* register with core */ return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(soundbus_register_driver); void soundbus_unregister_driver(struct soundbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(soundbus_unregister_driver); static int __init soundbus_init(void) { return bus_register(&soundbus_bus_type); } static void __exit soundbus_exit(void) { bus_unregister(&soundbus_bus_type); } subsys_initcall(soundbus_init); module_exit(soundbus_exit);
gpl-2.0
kerneldevs/RM-35-KERNEL-PECAN
sound/isa/gus/gus_uart.c
13164
8087
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Routines for the GF1 MIDI interface - like UART 6850 * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/time.h> #include <sound/core.h> #include <sound/gus.h> static void snd_gf1_interrupt_midi_in(struct snd_gus_card * gus) { int count; unsigned char stat, data, byte; unsigned long flags; count = 10; while (count) { spin_lock_irqsave(&gus->uart_cmd_lock, flags); stat = snd_gf1_uart_stat(gus); if (!(stat & 0x01)) { /* data in Rx FIFO? 
*/ spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); count--; continue; } count = 100; /* arm counter to new value */ data = snd_gf1_uart_get(gus); if (!(gus->gf1.uart_cmd & 0x80)) { spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); continue; } if (stat & 0x10) { /* framing error */ gus->gf1.uart_framing++; spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); continue; } byte = snd_gf1_uart_get(gus); spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); snd_rawmidi_receive(gus->midi_substream_input, &byte, 1); if (stat & 0x20) { gus->gf1.uart_overrun++; } } } static void snd_gf1_interrupt_midi_out(struct snd_gus_card * gus) { char byte; unsigned long flags; /* try unlock output */ if (snd_gf1_uart_stat(gus) & 0x01) snd_gf1_interrupt_midi_in(gus); spin_lock_irqsave(&gus->uart_cmd_lock, flags); if (snd_gf1_uart_stat(gus) & 0x02) { /* Tx FIFO free? */ if (snd_rawmidi_transmit(gus->midi_substream_output, &byte, 1) != 1) { /* no other bytes or error */ snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd & ~0x20); /* disable Tx interrupt */ } else { snd_gf1_uart_put(gus, byte); } } spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); } static void snd_gf1_uart_reset(struct snd_gus_card * gus, int close) { snd_gf1_uart_cmd(gus, 0x03); /* reset */ if (!close && gus->uart_enable) { udelay(160); snd_gf1_uart_cmd(gus, 0x00); /* normal operations */ } } static int snd_gf1_uart_output_open(struct snd_rawmidi_substream *substream) { unsigned long flags; struct snd_gus_card *gus; gus = substream->rmidi->private_data; spin_lock_irqsave(&gus->uart_cmd_lock, flags); if (!(gus->gf1.uart_cmd & 0x80)) { /* input active? 
*/ snd_gf1_uart_reset(gus, 0); } gus->gf1.interrupt_handler_midi_out = snd_gf1_interrupt_midi_out; gus->midi_substream_output = substream; spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); #if 0 snd_printk(KERN_DEBUG "write init - cmd = 0x%x, stat = 0x%x\n", gus->gf1.uart_cmd, snd_gf1_uart_stat(gus)); #endif return 0; } static int snd_gf1_uart_input_open(struct snd_rawmidi_substream *substream) { unsigned long flags; struct snd_gus_card *gus; int i; gus = substream->rmidi->private_data; spin_lock_irqsave(&gus->uart_cmd_lock, flags); if (gus->gf1.interrupt_handler_midi_out != snd_gf1_interrupt_midi_out) { snd_gf1_uart_reset(gus, 0); } gus->gf1.interrupt_handler_midi_in = snd_gf1_interrupt_midi_in; gus->midi_substream_input = substream; if (gus->uart_enable) { for (i = 0; i < 1000 && (snd_gf1_uart_stat(gus) & 0x01); i++) snd_gf1_uart_get(gus); /* clean Rx */ if (i >= 1000) snd_printk(KERN_ERR "gus midi uart init read - cleanup error\n"); } spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); #if 0 snd_printk(KERN_DEBUG "read init - enable = %i, cmd = 0x%x, stat = 0x%x\n", gus->uart_enable, gus->gf1.uart_cmd, snd_gf1_uart_stat(gus)); snd_printk(KERN_DEBUG "[0x%x] reg (ctrl/status) = 0x%x, reg (data) = 0x%x " "(page = 0x%x)\n", gus->gf1.port + 0x100, inb(gus->gf1.port + 0x100), inb(gus->gf1.port + 0x101), inb(gus->gf1.port + 0x102)); #endif return 0; } static int snd_gf1_uart_output_close(struct snd_rawmidi_substream *substream) { unsigned long flags; struct snd_gus_card *gus; gus = substream->rmidi->private_data; spin_lock_irqsave(&gus->uart_cmd_lock, flags); if (gus->gf1.interrupt_handler_midi_in != snd_gf1_interrupt_midi_in) snd_gf1_uart_reset(gus, 1); snd_gf1_set_default_handlers(gus, SNDRV_GF1_HANDLER_MIDI_OUT); gus->midi_substream_output = NULL; spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); return 0; } static int snd_gf1_uart_input_close(struct snd_rawmidi_substream *substream) { unsigned long flags; struct snd_gus_card *gus; gus = 
substream->rmidi->private_data; spin_lock_irqsave(&gus->uart_cmd_lock, flags); if (gus->gf1.interrupt_handler_midi_out != snd_gf1_interrupt_midi_out) snd_gf1_uart_reset(gus, 1); snd_gf1_set_default_handlers(gus, SNDRV_GF1_HANDLER_MIDI_IN); gus->midi_substream_input = NULL; spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); return 0; } static void snd_gf1_uart_input_trigger(struct snd_rawmidi_substream *substream, int up) { struct snd_gus_card *gus; unsigned long flags; gus = substream->rmidi->private_data; spin_lock_irqsave(&gus->uart_cmd_lock, flags); if (up) { if ((gus->gf1.uart_cmd & 0x80) == 0) snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd | 0x80); /* enable Rx interrupts */ } else { if (gus->gf1.uart_cmd & 0x80) snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd & ~0x80); /* disable Rx interrupts */ } spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); } static void snd_gf1_uart_output_trigger(struct snd_rawmidi_substream *substream, int up) { unsigned long flags; struct snd_gus_card *gus; char byte; int timeout; gus = substream->rmidi->private_data; spin_lock_irqsave(&gus->uart_cmd_lock, flags); if (up) { if ((gus->gf1.uart_cmd & 0x20) == 0) { spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); /* wait for empty Rx - Tx is probably unlocked */ timeout = 10000; while (timeout-- > 0 && snd_gf1_uart_stat(gus) & 0x01); /* Tx FIFO free? 
*/ spin_lock_irqsave(&gus->uart_cmd_lock, flags); if (gus->gf1.uart_cmd & 0x20) { spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); return; } if (snd_gf1_uart_stat(gus) & 0x02) { if (snd_rawmidi_transmit(substream, &byte, 1) != 1) { spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); return; } snd_gf1_uart_put(gus, byte); } snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd | 0x20); /* enable Tx interrupt */ } } else { if (gus->gf1.uart_cmd & 0x20) snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd & ~0x20); } spin_unlock_irqrestore(&gus->uart_cmd_lock, flags); } static struct snd_rawmidi_ops snd_gf1_uart_output = { .open = snd_gf1_uart_output_open, .close = snd_gf1_uart_output_close, .trigger = snd_gf1_uart_output_trigger, }; static struct snd_rawmidi_ops snd_gf1_uart_input = { .open = snd_gf1_uart_input_open, .close = snd_gf1_uart_input_close, .trigger = snd_gf1_uart_input_trigger, }; int snd_gf1_rawmidi_new(struct snd_gus_card * gus, int device, struct snd_rawmidi ** rrawmidi) { struct snd_rawmidi *rmidi; int err; if (rrawmidi) *rrawmidi = NULL; if ((err = snd_rawmidi_new(gus->card, "GF1", device, 1, 1, &rmidi)) < 0) return err; strcpy(rmidi->name, gus->interwave ? "AMD InterWave" : "GF1"); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_gf1_uart_output); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_gf1_uart_input); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX; rmidi->private_data = gus; gus->midi_uart = rmidi; if (rrawmidi) *rrawmidi = rmidi; return err; }
gpl-2.0
cmenard/OverStock_T959
arch/blackfin/mach-bf533/ints-priority.c
13676
1585
/* * Set up the interrupt priorities * * Copyright 2005-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/irq.h> #include <asm/blackfin.h> void __init program_IAR(void) { /* Program the IAR0 Register with the configured priority */ bfin_write_SIC_IAR0(((CONFIG_PLLWAKE_ERROR - 7) << PLLWAKE_ERROR_POS) | ((CONFIG_DMA_ERROR - 7) << DMA_ERROR_POS) | ((CONFIG_PPI_ERROR - 7) << PPI_ERROR_POS) | ((CONFIG_SPORT0_ERROR - 7) << SPORT0_ERROR_POS) | ((CONFIG_SPI_ERROR - 7) << SPI_ERROR_POS) | ((CONFIG_SPORT1_ERROR - 7) << SPORT1_ERROR_POS) | ((CONFIG_UART_ERROR - 7) << UART_ERROR_POS) | ((CONFIG_RTC_ERROR - 7) << RTC_ERROR_POS)); bfin_write_SIC_IAR1(((CONFIG_DMA0_PPI - 7) << DMA0_PPI_POS) | ((CONFIG_DMA1_SPORT0RX - 7) << DMA1_SPORT0RX_POS) | ((CONFIG_DMA2_SPORT0TX - 7) << DMA2_SPORT0TX_POS) | ((CONFIG_DMA3_SPORT1RX - 7) << DMA3_SPORT1RX_POS) | ((CONFIG_DMA4_SPORT1TX - 7) << DMA4_SPORT1TX_POS) | ((CONFIG_DMA5_SPI - 7) << DMA5_SPI_POS) | ((CONFIG_DMA6_UARTRX - 7) << DMA6_UARTRX_POS) | ((CONFIG_DMA7_UARTTX - 7) << DMA7_UARTTX_POS)); bfin_write_SIC_IAR2(((CONFIG_TIMER0 - 7) << TIMER0_POS) | ((CONFIG_TIMER1 - 7) << TIMER1_POS) | ((CONFIG_TIMER2 - 7) << TIMER2_POS) | ((CONFIG_PFA - 7) << PFA_POS) | ((CONFIG_PFB - 7) << PFB_POS) | ((CONFIG_MEMDMA0 - 7) << MEMDMA0_POS) | ((CONFIG_MEMDMA1 - 7) << MEMDMA1_POS) | ((CONFIG_WDTIMER - 7) << WDTIMER_POS)); SSYNC(); }
gpl-2.0
adam-lee/linux
sound/soc/codecs/tlv320aic26.c
109
11145
/* * Texas Instruments TLV320AIC26 low power audio CODEC * ALSA SoC CODEC driver * * Copyright (C) 2008 Secret Lab Technologies Ltd. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/device.h> #include <linux/sysfs.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include "tlv320aic26.h" MODULE_DESCRIPTION("ASoC TLV320AIC26 codec driver"); MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_LICENSE("GPL"); /* AIC26 driver private data */ struct aic26 { struct spi_device *spi; struct regmap *regmap; struct snd_soc_codec *codec; int master; int datfm; int mclk; /* Keyclick parameters */ int keyclick_amplitude; int keyclick_freq; int keyclick_len; }; static const struct snd_soc_dapm_widget tlv320aic26_dapm_widgets[] = { SND_SOC_DAPM_INPUT("MICIN"), SND_SOC_DAPM_INPUT("AUX"), SND_SOC_DAPM_OUTPUT("HPL"), SND_SOC_DAPM_OUTPUT("HPR"), }; static const struct snd_soc_dapm_route tlv320aic26_dapm_routes[] = { { "Capture", NULL, "MICIN" }, { "Capture", NULL, "AUX" }, { "HPL", NULL, "Playback" }, { "HPR", NULL, "Playback" }, }; /* --------------------------------------------------------------------- * Digital Audio Interface Operations */ static int aic26_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec); int fsref, divisor, wlen, pval, jval, dval, qval; u16 reg; dev_dbg(&aic26->spi->dev, "aic26_hw_params(substream=%p, params=%p)\n", substream, params); dev_dbg(&aic26->spi->dev, "rate=%i format=%i\n", params_rate(params), params_format(params)); switch (params_rate(params)) { case 8000: fsref = 48000; divisor = AIC26_DIV_6; break; case 11025: fsref = 44100; divisor = AIC26_DIV_4; break; 
case 12000: fsref = 48000; divisor = AIC26_DIV_4; break; case 16000: fsref = 48000; divisor = AIC26_DIV_3; break; case 22050: fsref = 44100; divisor = AIC26_DIV_2; break; case 24000: fsref = 48000; divisor = AIC26_DIV_2; break; case 32000: fsref = 48000; divisor = AIC26_DIV_1_5; break; case 44100: fsref = 44100; divisor = AIC26_DIV_1; break; case 48000: fsref = 48000; divisor = AIC26_DIV_1; break; default: dev_dbg(&aic26->spi->dev, "bad rate\n"); return -EINVAL; } /* select data word length */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: wlen = AIC26_WLEN_16; break; case SNDRV_PCM_FORMAT_S16_BE: wlen = AIC26_WLEN_16; break; case SNDRV_PCM_FORMAT_S24_BE: wlen = AIC26_WLEN_24; break; case SNDRV_PCM_FORMAT_S32_BE: wlen = AIC26_WLEN_32; break; default: dev_dbg(&aic26->spi->dev, "bad format\n"); return -EINVAL; } /** * Configure PLL * fsref = (mclk * PLLM) / 2048 * where PLLM = J.DDDD (DDDD register ranges from 0 to 9999, decimal) */ pval = 1; /* compute J portion of multiplier */ jval = fsref / (aic26->mclk / 2048); /* compute fractional DDDD component of multiplier */ dval = fsref - (jval * (aic26->mclk / 2048)); dval = (10000 * dval) / (aic26->mclk / 2048); dev_dbg(&aic26->spi->dev, "Setting PLLM to %d.%04d\n", jval, dval); qval = 0; reg = 0x8000 | qval << 11 | pval << 8 | jval << 2; snd_soc_write(codec, AIC26_REG_PLL_PROG1, reg); reg = dval << 2; snd_soc_write(codec, AIC26_REG_PLL_PROG2, reg); /* Audio Control 3 (master mode, fsref rate) */ if (aic26->master) reg = 0x0800; if (fsref == 48000) reg = 0x2000; snd_soc_update_bits(codec, AIC26_REG_AUDIO_CTRL3, 0xf800, reg); /* Audio Control 1 (FSref divisor) */ reg = wlen | aic26->datfm | (divisor << 3) | divisor; snd_soc_update_bits(codec, AIC26_REG_AUDIO_CTRL1, 0xfff, reg); return 0; } /** * aic26_mute - Mute control to reduce noise when changing audio format */ static int aic26_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; struct aic26 *aic26 = 
snd_soc_codec_get_drvdata(codec); u16 reg; dev_dbg(&aic26->spi->dev, "aic26_mute(dai=%p, mute=%i)\n", dai, mute); if (mute) reg = 0x8080; else reg = 0; snd_soc_update_bits(codec, AIC26_REG_DAC_GAIN, 0x8000, reg); return 0; } static int aic26_set_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec); dev_dbg(&aic26->spi->dev, "aic26_set_sysclk(dai=%p, clk_id==%i," " freq=%i, dir=%i)\n", codec_dai, clk_id, freq, dir); /* MCLK needs to fall between 2MHz and 50 MHz */ if ((freq < 2000000) || (freq > 50000000)) return -EINVAL; aic26->mclk = freq; return 0; } static int aic26_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec); dev_dbg(&aic26->spi->dev, "aic26_set_fmt(dai=%p, fmt==%i)\n", codec_dai, fmt); /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: aic26->master = 1; break; case SND_SOC_DAIFMT_CBS_CFS: aic26->master = 0; break; default: dev_dbg(&aic26->spi->dev, "bad master\n"); return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: aic26->datfm = AIC26_DATFM_I2S; break; case SND_SOC_DAIFMT_DSP_A: aic26->datfm = AIC26_DATFM_DSP; break; case SND_SOC_DAIFMT_RIGHT_J: aic26->datfm = AIC26_DATFM_RIGHTJ; break; case SND_SOC_DAIFMT_LEFT_J: aic26->datfm = AIC26_DATFM_LEFTJ; break; default: dev_dbg(&aic26->spi->dev, "bad format\n"); return -EINVAL; } return 0; } /* --------------------------------------------------------------------- * Digital Audio Interface Definition */ #define AIC26_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\ SNDRV_PCM_RATE_48000) #define AIC26_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE |\ 
SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE) static const struct snd_soc_dai_ops aic26_dai_ops = { .hw_params = aic26_hw_params, .digital_mute = aic26_mute, .set_sysclk = aic26_set_sysclk, .set_fmt = aic26_set_fmt, }; static struct snd_soc_dai_driver aic26_dai = { .name = "tlv320aic26-hifi", .playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 2, .rates = AIC26_RATES, .formats = AIC26_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 2, .channels_max = 2, .rates = AIC26_RATES, .formats = AIC26_FORMATS, }, .ops = &aic26_dai_ops, }; /* --------------------------------------------------------------------- * ALSA controls */ static const char *aic26_capture_src_text[] = {"Mic", "Aux"}; static SOC_ENUM_SINGLE_DECL(aic26_capture_src_enum, AIC26_REG_AUDIO_CTRL1, 12, aic26_capture_src_text); static const struct snd_kcontrol_new aic26_snd_controls[] = { /* Output */ SOC_DOUBLE("PCM Playback Volume", AIC26_REG_DAC_GAIN, 8, 0, 0x7f, 1), SOC_DOUBLE("PCM Playback Switch", AIC26_REG_DAC_GAIN, 15, 7, 1, 1), SOC_SINGLE("PCM Capture Volume", AIC26_REG_ADC_GAIN, 8, 0x7f, 0), SOC_SINGLE("PCM Capture Mute", AIC26_REG_ADC_GAIN, 15, 1, 1), SOC_SINGLE("Keyclick activate", AIC26_REG_AUDIO_CTRL2, 15, 0x1, 0), SOC_SINGLE("Keyclick amplitude", AIC26_REG_AUDIO_CTRL2, 12, 0x7, 0), SOC_SINGLE("Keyclick frequency", AIC26_REG_AUDIO_CTRL2, 8, 0x7, 0), SOC_SINGLE("Keyclick period", AIC26_REG_AUDIO_CTRL2, 4, 0xf, 0), SOC_ENUM("Capture Source", aic26_capture_src_enum), }; /* --------------------------------------------------------------------- * SPI device portion of driver: sysfs files for debugging */ static ssize_t aic26_keyclick_show(struct device *dev, struct device_attribute *attr, char *buf) { struct aic26 *aic26 = dev_get_drvdata(dev); int val, amp, freq, len; val = snd_soc_read(aic26->codec, AIC26_REG_AUDIO_CTRL2); amp = (val >> 12) & 0x7; freq = (125 << ((val >> 8) & 0x7)) >> 1; len = 2 * (1 + ((val >> 4) & 0xf)); return sprintf(buf, "amp=%x 
freq=%iHz len=%iclks\n", amp, freq, len); } /* Any write to the keyclick attribute will trigger the keyclick event */ static ssize_t aic26_keyclick_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct aic26 *aic26 = dev_get_drvdata(dev); snd_soc_update_bits(aic26->codec, AIC26_REG_AUDIO_CTRL2, 0x8000, 0x800); return count; } static DEVICE_ATTR(keyclick, 0644, aic26_keyclick_show, aic26_keyclick_set); /* --------------------------------------------------------------------- * SoC CODEC portion of driver: probe and release routines */ static int aic26_probe(struct snd_soc_codec *codec) { struct aic26 *aic26 = dev_get_drvdata(codec->dev); int ret, reg; aic26->codec = codec; /* Reset the codec to power on defaults */ snd_soc_write(codec, AIC26_REG_RESET, 0xBB00); /* Power up CODEC */ snd_soc_write(codec, AIC26_REG_POWER_CTRL, 0); /* Audio Control 3 (master mode, fsref rate) */ reg = snd_soc_read(codec, AIC26_REG_AUDIO_CTRL3); reg &= ~0xf800; reg |= 0x0800; /* set master mode */ snd_soc_write(codec, AIC26_REG_AUDIO_CTRL3, reg); /* Register the sysfs files for debugging */ /* Create SysFS files */ ret = device_create_file(codec->dev, &dev_attr_keyclick); if (ret) dev_info(codec->dev, "error creating sysfs files\n"); return 0; } static struct snd_soc_codec_driver aic26_soc_codec_dev = { .probe = aic26_probe, .controls = aic26_snd_controls, .num_controls = ARRAY_SIZE(aic26_snd_controls), .dapm_widgets = tlv320aic26_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(tlv320aic26_dapm_widgets), .dapm_routes = tlv320aic26_dapm_routes, .num_dapm_routes = ARRAY_SIZE(tlv320aic26_dapm_routes), }; static const struct regmap_config aic26_regmap = { .reg_bits = 16, .val_bits = 16, }; /* --------------------------------------------------------------------- * SPI device portion of driver: probe and release routines and SPI * driver registration. 
*/ static int aic26_spi_probe(struct spi_device *spi) { struct aic26 *aic26; int ret; dev_dbg(&spi->dev, "probing tlv320aic26 spi device\n"); /* Allocate driver data */ aic26 = devm_kzalloc(&spi->dev, sizeof *aic26, GFP_KERNEL); if (!aic26) return -ENOMEM; aic26->regmap = devm_regmap_init_spi(spi, &aic26_regmap); if (IS_ERR(aic26->regmap)) return PTR_ERR(aic26->regmap); /* Initialize the driver data */ aic26->spi = spi; dev_set_drvdata(&spi->dev, aic26); aic26->master = 1; ret = snd_soc_register_codec(&spi->dev, &aic26_soc_codec_dev, &aic26_dai, 1); return ret; } static int aic26_spi_remove(struct spi_device *spi) { snd_soc_unregister_codec(&spi->dev); return 0; } static struct spi_driver aic26_spi = { .driver = { .name = "tlv320aic26-codec", .owner = THIS_MODULE, }, .probe = aic26_spi_probe, .remove = aic26_spi_remove, }; module_spi_driver(aic26_spi);
gpl-2.0
ApisSys/linux-analogdevicesinc-ap6
ipc/shm.c
109
34148
/* * linux/ipc/shm.c * Copyright (C) 1992, 1993 Krishna Balasubramanian * Many improvements/fixes by Bruno Haible. * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994. * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli. * * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * BIGMEM support, Andrea Arcangeli <andrea@suse.de> * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr> * HIGHMEM support, Ingo Molnar <mingo@redhat.com> * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com> * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com> * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com> * * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> * * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> * * Better ipc lock (kern_ipc_perm.lock) handling * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013. 
*/ #include <linux/slab.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/init.h> #include <linux/file.h> #include <linux/mman.h> #include <linux/shmem_fs.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/mount.h> #include <linux/ipc_namespace.h> #include <linux/uaccess.h> #include "util.h" struct shm_file_data { int id; struct ipc_namespace *ns; struct file *file; const struct vm_operations_struct *vm_ops; }; #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data)) static const struct file_operations shm_file_operations; static const struct vm_operations_struct shm_vm_ops; #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS]) #define shm_unlock(shp) \ ipc_unlock(&(shp)->shm_perm) static int newseg(struct ipc_namespace *, struct ipc_params *); static void shm_open(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma); static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp); #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif void shm_init_ns(struct ipc_namespace *ns) { ns->shm_ctlmax = SHMMAX; ns->shm_ctlall = SHMALL; ns->shm_ctlmni = SHMMNI; ns->shm_rmid_forced = 0; ns->shm_tot = 0; ipc_init_ids(&shm_ids(ns)); } /* * Called with shm_ids.rwsem (writer) and the shp structure locked. * Only shm_ids.rwsem remains locked on exit. 
*/ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_nattch) { shp->shm_perm.mode |= SHM_DEST; /* Do not find it any more */ shp->shm_perm.key = IPC_PRIVATE; shm_unlock(shp); } else shm_destroy(ns, shp); } #ifdef CONFIG_IPC_NS void shm_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &shm_ids(ns), do_shm_rmid); idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr); } #endif static int __init ipc_ns_init(void) { shm_init_ns(&init_ipc_ns); return 0; } pure_initcall(ipc_ns_init); void __init shm_init(void) { ipc_init_proc_interface("sysvipc/shm", #if BITS_PER_LONG <= 32 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #else " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #endif IPC_SHM_IDS, sysvipc_shm_proc_show); } static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } /* * shm_lock_(check_) routines are called in the paths where the rwsem * is not necessarily held. */ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); /* * Callers of shm_lock() must validate the status of the returned ipc * object pointer (as returned by ipc_lock()), and error out as * appropriate. 
*/ if (IS_ERR(ipcp)) return (void *)ipcp; return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) { rcu_read_lock(); ipc_lock_object(&ipcp->shm_perm); } static void shm_rcu_free(struct rcu_head *head) { struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); struct shmid_kernel *shp = ipc_rcu_to_struct(p); security_shm_free(shp); ipc_rcu_free(head); } static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) { list_del(&s->shm_clist); ipc_rmid(&shm_ids(ns), &s->shm_perm); } static int __shm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; shp = shm_lock(sfd->ns, sfd->id); if (IS_ERR(shp)) return PTR_ERR(shp); shp->shm_atim = get_seconds(); shp->shm_lprid = task_tgid_vnr(current); shp->shm_nattch++; shm_unlock(shp); return 0; } /* This is called by fork, once for every shm attach. */ static void shm_open(struct vm_area_struct *vma) { int err = __shm_open(vma); /* * We raced in the idr lookup or with shm_destroy(). * Either way, the ID is busted. */ WARN_ON_ONCE(err); } /* * shm_destroy - free the struct shmid_kernel * * @ns: namespace * @shp: struct to free * * It has to be called with shp and shm_ids.rwsem (writer) locked, * but returns with shp unlocked and freed. 
*/ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { struct file *shm_file; shm_file = shp->shm_file; shp->shm_file = NULL; ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; shm_rmid(ns, shp); shm_unlock(shp); if (!is_file_hugepages(shm_file)) shmem_lock(shm_file, 0, shp->mlock_user); else if (shp->mlock_user) user_shm_unlock(i_size_read(file_inode(shm_file)), shp->mlock_user); fput(shm_file); ipc_rcu_putref(shp, shm_rcu_free); } /* * shm_may_destroy - identifies whether shm segment should be destroyed now * * Returns true if and only if there are no active users of the segment and * one of the following is true: * * 1) shmctl(id, IPC_RMID, NULL) was called for this shp * * 2) sysctl kernel.shm_rmid_forced is set to 1. */ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { return (shp->shm_nattch == 0) && (ns->shm_rmid_forced || (shp->shm_perm.mode & SHM_DEST)); } /* * remove the attach descriptor vma. * free memory for segment if it is marked destroyed. * The descriptor has already been removed from the current->mm->mmap list * and will later be kfree()d. */ static void shm_close(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; struct ipc_namespace *ns = sfd->ns; down_write(&shm_ids(ns).rwsem); /* remove from the list of attaches of the shm segment */ shp = shm_lock(ns, sfd->id); /* * We raced in the idr lookup or with shm_destroy(). * Either way, the ID is busted. 
*/ if (WARN_ON_ONCE(IS_ERR(shp))) goto done; /* no-op */ shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); done: up_write(&shm_ids(ns).rwsem); } /* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_orphaned(int id, void *p, void *data) { struct ipc_namespace *ns = data; struct kern_ipc_perm *ipcp = p; struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); /* * We want to destroy segments without users and with already * exit'ed originating process. * * As shp->* are changed under rwsem, it's safe to skip shp locking. */ if (shp->shm_creator != NULL) return 0; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } return 0; } void shm_destroy_orphaned(struct ipc_namespace *ns) { down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns); up_write(&shm_ids(ns).rwsem); } /* Locking assumes this will only be called with task == current */ void exit_shm(struct task_struct *task) { struct ipc_namespace *ns = task->nsproxy->ipc_ns; struct shmid_kernel *shp, *n; if (list_empty(&task->sysvshm.shm_clist)) return; /* * If kernel.shm_rmid_forced is not set then only keep track of * which shmids are orphaned, so that a later set of the sysctl * can clean them up. */ if (!ns->shm_rmid_forced) { down_read(&shm_ids(ns).rwsem); list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist) shp->shm_creator = NULL; /* * Only under read lock but we are only called on current * so no entry on the list will be shared. */ list_del(&task->sysvshm.shm_clist); up_read(&shm_ids(ns).rwsem); return; } /* * Destroy all already created segments, that were not yet mapped, * and mark any mapped as orphan to cover the sysctl toggling. * Destroy is skipped if shm_may_destroy() returns false. 
*/ down_write(&shm_ids(ns).rwsem); list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) { shp->shm_creator = NULL; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } } /* Remove the list head from any segments still attached. */ list_del(&task->sysvshm.shm_clist); up_write(&shm_ids(ns).rwsem); } static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); return sfd->vm_ops->fault(vma, vmf); } #ifdef CONFIG_NUMA static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); int err = 0; if (sfd->vm_ops->set_policy) err = sfd->vm_ops->set_policy(vma, new); return err; } static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct mempolicy *pol = NULL; if (sfd->vm_ops->get_policy) pol = sfd->vm_ops->get_policy(vma, addr); else if (vma->vm_policy) pol = vma->vm_policy; return pol; } #endif static int shm_mmap(struct file *file, struct vm_area_struct *vma) { struct shm_file_data *sfd = shm_file_data(file); int ret; /* * In case of remap_file_pages() emulation, the file can represent * removed IPC ID: propogate shm_lock() error to caller. 
*/ ret =__shm_open(vma); if (ret) return ret; ret = sfd->file->f_op->mmap(sfd->file, vma); if (ret) { shm_close(vma); return ret; } sfd->vm_ops = vma->vm_ops; #ifdef CONFIG_MMU WARN_ON(!sfd->vm_ops->fault); #endif vma->vm_ops = &shm_vm_ops; return 0; } static int shm_release(struct inode *ino, struct file *file) { struct shm_file_data *sfd = shm_file_data(file); put_ipc_ns(sfd->ns); shm_file_data(file) = NULL; kfree(sfd); return 0; } static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fsync) return -EINVAL; return sfd->file->f_op->fsync(sfd->file, start, end, datasync); } static long shm_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fallocate) return -EOPNOTSUPP; return sfd->file->f_op->fallocate(file, mode, offset, len); } static unsigned long shm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct shm_file_data *sfd = shm_file_data(file); return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff, flags); } static const struct file_operations shm_file_operations = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, .fallocate = shm_fallocate, }; /* * shm_file_operations_huge is now identical to shm_file_operations, * but we keep it distinct for the sake of is_file_shm_hugepages(). 
*/ static const struct file_operations shm_file_operations_huge = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, .fallocate = shm_fallocate, }; bool is_file_shm_hugepages(struct file *file) { return file->f_op == &shm_file_operations_huge; } static const struct vm_operations_struct shm_vm_ops = { .open = shm_open, /* callback for a new vm-area open */ .close = shm_close, /* callback for when the vm-area is released */ .fault = shm_fault, #if defined(CONFIG_NUMA) .set_policy = shm_set_policy, .get_policy = shm_get_policy, #endif }; /** * newseg - Create a new shared memory segment * @ns: namespace * @params: ptr to the structure that contains key, size and shmflg * * Called with shm_ids.rwsem held as a writer. */ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) { key_t key = params->key; int shmflg = params->flg; size_t size = params->u.size; int error; struct shmid_kernel *shp; size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; struct file *file; char name[13]; int id; vm_flags_t acctflag = 0; if (size < SHMMIN || size > ns->shm_ctlmax) return -EINVAL; if (numpages << PAGE_SHIFT < size) return -ENOSPC; if (ns->shm_tot + numpages < ns->shm_tot || ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; shp = ipc_rcu_alloc(sizeof(*shp)); if (!shp) return -ENOMEM; shp->shm_perm.key = key; shp->shm_perm.mode = (shmflg & S_IRWXUGO); shp->mlock_user = NULL; shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { ipc_rcu_putref(shp, ipc_rcu_free); return error; } sprintf(name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { struct hstate *hs; size_t hugesize; hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); if (!hs) { error = -EINVAL; goto no_file; } hugesize = ALIGN(size, huge_page_size(hs)); /* hugetlb_file_setup applies strict accounting */ if (shmflg & SHM_NORESERVE) acctflag = VM_NORESERVE; file = hugetlb_file_setup(name, 
hugesize, acctflag, &shp->mlock_user, HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { /* * Do not allow no accounting for OVERCOMMIT_NEVER, even * if it's asked for. */ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; file = shmem_kernel_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) goto no_file; shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; shp->shm_ctim = get_seconds(); shp->shm_segsz = size; shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); if (id < 0) { error = id; goto no_id; } list_add(&shp->shm_clist, &current->sysvshm.shm_clist); /* * shmid gets reported as "inode#" in /proc/pid/maps. * proc-ps tools use this. Changing this will break them. */ file_inode(file)->i_ino = shp->shm_perm.id; ns->shm_tot += numpages; error = shp->shm_perm.id; ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); return error; no_id: if (is_file_hugepages(file) && shp->mlock_user) user_shm_unlock(size, shp->mlock_user); fput(file); no_file: ipc_rcu_putref(shp, shm_rcu_free); return error; } /* * Called with shm_ids.rwsem and ipcp locked. */ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); return security_shm_associate(shp, shmflg); } /* * Called with shm_ids.rwsem and ipcp locked. 
*/ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_segsz < params->u.size) return -EINVAL; return 0; } SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) { struct ipc_namespace *ns; static const struct ipc_ops shm_ops = { .getnew = newseg, .associate = shm_security, .more_checks = shm_more_checks, }; struct ipc_params shm_params; ns = current->nsproxy->ipc_ns; shm_params.key = key; shm_params.flg = shmflg; shm_params.u.size = size; return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params); } static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shmid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm); out.shm_segsz = in->shm_segsz; out.shm_atime = in->shm_atime; out.shm_dtime = in->shm_dtime; out.shm_ctime = in->shm_ctime; out.shm_cpid = in->shm_cpid; out.shm_lpid = in->shm_lpid; out.shm_nattch = in->shm_nattch; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static inline unsigned long copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { switch (version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct shmid_ds tbuf_old; if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->shm_perm.uid = tbuf_old.shm_perm.uid; out->shm_perm.gid = tbuf_old.shm_perm.gid; out->shm_perm.mode = tbuf_old.shm_perm.mode; return 0; } default: return -EINVAL; } } static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shminfo out; if (in->shmmax > INT_MAX) out.shmmax = INT_MAX; else 
out.shmmax = (int)in->shmmax; out.shmmin = in->shmmin; out.shmmni = in->shmmni; out.shmseg = in->shmseg; out.shmall = in->shmall; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } /* * Calculate and add used RSS and swap pages of a shm. * Called with shm_ids.rwsem held as a reader */ static void shm_add_rss_swap(struct shmid_kernel *shp, unsigned long *rss_add, unsigned long *swp_add) { struct inode *inode; inode = file_inode(shp->shm_file); if (is_file_hugepages(shp->shm_file)) { struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_file(shp->shm_file); *rss_add += pages_per_huge_page(h) * mapping->nrpages; } else { #ifdef CONFIG_SHMEM struct shmem_inode_info *info = SHMEM_I(inode); spin_lock_irq(&info->lock); *rss_add += inode->i_mapping->nrpages; *swp_add += info->swapped; spin_unlock_irq(&info->lock); #else *rss_add += inode->i_mapping->nrpages; #endif } } /* * Called with shm_ids.rwsem held as a reader */ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, unsigned long *swp) { int next_id; int total, in_use; *rss = 0; *swp = 0; in_use = shm_ids(ns).in_use; for (total = 0, next_id = 0; total < in_use; next_id++) { struct kern_ipc_perm *ipc; struct shmid_kernel *shp; ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id); if (ipc == NULL) continue; shp = container_of(ipc, struct shmid_kernel, shm_perm); shm_add_rss_swap(shp, rss, swp); total++; } } /* * This function handles some shmctl commands which require the rwsem * to be held in write mode. * NOTE: no locks must be held, the rwsem is taken inside this function. 
*/ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, struct shmid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; struct shmid64_ds shmid64; struct shmid_kernel *shp; int err; if (cmd == IPC_SET) { if (copy_shmid_from_user(&shmid64, buf, version)) return -EFAULT; } down_write(&shm_ids(ns).rwsem); rcu_read_lock(); ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; } shp = container_of(ipcp, struct shmid_kernel, shm_perm); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; switch (cmd) { case IPC_RMID: ipc_lock_object(&shp->shm_perm); /* do_shm_rmid unlocks the ipc object and rcu */ do_shm_rmid(ns, ipcp); goto out_up; case IPC_SET: ipc_lock_object(&shp->shm_perm); err = ipc_update_perm(&shmid64.shm_perm, ipcp); if (err) goto out_unlock0; shp->shm_ctim = get_seconds(); break; default: err = -EINVAL; goto out_unlock1; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); out_up: up_write(&shm_ids(ns).rwsem); return err; } static int shmctl_nolock(struct ipc_namespace *ns, int shmid, int cmd, int version, void __user *buf) { int err; struct shmid_kernel *shp; /* preliminary security checks for *_INFO */ if (cmd == IPC_INFO || cmd == SHM_INFO) { err = security_shm_shmctl(NULL, cmd); if (err) return err; } switch (cmd) { case IPC_INFO: { struct shminfo64 shminfo; memset(&shminfo, 0, sizeof(shminfo)); shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni; shminfo.shmmax = ns->shm_ctlmax; shminfo.shmall = ns->shm_ctlall; shminfo.shmmin = SHMMIN; if (copy_shminfo_to_user(buf, &shminfo, version)) return -EFAULT; down_read(&shm_ids(ns).rwsem); err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if (err < 0) err = 0; goto out; } case SHM_INFO: { struct shm_info shm_info; memset(&shm_info, 0, sizeof(shm_info)); down_read(&shm_ids(ns).rwsem); shm_info.used_ids = shm_ids(ns).in_use; shm_get_stat(ns, 
&shm_info.shm_rss, &shm_info.shm_swp); shm_info.shm_tot = ns->shm_tot; shm_info.swap_attempts = 0; shm_info.swap_successes = 0; err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if (copy_to_user(buf, &shm_info, sizeof(shm_info))) { err = -EFAULT; goto out; } err = err < 0 ? 0 : err; goto out; } case SHM_STAT: case IPC_STAT: { struct shmid64_ds tbuf; int result; rcu_read_lock(); if (cmd == SHM_STAT) { shp = shm_obtain_object(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = shp->shm_perm.id; } else { shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = 0; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) goto out_unlock; err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock; memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm); tbuf.shm_segsz = shp->shm_segsz; tbuf.shm_atime = shp->shm_atim; tbuf.shm_dtime = shp->shm_dtim; tbuf.shm_ctime = shp->shm_ctim; tbuf.shm_cpid = shp->shm_cprid; tbuf.shm_lpid = shp->shm_lprid; tbuf.shm_nattch = shp->shm_nattch; rcu_read_unlock(); if (copy_shmid_to_user(buf, &tbuf, version)) err = -EFAULT; else err = result; goto out; } default: return -EINVAL; } out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) { struct shmid_kernel *shp; int err, version; struct ipc_namespace *ns; if (cmd < 0 || shmid < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case SHM_INFO: case SHM_STAT: case IPC_STAT: return shmctl_nolock(ns, shmid, cmd, version, buf); case IPC_RMID: case IPC_SET: return shmctl_down(ns, shmid, cmd, buf, version); case SHM_LOCK: case SHM_UNLOCK: { struct file *shm_file; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock1; } audit_ipc_obj(&(shp->shm_perm)); err = 
security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; ipc_lock_object(&shp->shm_perm); /* check if shm_destroy() is tearing down shp */ if (!ipc_valid_object(&shp->shm_perm)) { err = -EIDRM; goto out_unlock0; } if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { kuid_t euid = current_euid(); if (!uid_eq(euid, shp->shm_perm.uid) && !uid_eq(euid, shp->shm_perm.cuid)) { err = -EPERM; goto out_unlock0; } if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) { err = -EPERM; goto out_unlock0; } } shm_file = shp->shm_file; if (is_file_hugepages(shm_file)) goto out_unlock0; if (cmd == SHM_LOCK) { struct user_struct *user = current_user(); err = shmem_lock(shm_file, 1, user); if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { shp->shm_perm.mode |= SHM_LOCKED; shp->mlock_user = user; } goto out_unlock0; } /* SHM_UNLOCK */ if (!(shp->shm_perm.mode & SHM_LOCKED)) goto out_unlock0; shmem_lock(shm_file, 0, shp->mlock_user); shp->shm_perm.mode &= ~SHM_LOCKED; shp->mlock_user = NULL; get_file(shm_file); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); shmem_unlock_mapping(shm_file->f_mapping); fput(shm_file); return err; } default: return -EINVAL; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); return err; } /* * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists. * * NOTE! Despite the name, this is NOT a direct system call entrypoint. The * "raddr" thing points to kernel space, and there has to be a wrapper around * this. 
*/ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, unsigned long shmlba) { struct shmid_kernel *shp; unsigned long addr; unsigned long size; struct file *file; int err; unsigned long flags; unsigned long prot; int acc_mode; struct ipc_namespace *ns; struct shm_file_data *sfd; struct path path; fmode_t f_mode; unsigned long populate = 0; err = -EINVAL; if (shmid < 0) goto out; else if ((addr = (ulong)shmaddr)) { if (addr & (shmlba - 1)) { if (shmflg & SHM_RND) addr &= ~(shmlba - 1); /* round down */ else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) #endif goto out; } flags = MAP_SHARED | MAP_FIXED; } else { if ((shmflg & SHM_REMAP)) goto out; flags = MAP_SHARED; } if (shmflg & SHM_RDONLY) { prot = PROT_READ; acc_mode = S_IRUGO; f_mode = FMODE_READ; } else { prot = PROT_READ | PROT_WRITE; acc_mode = S_IRUGO | S_IWUGO; f_mode = FMODE_READ | FMODE_WRITE; } if (shmflg & SHM_EXEC) { prot |= PROT_EXEC; acc_mode |= S_IXUGO; } /* * We cannot rely on the fs check since SYSV IPC does have an * additional creator id... */ ns = current->nsproxy->ipc_ns; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, acc_mode)) goto out_unlock; err = security_shm_shmat(shp, shmaddr, shmflg); if (err) goto out_unlock; ipc_lock_object(&shp->shm_perm); /* check if shm_destroy() is tearing down shp */ if (!ipc_valid_object(&shp->shm_perm)) { ipc_unlock_object(&shp->shm_perm); err = -EIDRM; goto out_unlock; } path = shp->shm_file->f_path; path_get(&path); shp->shm_nattch++; size = i_size_read(d_inode(path.dentry)); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); err = -ENOMEM; sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); if (!sfd) { path_put(&path); goto out_nattch; } file = alloc_file(&path, f_mode, is_file_hugepages(shp->shm_file) ? 
&shm_file_operations_huge : &shm_file_operations); err = PTR_ERR(file); if (IS_ERR(file)) { kfree(sfd); path_put(&path); goto out_nattch; } file->private_data = sfd; file->f_mapping = shp->shm_file->f_mapping; sfd->id = shp->shm_perm.id; sfd->ns = get_ipc_ns(ns); sfd->file = shp->shm_file; sfd->vm_ops = NULL; err = security_mmap_file(file, prot, flags); if (err) goto out_fput; if (down_write_killable(&current->mm->mmap_sem)) { err = -EINTR; goto out_fput; } if (addr && !(shmflg & SHM_REMAP)) { err = -EINVAL; if (addr + size < addr) goto invalid; if (find_vma_intersection(current->mm, addr, addr + size)) goto invalid; } addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); *raddr = addr; err = 0; if (IS_ERR_VALUE(addr)) err = (long)addr; invalid: up_write(&current->mm->mmap_sem); if (populate) mm_populate(addr, populate); out_fput: fput(file); out_nattch: down_write(&shm_ids(ns).rwsem); shp = shm_lock(ns, shmid); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); return err; out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) { unsigned long ret; long err; err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); if (err) return err; force_successful_syscall_return(); return (long)ret; } /* * detach and kill segment if marked destroyed. * The work is done in shm_close. 
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	/* shmat() only creates page-aligned mappings, so anything else is bogus */
	if (addr & ~PAGE_MASK)
		return retval;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped. With mremap(), someone could place
			 * page from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
/*
 * Emit one /proc/sysvipc/shm row for segment @it (a shmid_kernel, handed
 * in by the seq_file iterator set up in shm_init()).  uids/gids are
 * translated into the namespace of the reader, not the creator.
 */
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d %4o " SIZE_SPEC " %5u %5u "
		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   shp->shm_cprid,
		   shp->shm_lprid,
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif
gpl-2.0
LeJay/android_kernel_samsung_jactivelte
arch/arm/mvp/mvpkm/cpufreq_kernel.c
109
7333
/*
 * Linux 2.6.32 and later Kernel module for VMware MVP Hypervisor Support
 *
 * Copyright (C) 2010-2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#line 5

/**
 * @file
 *
 * @brief MVP host kernel cpufreq related
 *
 * Track CPU frequency changes so that the guest monitor's TSC-to-RATE64
 * conversion ratio can be kept in sync with host frequency scaling.
 */

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>

#include "mvp.h"
#include "cpufreq_kernel.h"
#include "mvpkm_kernel.h"
#include "mvp_timer.h"

/**
 * @brief Return current CPU frequency
 * @param cpu CPU number
 * @return CPU frequency in kHz, might be 0 (cpufreq)
 *
 * When CPU_FREQ is not available, it uses hardcoded frequencies.
 */
static uint32
GetCpuFrequency(unsigned int cpu)
{
   unsigned int counterKHZ;

#ifdef CONFIG_CPU_FREQ
   counterKHZ = cpufreq_quick_get(cpu);
#elif defined(MVP_HOST_BOARD_ve)
   /**
    * @knownjira{MVP-143}
    * We're only using this under the simulator, and it's almost non
    * perceptible to provide a fixed TSC frequency as the instructions/second
    * executed widely varies over time.  Until that issue is resolved, use the
    * BogoMIPS reported at boot for now.
    */
   KNOWN_BUG(MVP-143);
   counterKHZ = 125e3;
   printk_once(KERN_INFO "mvpkm: CPU_FREQ not available, forcing TSC to %d kHz\n",
               counterKHZ);
#elif defined(MVP_HOST_BOARD_panda)
   counterKHZ = 1e6;
#else
   /*
    * If the kernel can't tell us and we have no further host knowledge,
    * time to die.
    */
#error "host TSC frequency unknown."
#endif

   return counterKHZ;
}

/**
 * @brief Compute TSC to RATE64 ratio
 * @param cpuFreq TSC frequency in kHz
 * @param[out] ttr tscToRate64 pointer
 *
 * We need to convert TSC values to the RATE64 timebase:
 *
 *              tsc * MVP_TIMER_RATE64
 *    rate64 = ------------------------
 *                    cpuFreq
 *
 * A runtime division is too expensive, so the conversion is expressed as a
 * 32x32->64 multiply plus a shift:
 *
 *    rate64 = (tsc * mult) >> shift
 *
 * Picking shift = 31 + CLZ32(MVP_TIMER_RATE64) - CLZ32(cpuFreq) is the
 * largest shift (best precision) such that
 *   - mult = (MVP_TIMER_RATE64 << shift) / cpuFreq still fits in 32 bits, and
 *   - (MVP_TIMER_RATE64 << shift) fits in 64 bits,
 * given cpuFreq < 2^32 Hz.  The ASSERT below checks the 32-bit bound.
 */
static void
TscToRate64(uint32 cpuFreq, struct TscToRate64Cb *ttr)
{
   uint32 shift;
   uint64 mult;

   /* cpuFreq comes in kHz */
   cpuFreq *= 1000;

   /* CLZ(MVP_TIMER_RATE64) is optimized by compiler in a constant */
   shift = 31 + CLZ(MVP_TIMER_RATE64) - CLZ(cpuFreq);

   mult = MVP_TIMER_RATE64;
   mult <<= shift;
   do_div(mult, cpuFreq);

   /* verify Log2 mult < 32 */
   ASSERT(mult < (1ULL<<32));

   /* update global variables */
   ttr->mult = mult;
   ttr->shift = shift;
}

/**
 * @brief Compute a new TSC to rate64 ratio if CPU frequency changed
 * @param[in,out] freq Pointer to previous CPU frequency in kHz
 * @param[in,out] ttr Pointer to ratio values, set on change
 * @return 1 if ratio has changed, else 0
 */
int
CpuFreqUpdate(unsigned int *freq, struct TscToRate64Cb *ttr)
{
   unsigned int cur = GetCpuFrequency(smp_processor_id());
   int ret = (cur != *freq);

   if (ret) {
      if (cur) {
         TscToRate64(cur, ttr);
      } else {
         /*
          * Note that cpufreq_quick_get(cpu) can return 0 while cpufreq is
          * not yet ready on that core. This will make monitor run with a
          * degraded time for few ms.
          *
          * NOTE(review): mult=1/shift=64 looks like a "ratio unknown"
          * sentinel — a literal 64-bit shift by 64 would be UB in C, so the
          * consumer presumably special-cases it; confirm in the monitor.
          */
         ttr->mult = 1;
         ttr->shift = 64;
      }
      *freq = cur;
   }

   return ret;
}

/**
 * @brief Nop function
 * @param info Ignored
 *
 * Exists only as an IPI target: the cross-call itself forces the target
 * core out of the monitor (see CpuFreqNotifier).
 */
static void
CpuFreqNop(void *info)
{
}

/**
 * @brief Handle cpufreq transition notifications.
 * @param nb Notifier block
 * @param val Notified event
 * @param data Linux cpufreq_freqs info
 * @return NOTIFY_OK
 *
 * @note A frequency change can fail in which case PRECHANGE and POSTCHANGE
 * will not be paired and you get any number of PRECHANGE and maybe never a
 * POSTCHANGE (i.e. there is not enough battery voltage available to support a
 * high frequency).
 * @note This is called once per cpu core that is changing but not always on
 * the core that is changing.
 */
static int
CpuFreqNotifier(struct notifier_block *nb, unsigned long val, void *data)
{
   struct cpufreq_freqs *freq = data;

   /*
    * Call CpuFreqNop() on the correct CPU core to force any currently running
    * vCPU's to worldswitch back to the host and update TSC to rate64 ratio
    * on next execution.
    */
   if (freq->old != freq->new &&
       val == CPUFREQ_POSTCHANGE &&
       cpumask_test_cpu(freq->cpu, &inMonitor)) {
      smp_call_function_single(freq->cpu, CpuFreqNop, NULL, false);
   }

   return NOTIFY_OK;
}

/**
 * @brief Notifier block for cpufreq transitions
 */
static struct notifier_block cpuFreqNotifierBlock = {
   .notifier_call = CpuFreqNotifier
};

/**
 * @brief Initialize TSC ratio and register cpufreq transitions.
 */
void
CpuFreq_Init(void)
{
   int ret;

   /* register callback on frequency change */
   ret = cpufreq_register_notifier(&cpuFreqNotifierBlock,
                                   CPUFREQ_TRANSITION_NOTIFIER);
   FATAL_IF(ret < 0);
}

/**
 * @brief Exit cpufreq, unregister cpufreq transitions
 */
void
CpuFreq_Exit(void)
{
   cpufreq_unregister_notifier(&cpuFreqNotifierBlock,
                               CPUFREQ_TRANSITION_NOTIFIER);
}
gpl-2.0
xiandaicxsj/qemu-copy
ui/spice-input.c
109
7428
/* * Copyright (C) 2010 Red Hat, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 or * (at your option) version 3 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <stdlib.h> #include <stdio.h> #include <stdbool.h> #include <string.h> #include <spice.h> #include <spice/enums.h> #include "qemu-common.h" #include "ui/qemu-spice.h" #include "ui/console.h" #include "ui/keymaps.h" #include "ui/input.h" /* keyboard bits */ typedef struct QemuSpiceKbd { SpiceKbdInstance sin; int ledstate; bool emul0; } QemuSpiceKbd; static void kbd_push_key(SpiceKbdInstance *sin, uint8_t frag); static uint8_t kbd_get_leds(SpiceKbdInstance *sin); static void kbd_leds(void *opaque, int l); static const SpiceKbdInterface kbd_interface = { .base.type = SPICE_INTERFACE_KEYBOARD, .base.description = "qemu keyboard", .base.major_version = SPICE_INTERFACE_KEYBOARD_MAJOR, .base.minor_version = SPICE_INTERFACE_KEYBOARD_MINOR, .push_scan_freg = kbd_push_key, .get_leds = kbd_get_leds, }; static void kbd_push_key(SpiceKbdInstance *sin, uint8_t scancode) { QemuSpiceKbd *kbd = container_of(sin, QemuSpiceKbd, sin); int keycode; bool up; if (scancode == SCANCODE_EMUL0) { kbd->emul0 = true; return; } keycode = scancode & ~SCANCODE_UP; up = scancode & SCANCODE_UP; if (kbd->emul0) { kbd->emul0 = false; keycode |= SCANCODE_GREY; } qemu_input_event_send_key_number(NULL, keycode, !up); } static uint8_t kbd_get_leds(SpiceKbdInstance *sin) { QemuSpiceKbd *kbd = container_of(sin, QemuSpiceKbd, sin); return 
kbd->ledstate; } static void kbd_leds(void *opaque, int ledstate) { QemuSpiceKbd *kbd = opaque; kbd->ledstate = 0; if (ledstate & QEMU_SCROLL_LOCK_LED) { kbd->ledstate |= SPICE_KEYBOARD_MODIFIER_FLAGS_SCROLL_LOCK; } if (ledstate & QEMU_NUM_LOCK_LED) { kbd->ledstate |= SPICE_KEYBOARD_MODIFIER_FLAGS_NUM_LOCK; } if (ledstate & QEMU_CAPS_LOCK_LED) { kbd->ledstate |= SPICE_KEYBOARD_MODIFIER_FLAGS_CAPS_LOCK; } spice_server_kbd_leds(&kbd->sin, ledstate); } /* mouse bits */ typedef struct QemuSpicePointer { SpiceMouseInstance mouse; SpiceTabletInstance tablet; int width, height; uint32_t last_bmask; Notifier mouse_mode; bool absolute; } QemuSpicePointer; static void spice_update_buttons(QemuSpicePointer *pointer, int wheel, uint32_t button_mask) { static uint32_t bmap[INPUT_BUTTON_MAX] = { [INPUT_BUTTON_LEFT] = 0x01, [INPUT_BUTTON_MIDDLE] = 0x04, [INPUT_BUTTON_RIGHT] = 0x02, [INPUT_BUTTON_WHEEL_UP] = 0x10, [INPUT_BUTTON_WHEEL_DOWN] = 0x20, }; if (wheel < 0) { button_mask |= 0x10; } if (wheel > 0) { button_mask |= 0x20; } if (pointer->last_bmask == button_mask) { return; } qemu_input_update_buttons(NULL, bmap, pointer->last_bmask, button_mask); pointer->last_bmask = button_mask; } static void mouse_motion(SpiceMouseInstance *sin, int dx, int dy, int dz, uint32_t buttons_state) { QemuSpicePointer *pointer = container_of(sin, QemuSpicePointer, mouse); spice_update_buttons(pointer, dz, buttons_state); qemu_input_queue_rel(NULL, INPUT_AXIS_X, dx); qemu_input_queue_rel(NULL, INPUT_AXIS_Y, dy); qemu_input_event_sync(); } static void mouse_buttons(SpiceMouseInstance *sin, uint32_t buttons_state) { QemuSpicePointer *pointer = container_of(sin, QemuSpicePointer, mouse); spice_update_buttons(pointer, 0, buttons_state); qemu_input_event_sync(); } static const SpiceMouseInterface mouse_interface = { .base.type = SPICE_INTERFACE_MOUSE, .base.description = "mouse", .base.major_version = SPICE_INTERFACE_MOUSE_MAJOR, .base.minor_version = SPICE_INTERFACE_MOUSE_MINOR, .motion = 
mouse_motion, .buttons = mouse_buttons, }; static void tablet_set_logical_size(SpiceTabletInstance* sin, int width, int height) { QemuSpicePointer *pointer = container_of(sin, QemuSpicePointer, tablet); if (height < 16) { height = 16; } if (width < 16) { width = 16; } pointer->width = width; pointer->height = height; } static void tablet_position(SpiceTabletInstance* sin, int x, int y, uint32_t buttons_state) { QemuSpicePointer *pointer = container_of(sin, QemuSpicePointer, tablet); spice_update_buttons(pointer, 0, buttons_state); qemu_input_queue_abs(NULL, INPUT_AXIS_X, x, pointer->width); qemu_input_queue_abs(NULL, INPUT_AXIS_Y, y, pointer->height); qemu_input_event_sync(); } static void tablet_wheel(SpiceTabletInstance* sin, int wheel, uint32_t buttons_state) { QemuSpicePointer *pointer = container_of(sin, QemuSpicePointer, tablet); spice_update_buttons(pointer, wheel, buttons_state); qemu_input_event_sync(); } static void tablet_buttons(SpiceTabletInstance *sin, uint32_t buttons_state) { QemuSpicePointer *pointer = container_of(sin, QemuSpicePointer, tablet); spice_update_buttons(pointer, 0, buttons_state); qemu_input_event_sync(); } static const SpiceTabletInterface tablet_interface = { .base.type = SPICE_INTERFACE_TABLET, .base.description = "tablet", .base.major_version = SPICE_INTERFACE_TABLET_MAJOR, .base.minor_version = SPICE_INTERFACE_TABLET_MINOR, .set_logical_size = tablet_set_logical_size, .position = tablet_position, .wheel = tablet_wheel, .buttons = tablet_buttons, }; static void mouse_mode_notifier(Notifier *notifier, void *data) { QemuSpicePointer *pointer = container_of(notifier, QemuSpicePointer, mouse_mode); bool is_absolute = qemu_input_is_absolute(); if (pointer->absolute == is_absolute) { return; } if (is_absolute) { qemu_spice_add_interface(&pointer->tablet.base); } else { spice_server_remove_interface(&pointer->tablet.base); } pointer->absolute = is_absolute; } void qemu_spice_input_init(void) { QemuSpiceKbd *kbd; QemuSpicePointer 
*pointer; kbd = g_malloc0(sizeof(*kbd)); kbd->sin.base.sif = &kbd_interface.base; qemu_spice_add_interface(&kbd->sin.base); qemu_add_led_event_handler(kbd_leds, kbd); pointer = g_malloc0(sizeof(*pointer)); pointer->mouse.base.sif = &mouse_interface.base; pointer->tablet.base.sif = &tablet_interface.base; qemu_spice_add_interface(&pointer->mouse.base); pointer->absolute = false; pointer->mouse_mode.notify = mouse_mode_notifier; qemu_add_mouse_mode_change_notifier(&pointer->mouse_mode); mouse_mode_notifier(&pointer->mouse_mode, NULL); }
gpl-2.0
malsony/linux
drivers/input/touchscreen/wacom_i2c.c
621
6759
/* * Wacom Penabled Driver for I2C * * Copyright (c) 2011 - 2013 Tatsunosuke Tobita, Wacom. * <tobita.tatsunosuke@wacom.co.jp> * * This program is free software; you can redistribute it * and/or modify it under the terms of the GNU General * Public License as published by the Free Software * Foundation; either version of 2 of the License, * or (at your option) any later version. */ #include <linux/module.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <asm/unaligned.h> #define WACOM_CMD_QUERY0 0x04 #define WACOM_CMD_QUERY1 0x00 #define WACOM_CMD_QUERY2 0x33 #define WACOM_CMD_QUERY3 0x02 #define WACOM_CMD_THROW0 0x05 #define WACOM_CMD_THROW1 0x00 #define WACOM_QUERY_SIZE 19 struct wacom_features { int x_max; int y_max; int pressure_max; char fw_version; }; struct wacom_i2c { struct i2c_client *client; struct input_dev *input; u8 data[WACOM_QUERY_SIZE]; bool prox; int tool; }; static int wacom_query_device(struct i2c_client *client, struct wacom_features *features) { int ret; u8 cmd1[] = { WACOM_CMD_QUERY0, WACOM_CMD_QUERY1, WACOM_CMD_QUERY2, WACOM_CMD_QUERY3 }; u8 cmd2[] = { WACOM_CMD_THROW0, WACOM_CMD_THROW1 }; u8 data[WACOM_QUERY_SIZE]; struct i2c_msg msgs[] = { { .addr = client->addr, .flags = 0, .len = sizeof(cmd1), .buf = cmd1, }, { .addr = client->addr, .flags = 0, .len = sizeof(cmd2), .buf = cmd2, }, { .addr = client->addr, .flags = I2C_M_RD, .len = sizeof(data), .buf = data, }, }; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret < 0) return ret; if (ret != ARRAY_SIZE(msgs)) return -EIO; features->x_max = get_unaligned_le16(&data[3]); features->y_max = get_unaligned_le16(&data[5]); features->pressure_max = get_unaligned_le16(&data[11]); features->fw_version = get_unaligned_le16(&data[13]); dev_dbg(&client->dev, "x_max:%d, y_max:%d, pressure:%d, fw:%d\n", features->x_max, features->y_max, features->pressure_max, 
features->fw_version); return 0; } static irqreturn_t wacom_i2c_irq(int irq, void *dev_id) { struct wacom_i2c *wac_i2c = dev_id; struct input_dev *input = wac_i2c->input; u8 *data = wac_i2c->data; unsigned int x, y, pressure; unsigned char tsw, f1, f2, ers; int error; error = i2c_master_recv(wac_i2c->client, wac_i2c->data, sizeof(wac_i2c->data)); if (error < 0) goto out; tsw = data[3] & 0x01; ers = data[3] & 0x04; f1 = data[3] & 0x02; f2 = data[3] & 0x10; x = le16_to_cpup((__le16 *)&data[4]); y = le16_to_cpup((__le16 *)&data[6]); pressure = le16_to_cpup((__le16 *)&data[8]); if (!wac_i2c->prox) wac_i2c->tool = (data[3] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; wac_i2c->prox = data[3] & 0x20; input_report_key(input, BTN_TOUCH, tsw || ers); input_report_key(input, wac_i2c->tool, wac_i2c->prox); input_report_key(input, BTN_STYLUS, f1); input_report_key(input, BTN_STYLUS2, f2); input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_report_abs(input, ABS_PRESSURE, pressure); input_sync(input); out: return IRQ_HANDLED; } static int wacom_i2c_open(struct input_dev *dev) { struct wacom_i2c *wac_i2c = input_get_drvdata(dev); struct i2c_client *client = wac_i2c->client; enable_irq(client->irq); return 0; } static void wacom_i2c_close(struct input_dev *dev) { struct wacom_i2c *wac_i2c = input_get_drvdata(dev); struct i2c_client *client = wac_i2c->client; disable_irq(client->irq); } static int wacom_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct wacom_i2c *wac_i2c; struct input_dev *input; struct wacom_features features = { 0 }; int error; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "i2c_check_functionality error\n"); return -EIO; } error = wacom_query_device(client, &features); if (error) return error; wac_i2c = kzalloc(sizeof(*wac_i2c), GFP_KERNEL); input = input_allocate_device(); if (!wac_i2c || !input) { error = -ENOMEM; goto err_free_mem; } wac_i2c->client = client; wac_i2c->input 
= input; input->name = "Wacom I2C Digitizer"; input->id.bustype = BUS_I2C; input->id.vendor = 0x56a; input->id.version = features.fw_version; input->dev.parent = &client->dev; input->open = wacom_i2c_open; input->close = wacom_i2c_close; input->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOOL_PEN, input->keybit); __set_bit(BTN_TOOL_RUBBER, input->keybit); __set_bit(BTN_STYLUS, input->keybit); __set_bit(BTN_STYLUS2, input->keybit); __set_bit(BTN_TOUCH, input->keybit); input_set_abs_params(input, ABS_X, 0, features.x_max, 0, 0); input_set_abs_params(input, ABS_Y, 0, features.y_max, 0, 0); input_set_abs_params(input, ABS_PRESSURE, 0, features.pressure_max, 0, 0); input_set_drvdata(input, wac_i2c); error = request_threaded_irq(client->irq, NULL, wacom_i2c_irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "wacom_i2c", wac_i2c); if (error) { dev_err(&client->dev, "Failed to enable IRQ, error: %d\n", error); goto err_free_mem; } /* Disable the IRQ, we'll enable it in wac_i2c_open() */ disable_irq(client->irq); error = input_register_device(wac_i2c->input); if (error) { dev_err(&client->dev, "Failed to register input device, error: %d\n", error); goto err_free_irq; } i2c_set_clientdata(client, wac_i2c); return 0; err_free_irq: free_irq(client->irq, wac_i2c); err_free_mem: input_free_device(input); kfree(wac_i2c); return error; } static int wacom_i2c_remove(struct i2c_client *client) { struct wacom_i2c *wac_i2c = i2c_get_clientdata(client); free_irq(client->irq, wac_i2c); input_unregister_device(wac_i2c->input); kfree(wac_i2c); return 0; } static int __maybe_unused wacom_i2c_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); disable_irq(client->irq); return 0; } static int __maybe_unused wacom_i2c_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); enable_irq(client->irq); return 0; } static SIMPLE_DEV_PM_OPS(wacom_i2c_pm, wacom_i2c_suspend, wacom_i2c_resume); static const struct i2c_device_id wacom_i2c_id[] = { { 
"WAC_I2C_EMR", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, wacom_i2c_id); static struct i2c_driver wacom_i2c_driver = { .driver = { .name = "wacom_i2c", .owner = THIS_MODULE, .pm = &wacom_i2c_pm, }, .probe = wacom_i2c_probe, .remove = wacom_i2c_remove, .id_table = wacom_i2c_id, }; module_i2c_driver(wacom_i2c_driver); MODULE_AUTHOR("Tatsunosuke Tobita <tobita.tatsunosuke@wacom.co.jp>"); MODULE_DESCRIPTION("WACOM EMR I2C Driver"); MODULE_LICENSE("GPL");
gpl-2.0
AndyPhoenix9879/SkyMelon-msm8939
arch/mips/mm/init.c
877
12118
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * Atomicity and interruptability.
 *
 * NOTE: on SMTC these two macros deliberately open and close an unbalanced
 * brace pair across the ENTER/EXIT sites so that `mvpflags` (needed to
 * restore the VPE state with evpe()) stays in scope between them.  They
 * must always be used strictly as a matched pair.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else
#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)
#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.	 This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Allocate the empty zero page(s): 8 pages on VCE-afflicted CPUs so one of
 * each cache colour is available, otherwise a single page.
 *
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

/*
 * Map @page at a fixmap virtual address whose cache colour matches the
 * user mapping at @addr, so the kernel copy does not alias the user view.
 * Installs a temporary (wired, or on SMTC probed/random) TLB entry;
 * must be paired with kunmap_coherent().  Disables preemption.
 */
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	/* pick the fixmap slot with the same colour as the user address */
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id() +
		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	/* burn a wired entry for the temporary mapping */
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void*) vaddr;
}

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

/*
 * Undo kmap_coherent(): retire the wired TLB entry (non-SMTC; SMTC lets
 * its probed/random entry age out naturally) and re-enable preemption.
 */
void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	/* overwrite the freed slot with a unique, never-matching entry */
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}

/*
 * Copy a user highmem page, going through a coherent (same-colour) mapping
 * of the source when dcache aliasing could otherwise expose stale data.
 */
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}

/*
 * Copy into a user page (e.g. for ptrace/uaccess): write through a
 * coherent mapping when aliasing matters, else mark the page dirty in
 * the dcache so later flushes do the right thing.
 */
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

/* Counterpart of copy_to_user_page() for reads out of a user page. */
void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

/*
 * Populate pmd entries (allocating pte pages from bootmem) for the fixmap
 * and/or highmem kmap virtual range [start, end).
 */
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* Return 1 when pfn @pagenr lies inside a usable boot_mem_map RAM region. */
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

/* Set up zone pfn limits and hand them to the core mm. */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		/* highmem cannot be supported with dcache aliasing */
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

/* Release bootmem to the buddy allocator and print the memory summary. */
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp) && pfn_valid(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		free_highmem_page(page);
	}
	num_physpages += totalhigh_pages;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

/* Poison and release the [begin, end) init region back to the allocator. */
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offset.h until that gcc
 * will officially be retired.
 *
 * Align swapper_pg_dir in to 64K, allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
gpl-2.0
lx324310/linux
arch/arm/mach-keystone/platsmp.c
877
1477
/*
 * Keystone SOC SMP platform code
 *
 * Copyright 2013 Texas Instruments, Inc.
 *	Cyril Chemparathy <cyril@ti.com>
 *	Santosh Shilimkar <santosh.shillimkar@ti.com>
 *
 * Based on platsmp.c, Copyright (C) 2002 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/smp_plat.h>
#include <asm/prom.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "keystone.h"

/*
 * Kick a secondary core: ask the boot monitor (via secure monitor call)
 * to release the core at the identity-mapped secondary entry point.
 */
static int keystone_smp_boot_secondary(unsigned int cpu,
						struct task_struct *idle)
{
	unsigned long entry = virt_to_idmap(&secondary_startup);
	int ret;

	pr_debug("keystone-smp: booting cpu %d, vector %08lx\n",
		 cpu, entry);

	ret = keystone_cpu_smc(KEYSTONE_MON_CPU_UP_IDX, cpu, entry);
	if (ret)
		pr_err("CPU %d bringup failed with %d\n", cpu, ret);

	return ret;
}

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE the secondary comes up on the boot page tables; point its
 * TTBR1 at the kernel page directory and flush any stale TLB entries.
 */
static void __cpuinit keystone_smp_secondary_initmem(unsigned int cpu)
{
	pgd_t *kernel_pgd = pgd_offset_k(0);

	cpu_set_ttbr(1, __pa(kernel_pgd) + TTBR1_OFFSET);
	local_flush_tlb_all();
}
#else
/* Nothing to do without LPAE. */
static inline void __cpuinit keystone_smp_secondary_initmem(unsigned int cpu)
{}
#endif

struct smp_operations keystone_smp_ops __initdata = {
	.smp_boot_secondary	= keystone_smp_boot_secondary,
	.smp_secondary_init	= keystone_smp_secondary_initmem,
};
gpl-2.0
Hani-K/Simplicity_Kernel_Exynos5433_LL
drivers/gpu/drm/nouveau/nouveau_chan.c
2157
11187
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Ben Skeggs
 */

#include <core/object.h>
#include <core/client.h>
#include <core/device.h>
#include <core/class.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/instmem.h>

#include <engine/software.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
/* Module option: when non-zero, push buffers are placed in VRAM instead
 * of system memory (TTM_PL_FLAG_TT). Read-only after load (mode 0400). */
static int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

/* Drain all outstanding work on @chan by emitting one more fence and
 * blocking (non-interruptibly, non-lazily) until it signals.
 * Returns 0 on success, or the fence-creation/wait error (logged). */
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct nouveau_cli *cli = chan->cli;
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, false, &fence);
	if (!ret) {
		ret = nouveau_fence_wait(fence, false, false);
		nouveau_fence_unref(&fence);
	}

	if (ret)
		NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n",
			 chan->handle, cli->base.name);
	return ret;
}

/* Destroy a channel and NULL the caller's pointer.  Safe to call with
 * *pchan == NULL.  If a fence context exists, the channel is idled first
 * so in-flight work completes before teardown.  Teardown order: channel
 * object, pushbuf DMA object, pushbuf VMA, then unmap/unpin/unref the
 * pushbuf BO itself. */
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	if (chan) {
		struct nouveau_object *client = nv_object(chan->cli);
		if (chan->fence) {
			nouveau_channel_idle(chan);
			nouveau_fence(chan->drm)->context_del(chan);
		}
		nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
		nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
		nouveau_bo_unmap(chan->push.buffer);
		/* only unpin if the pin from nouveau_channel_prep() is
		 * still outstanding (prep may have failed part-way) */
		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
			nouveau_bo_unpin(chan->push.buffer);
		nouveau_bo_ref(NULL, &chan->push.buffer);
		kfree(chan);
	}
	*pchan = NULL;
}

/* Common first stage of channel creation: allocate the channel struct,
 * allocate/pin/map a push buffer BO of @size bytes, and create the DMA
 * object (class 0x0002) the GPU uses to address it.  On any failure the
 * partially-built channel is torn down via nouveau_channel_del() and
 * *pchan is NULLed. */
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
		     u32 parent, u32 handle, u32 size,
		     struct nouveau_channel **pchan)
{
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_instmem *imem = nouveau_instmem(device);
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
	struct nouveau_fb *pfb = nouveau_fb(device);
	struct nouveau_client *client = &cli->base;
	struct nv_dma_class args = {};
	struct nouveau_channel *chan;
	struct nouveau_object *push;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->cli = cli;
	chan->drm = drm;
	chan->handle = handle;

	/* allocate memory for dma push buffer */
	target = TTM_PL_FLAG_TT;
	if (nouveau_vram_pushbuf)
		target = TTM_PL_FLAG_VRAM;

	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
			     &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	/* create dma object covering the *entire* memory space that the
	 * pushbuf lives in, this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
	chan->push.vma.offset = chan->push.buffer->bo.offset;
	chan->push.handle = NVDRM_PUSH | (handle & 0xffff);

	if (device->card_type >= NV_50) {
		/* NV50+: map the pushbuf into the client's VM and target
		 * the DMA object at the whole VM address space */
		ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
					 &chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
		args.start = 0;
		args.limit = client->vm->vmm->limit - 1;
	} else
	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
		u64 limit = pfb->ram.size - imem->reserved - 1;
		if (device->card_type == NV_04) {
			/* nv04 vram pushbuf hack, retarget to its location in
			 * the framebuffer bar rather than direct vram access..
			 * nfi why this exists, it came from the -nv ddx.
			 */
			args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
			args.start = pci_resource_start(device->pdev, 1);
			args.limit = args.start + limit;
		} else {
			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = limit;
		}
	} else {
		/* pre-NV50 with a system-memory pushbuf: go through AGP if
		 * it's enabled, otherwise through the system VM */
		if (chan->drm->agp.stat == ENABLED) {
			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = vmm->limit - 1;
		}
	}

	ret = nouveau_object_new(nv_object(chan->cli), parent,
				 chan->push.handle, 0x0002,
				 &args, sizeof(args), &push);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}

/* Create an indirect-pushbuf ("IB") channel for NV50 and newer, trying
 * the newest channel class first.  The 0x12000-byte pushbuf holds the
 * 0x10000-byte DMA area plus a 0x2000-byte IB ring at offset 0x10000.
 * On failure the prepped channel is deleted and the error returned. */
static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
		    u32 parent, u32 handle, u32 engine,
		    struct nouveau_channel **pchan)
{
	static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS,
					NVC0_CHANNEL_IND_CLASS,
					NV84_CHANNEL_IND_CLASS,
					NV50_CHANNEL_IND_CLASS,
					0 };
	const u16 *oclass = oclasses;
	struct nve0_channel_ind_class args;
	struct nouveau_channel *chan;
	int ret;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	args.pushbuf = chan->push.handle;
	args.ioffset = 0x10000 + chan->push.vma.offset;
	args.ilength = 0x02000;
	args.engine = engine;

	do {
		ret = nouveau_object_new(nv_object(cli), parent, handle,
					 *oclass++, &args, sizeof(args),
					 &chan->object);
		if (ret == 0)
			return ret;
	} while (*oclass);

	nouveau_channel_del(pchan);
	return ret;
}

/* Create a classic DMA-mode channel (pre-NV50 fallback), again trying
 * the newest supported channel class first. */
static int
nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
		    u32 parent, u32 handle, struct nouveau_channel **pchan)
{
	static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS,
					NV17_CHANNEL_DMA_CLASS,
					NV10_CHANNEL_DMA_CLASS,
					NV03_CHANNEL_DMA_CLASS,
					0 };
	const u16 *oclass = oclasses;
	struct nv03_channel_dma_class args;
	struct nouveau_channel *chan;
	int ret;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	args.pushbuf = chan->push.handle;
	args.offset = chan->push.vma.offset;

	do {
		ret = nouveau_object_new(nv_object(cli), parent, handle,
					 *oclass++, &args, sizeof(args),
					 &chan->object);
		if (ret == 0)
			return ret;
	} while (ret && *oclass);

	nouveau_channel_del(pchan);
	return ret;
}

/* Second stage of channel bring-up: create the VRAM/GART DMA objects
 * (pre-Fermi only), initialise the software DMA tracking state, emit the
 * initial skip NOPs, bind the software object class (fences on <= nv05,
 * flip completion), and create the channel's fence context. */
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nouveau_client *client = nv_client(chan->cli);
	struct nouveau_device *device = nv_device(chan->drm->device);
	struct nouveau_instmem *imem = nouveau_instmem(device);
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
	struct nouveau_fb *pfb = nouveau_fb(device);
	struct nouveau_software_chan *swch;
	struct nouveau_object *object;
	struct nv_dma_class args = {};
	int ret, i;

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->card_type < NV_C0) {
		if (device->card_type >= NV_50) {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
			args.start = 0;
			args.limit = client->vm->vmm->limit - 1;
		} else {
			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = pfb->ram.size - imem->reserved - 1;
		}

		ret = nouveau_object_new(nv_object(client), chan->handle, vram,
					 0x003d, &args, sizeof(args), &object);
		if (ret)
			return ret;

		if (device->card_type >= NV_50) {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
			args.start = 0;
			args.limit = client->vm->vmm->limit - 1;
		} else
		if (chan->drm->agp.stat == ENABLED) {
			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = vmm->limit - 1;
		}

		ret = nouveau_object_new(nv_object(client), chan->handle, gart,
					 0x003d, &args, sizeof(args), &object);
		if (ret)
			return ret;

		chan->vram = vram;
		chan->gart = gart;
	}

	/* initialise dma tracking parameters */
	switch (nv_hclass(chan->object) & 0x00ff) {
	case 0x006b:
	case 0x006e:
		/* nv03/nv04-style DMA channels: plain get/put, no IB ring */
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
		/* IB-capable channels: ring lives at 0x10000 in the pushbuf */
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base = 0x10000 / 4;
		chan->dma.ib_max = (0x02000 / 8) - 1;
		chan->dma.ib_put = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0x00000000);

	/* allocate software object class (used for fences on <= nv05, and
	 * to signal flip completion), bind it to a subchannel.
	 */
	if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
		ret = nouveau_object_new(nv_object(client), chan->handle,
					 NvSw, nouveau_abi16_swclass(chan->drm),
					 NULL, 0, &object);
		if (ret)
			return ret;

		swch = (void *)object->parent;
		swch->flip = nouveau_flip_complete;
		swch->flip_data = chan;
	}

	if (device->card_type < NV_C0) {
		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
		OUT_RING  (chan, NvSw);
		FIRE_RING (chan);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}

/* Public entry point: create and fully initialise a channel.  Tries the
 * indirect (IB) channel type first and falls back to a DMA channel; on
 * any init failure the channel is destroyed and *pchan NULLed.
 * Note: arg0 doubles as the IND engine selector and the VRAM DMA handle
 * passed to nouveau_channel_init(); arg1 is the GART DMA handle. */
int
nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
		    u32 parent, u32 handle, u32 arg0, u32 arg1,
		    struct nouveau_channel **pchan)
{
	int ret;

	ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan);
	if (ret) {
		NV_DEBUG(cli, "ib channel create, %d\n", ret);
		ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
		if (ret) {
			NV_DEBUG(cli, "dma channel create, %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_channel_init(*pchan, arg0, arg1);
	if (ret) {
		NV_ERROR(cli, "channel failed to initialise, %d\n", ret);
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}
gpl-2.0
faux123/private_msm8660_ics
drivers/net/usb/rndis_host.c
2925
19921
/*
 * Host Side support for RNDIS Networking Links
 * Copyright (C) 2005 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/rndis_host.h>

/*
 * RNDIS is NDIS remoted over USB.  It's a MSFT variant of CDC ACM ... of
 * course ACM was intended for modems, not Ethernet links!  USB's standard
 * for Ethernet links is "CDC Ethernet", which is significantly simpler.
 *
 * NOTE that Microsoft's "RNDIS 1.0" specification is incomplete.  Issues
 * include:
 *    - Power management in particular relies on information that's scattered
 *      through other documentation, and which is incomplete or incorrect even
 *      there.
 *    - There are various undocumented protocol requirements, such as the
 *      need to send unused garbage in control-OUT messages.
 *    - In some cases, MS-Windows will emit undocumented requests; this
 *      matters more to peripheral implementations than host ones.
 *
 * Moreover there's a no-open-specs variant of RNDIS called "ActiveSync".
 *
 * For these reasons and others, ** USE OF RNDIS IS STRONGLY DISCOURAGED ** in
 * favor of such non-proprietary alternatives as CDC Ethernet or the newer (and
 * currently rare) "Ethernet Emulation Model" (EEM).
 */

/*
 * RNDIS notifications from device: command completion; "reverse"
 * keepalives; etc
 *
 * Currently only logs the notification; see the FIXME below.
 */
void rndis_status(struct usbnet *dev, struct urb *urb)
{
	netdev_dbg(dev->net, "rndis status urb, len %d stat %d\n",
		   urb->actual_length, urb->status);
	// FIXME for keepalives, respond immediately (asynchronously)
	// if not an RNDIS status, do like cdc_status(dev,urb) does
}
EXPORT_SYMBOL_GPL(rndis_status);

/*
 * RNDIS indicate messages.
 *
 * Dispatches an unsolicited RNDIS_MSG_INDICATE to the driver's
 * ->indication() hook when one is provided; otherwise logs the
 * well-known media connect/disconnect statuses.
 */
static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
			       int buflen)
{
	struct cdc_state *info = (void *)&dev->data;
	struct device *udev = &info->control->dev;

	if (dev->driver_info->indication) {
		dev->driver_info->indication(dev, msg, buflen);
	} else {
		switch (msg->status) {
		case RNDIS_STATUS_MEDIA_CONNECT:
			dev_info(udev, "rndis media connect\n");
			break;
		case RNDIS_STATUS_MEDIA_DISCONNECT:
			dev_info(udev, "rndis media disconnect\n");
			break;
		default:
			dev_info(udev, "rndis indication: 0x%08x\n",
				 le32_to_cpu(msg->status));
		}
	}
}

/*
 * RPC done RNDIS-style.  Caller guarantees:
 * - message is properly byteswapped
 * - there's no other request pending
 * - buf can hold up to 1KB response (required by RNDIS spec)
 * On return, the first few entries are already byteswapped.
 *
 * Call context is likely probe(), before interface name is known,
 * which is why we won't try to use it in the diagnostics.
 */
int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
{
	struct cdc_state	*info = (void *) &dev->data;
	struct usb_cdc_notification notification;
	int			master_ifnum;
	int			retval;
	int			partial;
	unsigned		count;
	__le32			rsp;
	u32			xid = 0, msg_len, request_id;

	/* REVISIT when this gets called from contexts other than probe() or
	 * disconnect(): either serialize, or dispatch responses on xid
	 */

	/* Issue the request; xid is unique, don't bother byteswapping it */
	if (likely(buf->msg_type != RNDIS_MSG_HALT &&
		   buf->msg_type != RNDIS_MSG_RESET)) {
		/* xid of 0 is reserved, so skip it on wraparound */
		xid = dev->xid++;
		if (!xid)
			xid = dev->xid++;
		buf->request_id = (__force __le32) xid;
	}
	master_ifnum = info->control->cur_altsetting->desc.bInterfaceNumber;
	retval = usb_control_msg(dev->udev,
		usb_sndctrlpipe(dev->udev, 0),
		USB_CDC_SEND_ENCAPSULATED_COMMAND,
		USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		0, master_ifnum,
		buf, le32_to_cpu(buf->msg_len),
		RNDIS_CONTROL_TIMEOUT_MS);
	/* xid == 0 means HALT/RESET: no response is expected */
	if (unlikely(retval < 0 || xid == 0))
		return retval;

	/* Some devices don't respond on the control channel until
	 * polled on the status channel, so do that first.
	 */
	if (dev->driver_info->data & RNDIS_DRIVER_DATA_POLL_STATUS) {
		retval = usb_interrupt_msg(
			dev->udev,
			usb_rcvintpipe(dev->udev,
				       dev->status->desc.bEndpointAddress),
			&notification, sizeof(notification), &partial,
			RNDIS_CONTROL_TIMEOUT_MS);
		if (unlikely(retval < 0))
			return retval;
	}

	/* Poll the control channel; the request probably completed immediately */
	rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
	for (count = 0; count < 10; count++) {
		memset(buf, 0, CONTROL_BUFFER_SIZE);
		retval = usb_control_msg(dev->udev,
			usb_rcvctrlpipe(dev->udev, 0),
			USB_CDC_GET_ENCAPSULATED_RESPONSE,
			USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			0, master_ifnum,
			buf, buflen,
			RNDIS_CONTROL_TIMEOUT_MS);
		if (likely(retval >= 8)) {
			msg_len = le32_to_cpu(buf->msg_len);
			request_id = (__force u32) buf->request_id;
			if (likely(buf->msg_type == rsp)) {
				if (likely(request_id == xid)) {
					/* RESET_C carries no status field */
					if (unlikely(rsp == RNDIS_MSG_RESET_C))
						return 0;
					if (likely(RNDIS_STATUS_SUCCESS ==
							buf->status))
						return 0;
					dev_dbg(&info->control->dev,
						"rndis reply status %08x\n",
						le32_to_cpu(buf->status));
					return -EL3RST;
				}
				dev_dbg(&info->control->dev,
					"rndis reply id %d expected %d\n",
					request_id, xid);
				/* then likely retry */
			} else switch (buf->msg_type) {
			case RNDIS_MSG_INDICATE:	/* fault/event */
				rndis_msg_indicate(dev, (void *)buf, buflen);
				break;
			case RNDIS_MSG_KEEPALIVE: {	/* ping */
				struct rndis_keepalive_c *msg = (void *)buf;

				/* answer the device's keepalive in-line,
				 * reusing the command buffer */
				msg->msg_type = RNDIS_MSG_KEEPALIVE_C;
				msg->msg_len = cpu_to_le32(sizeof *msg);
				msg->status = RNDIS_STATUS_SUCCESS;
				retval = usb_control_msg(dev->udev,
					usb_sndctrlpipe(dev->udev, 0),
					USB_CDC_SEND_ENCAPSULATED_COMMAND,
					USB_TYPE_CLASS | USB_RECIP_INTERFACE,
					0, master_ifnum,
					msg, sizeof *msg,
					RNDIS_CONTROL_TIMEOUT_MS);
				if (unlikely(retval < 0))
					dev_dbg(&info->control->dev,
						"rndis keepalive err %d\n",
						retval);
				}
				break;
			default:
				dev_dbg(&info->control->dev,
					"unexpected rndis msg %08x len %d\n",
					le32_to_cpu(buf->msg_type), msg_len);
			}
		} else {
			/* device probably issued a protocol stall; ignore */
			dev_dbg(&info->control->dev,
				"rndis response error, code %d\n", retval);
		}
		msleep(20);
	}
	dev_dbg(&info->control->dev, "rndis response timeout\n");
	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rndis_command);

/*
 * rndis_query:
 *
 * Performs a query for @oid along with 0 or more bytes of payload as
 * specified by @in_len. If @reply_len is not set to -1 then the reply
 * length is checked against this value, resulting in an error if it
 * doesn't match.
 *
 * NOTE: Adding a payload exactly or greater than the size of the expected
 * response payload is an evident requirement MSFT added for ActiveSync.
 *
 * The only exception is for OIDs that return a variably sized response,
 * in which case no payload should be added.  This undocumented (and
 * nonsensical!) issue was found by sniffing protocol requests from the
 * ActiveSync 4.1 Windows driver.
 */
static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
		       void *buf, __le32 oid, u32 in_len,
		       void **reply, int *reply_len)
{
	int retval;
	union {
		void			*buf;
		struct rndis_msg_hdr	*header;
		struct rndis_query	*get;
		struct rndis_query_c	*get_c;
	} u;
	u32 off, len;

	u.buf = buf;

	memset(u.get, 0, sizeof *u.get + in_len);
	u.get->msg_type = RNDIS_MSG_QUERY;
	u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len);
	u.get->oid = oid;
	u.get->len = cpu_to_le32(in_len);
	u.get->offset = cpu_to_le32(20);

	retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
	if (unlikely(retval < 0)) {
		dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) failed, %d\n",
			oid, retval);
		return retval;
	}

	/* validate the response payload fits inside the control buffer
	 * (off/len are device-supplied, so treat them as untrusted) */
	off = le32_to_cpu(u.get_c->offset);
	len = le32_to_cpu(u.get_c->len);
	if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE))
		goto response_error;

	if (*reply_len != -1 && len != *reply_len)
		goto response_error;

	/* reply points into the caller's buf, offset from request_id */
	*reply = (unsigned char *) &u.get_c->request_id + off;
	*reply_len = len;

	return retval;

response_error:
	dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) "
		"invalid response - off %d len %d\n",
		oid, off, len);
	return -EDOM;
}

/* same as usbnet_netdev_ops but MTU change not allowed */
static const struct net_device_ops rndis_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * Bind an RNDIS interface: CDC-bind, RNDIS_MSG_INIT handshake, optional
 * physical-medium check against @flags, fetch the permanent MAC address,
 * and set a packet filter to enable data transfers.  On failure past
 * INIT, a HALT is sent before releasing the interface.
 */
int
generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
{
	int			retval;
	struct net_device	*net = dev->net;
	struct cdc_state	*info = (void *) &dev->data;
	union {
		void			*buf;
		struct rndis_msg_hdr	*header;
		struct rndis_init	*init;
		struct rndis_init_c	*init_c;
		struct rndis_query	*get;
		struct rndis_query_c	*get_c;
		struct rndis_set	*set;
		struct rndis_set_c	*set_c;
		struct rndis_halt	*halt;
	} u;
	u32			tmp;
	__le32			phym_unspec, *phym;
	int			reply_len;
	unsigned char		*bp;

	/* we can't rely on i/o from stack working, or stack allocation */
	u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
	if (!u.buf)
		return -ENOMEM;
	retval = usbnet_generic_cdc_bind(dev, intf);
	if (retval < 0)
		goto fail;

	u.init->msg_type = RNDIS_MSG_INIT;
	u.init->msg_len = cpu_to_le32(sizeof *u.init);
	u.init->major_version = cpu_to_le32(1);
	u.init->minor_version = cpu_to_le32(0);

	/* max transfer (in spec) is 0x4000 at full speed, but for
	 * TX we'll stick to one Ethernet packet plus RNDIS framing.
	 * For RX we handle drivers that zero-pad to end-of-packet.
	 * Don't let userspace change these settings.
	 *
	 * NOTE: there still seems to be wierdness here, as if we need
	 * to do some more things to make sure WinCE targets accept this.
	 * They default to jumbograms of 8KB or 16KB, which is absurd
	 * for such low data rates and which is also more than Linux
	 * can usually expect to allocate for SKB data...
	 */
	net->hard_header_len += sizeof (struct rndis_data_hdr);
	dev->hard_mtu = net->mtu + net->hard_header_len;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
	if (dev->maxpacket == 0) {
		netif_dbg(dev, probe, dev->net,
			  "dev->maxpacket can't be 0\n");
		retval = -EINVAL;
		goto fail_and_release;
	}

	/* round the RX urb size down to a multiple of maxpacket */
	dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1);
	dev->rx_urb_size &= ~(dev->maxpacket - 1);
	u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size);

	net->netdev_ops = &rndis_netdev_ops;

	retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
	if (unlikely(retval < 0)) {
		/* it might not even be an RNDIS device!! */
		dev_err(&intf->dev, "RNDIS init failed, %d\n", retval);
		goto fail_and_release;
	}
	tmp = le32_to_cpu(u.init_c->max_transfer_size);
	if (tmp < dev->hard_mtu) {
		if (tmp <= net->hard_header_len) {
			dev_err(&intf->dev,
				"dev can't take %u byte packets (max %u)\n",
				dev->hard_mtu, tmp);
			retval = -EINVAL;
			goto halt_fail_and_release;
		}
		dev_warn(&intf->dev,
			 "dev can't take %u byte packets (max %u), "
			 "adjusting MTU to %u\n",
			 dev->hard_mtu, tmp, tmp - net->hard_header_len);
		dev->hard_mtu = tmp;
		net->mtu = dev->hard_mtu - net->hard_header_len;
	}

	/* REVISIT:  peripheral "alignment" request is ignored ... */
	dev_dbg(&intf->dev,
		"hard mtu %u (%u from dev), rx buflen %Zu, align %d\n",
		dev->hard_mtu, tmp, dev->rx_urb_size,
		1 << le32_to_cpu(u.init_c->packet_alignment));

	/* module has some device initialization code needs to be done right
	 * after RNDIS_INIT */
	if (dev->driver_info->early_init &&
	    dev->driver_info->early_init(dev) != 0)
		goto halt_fail_and_release;

	/* Check physical medium */
	phym = NULL;
	reply_len = sizeof *phym;
	retval = rndis_query(dev, intf, u.buf, OID_GEN_PHYSICAL_MEDIUM,
			     0, (void **) &phym, &reply_len);
	if (retval != 0 || !phym) {
		/* OID is optional so don't fail here. */
		phym_unspec = RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED;
		phym = &phym_unspec;
	}
	if ((flags & FLAG_RNDIS_PHYM_WIRELESS) &&
	    *phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
		netif_dbg(dev, probe, dev->net,
			  "driver requires wireless physical medium, but device is not\n");
		retval = -ENODEV;
		goto halt_fail_and_release;
	}
	if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) &&
	    *phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
		netif_dbg(dev, probe, dev->net,
			  "driver requires non-wireless physical medium, but device is wireless.\n");
		retval = -ENODEV;
		goto halt_fail_and_release;
	}

	/* Get designated host ethernet address */
	reply_len = ETH_ALEN;
	retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS,
			     48, (void **) &bp, &reply_len);
	if (unlikely(retval < 0)) {
		dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval);
		goto halt_fail_and_release;
	}
	memcpy(net->dev_addr, bp, ETH_ALEN);
	memcpy(net->perm_addr, bp, ETH_ALEN);

	/* set a nonzero filter to enable data transfers */
	memset(u.set, 0, sizeof *u.set);
	u.set->msg_type = RNDIS_MSG_SET;
	u.set->msg_len = cpu_to_le32(4 + sizeof *u.set);
	u.set->oid = OID_GEN_CURRENT_PACKET_FILTER;
	u.set->len = cpu_to_le32(4);
	u.set->offset = cpu_to_le32((sizeof *u.set) - 8);
	*(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER;

	retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
	if (unlikely(retval < 0)) {
		dev_err(&intf->dev, "rndis set packet filter, %d\n", retval);
		goto halt_fail_and_release;
	}

	retval = 0;

	kfree(u.buf);
	return retval;

halt_fail_and_release:
	memset(u.halt, 0, sizeof *u.halt);
	u.halt->msg_type = RNDIS_MSG_HALT;
	u.halt->msg_len = cpu_to_le32(sizeof *u.halt);
	(void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE);
fail_and_release:
	usb_set_intfdata(info->data, NULL);
	usb_driver_release_interface(driver_of(intf), info->data);
	info->data = NULL;
fail:
	kfree(u.buf);
	return retval;
}
EXPORT_SYMBOL_GPL(generic_rndis_bind);

/* Default bind: plain RNDIS, wireless devices rejected. */
static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
{
	return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS);
}

/* Unbind: best-effort RNDIS_MSG_HALT, then normal CDC unbind. */
void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
{
	struct rndis_halt	*halt;

	/* try to clear any rndis state/activity (no i/o from stack!) */
	halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
	if (halt) {
		halt->msg_type = RNDIS_MSG_HALT;
		halt->msg_len = cpu_to_le32(sizeof *halt);
		(void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE);
		kfree(halt);
	}

	usbnet_cdc_unbind(dev, intf);
}
EXPORT_SYMBOL_GPL(rndis_unbind);

/*
 * DATA -- host must not write zlps
 *
 * Splits a possibly-batched RNDIS RX urb into individual packets,
 * returning clones of all but the last to the network stack.
 * Returns 1 so the caller submits the trimmed remainder, or 0 to
 * drop a malformed urb.
 */
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	/* peripheral may have batched packets to us... */
	while (likely(skb->len)) {
		struct rndis_data_hdr	*hdr = (void *)skb->data;
		struct sk_buff		*skb2;
		u32			msg_len, data_offset, data_len;

		msg_len = le32_to_cpu(hdr->msg_len);
		data_offset = le32_to_cpu(hdr->data_offset);
		data_len = le32_to_cpu(hdr->data_len);

		/* don't choke if we see oob, per-packet data, etc */
		if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET ||
			     skb->len < msg_len ||
			     (data_offset + data_len + 8) > msg_len)) {
			dev->net->stats.rx_frame_errors++;
			netdev_dbg(dev->net, "bad rndis message %d/%d/%d/%d, len %d\n",
				   le32_to_cpu(hdr->msg_type),
				   msg_len, data_offset, data_len, skb->len);
			return 0;
		}
		skb_pull(skb, 8 + data_offset);

		/* at most one packet left? */
		if (likely((data_len - skb->len) <= sizeof *hdr)) {
			skb_trim(skb, data_len);
			break;
		}

		/* try to return all the packets in the batch */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!skb2))
			break;
		skb_pull(skb, msg_len - sizeof *hdr);
		skb_trim(skb2, data_len);
		usbnet_skb_return(dev, skb2);
	}

	/* caller will usbnet_skb_return the remaining packet */
	return 1;
}
EXPORT_SYMBOL_GPL(rndis_rx_fixup);

/*
 * Prepend an RNDIS data header to an outgoing skb, reusing head/tail
 * room when possible and reallocating (with one byte of tailpad)
 * otherwise.  May free @skb and return a replacement, or NULL on
 * allocation failure.
 */
struct sk_buff *
rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	struct rndis_data_hdr	*hdr;
	struct sk_buff		*skb2;
	unsigned		len = skb->len;

	if (likely(!skb_cloned(skb))) {
		int	room = skb_headroom(skb);

		/* enough head room as-is? */
		if (unlikely((sizeof *hdr) <= room))
			goto fill;

		/* enough room, but needs to be readjusted? */
		room += skb_tailroom(skb);
		if (likely((sizeof *hdr) <= room)) {
			skb->data = memmove(skb->head + sizeof *hdr,
					    skb->data, len);
			skb_set_tail_pointer(skb, len);
			goto fill;
		}
	}

	/* create a new skb, with the correct size (and tailpad) */
	skb2 = skb_copy_expand(skb, sizeof *hdr, 1, flags);
	dev_kfree_skb_any(skb);
	if (unlikely(!skb2))
		return skb2;
	skb = skb2;

	/* fill out the RNDIS header.  we won't bother trying to batch
	 * packets; Linux minimizes wasted bandwidth through tx queues.
	 */
fill:
	hdr = (void *) __skb_push(skb, sizeof *hdr);
	memset(hdr, 0, sizeof *hdr);
	hdr->msg_type = RNDIS_MSG_PACKET;
	hdr->msg_len = cpu_to_le32(skb->len);
	hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8);
	hdr->data_len = cpu_to_le32(len);

	/* FIXME make the last packet always be short ... */
	return skb;
}
EXPORT_SYMBOL_GPL(rndis_tx_fixup);

static const struct driver_info	rndis_info = {
	.description =	"RNDIS device",
	.flags =	FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
	.bind =		rndis_bind,
	.unbind =	rndis_unbind,
	.status =	rndis_status,
	.rx_fixup =	rndis_rx_fixup,
	.tx_fixup =	rndis_tx_fixup,
};

/* Variant for devices that need the status endpoint polled before they
 * answer on the control channel (see rndis_command). */
static const struct driver_info	rndis_poll_status_info = {
	.description =	"RNDIS device (poll status before control)",
	.flags =	FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
	.data =		RNDIS_DRIVER_DATA_POLL_STATUS,
	.bind =		rndis_bind,
	.unbind =	rndis_unbind,
	.status =	rndis_status,
	.rx_fixup =	rndis_rx_fixup,
	.tx_fixup =	rndis_tx_fixup,
};

/*-------------------------------------------------------------------------*/

static const struct usb_device_id	products [] = {
{
	/* 2Wire HomePortal 1000SW */
	USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042,
				      USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
	.driver_info = (unsigned long) &rndis_poll_status_info,
}, {
	/* RNDIS is MSFT's un-official variant of CDC ACM */
	USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
	.driver_info = (unsigned long) &rndis_info,
}, {
	/* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
	USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
	.driver_info = (unsigned long) &rndis_poll_status_info,
}, {
	/* RNDIS for tethering */
	USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
	.driver_info = (unsigned long) &rndis_info,
},
	{ },		// END
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver rndis_driver = {
	.name =		"rndis_host",
	.id_table =	products,
	.probe =	usbnet_probe,
	.disconnect =	usbnet_disconnect,
	.suspend =	usbnet_suspend,
	.resume =	usbnet_resume,
};

static int __init rndis_init(void)
{
	return usb_register(&rndis_driver);
}
module_init(rndis_init);

static void __exit rndis_exit(void)
{
	usb_deregister(&rndis_driver);
}
module_exit(rndis_exit);

MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB Host side RNDIS driver");
MODULE_LICENSE("GPL");
gpl-2.0
paul-xxx/somc_light_kernel
arch/s390/kvm/kvm-s390.c
2925
23552
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* Per-vcpu statistics exposed through the kvm debugfs interface. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

/* Facility list pointer handed to vcpus via the SIE block (set up
 * elsewhere in this file — not visible in this chunk). */
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
/* /dev/kvm ioctl: only KVM_S390_ENABLE_SIE is supported on s390. */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

/* Report which KVM capabilities this architecture implements. */
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

/* VM-level ioctl handler: only floating-interrupt injection here. */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

/* Create a VM: validate @type (ucontrol VMs need CAP_SYS_ADMIN), enable
 * SIE, allocate the SCA page and a debug feature, and — unless this is
 * a user-controlled VM — allocate the guest address-space gmap.
 * Errors unwind via the goto chain in reverse allocation order. */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 3, "%s", "free cpu"); if (!kvm_is_ucontrol(vcpu->kvm)) { clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn); if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == (__u64) vcpu->arch.sie_block) vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; } smp_mb(); if (kvm_is_ucontrol(vcpu->kvm)) gmap_free(vcpu->arch.gmap); free_page((unsigned long)(vcpu->arch.sie_block)); kvm_vcpu_uninit(vcpu); kfree(vcpu); } static void kvm_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_destroy(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } void kvm_arch_sync_events(struct kvm *kvm) { } void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_free_vcpus(kvm); free_page((unsigned long)(kvm->arch.sca)); debug_unregister(kvm->arch.dbf); if (!kvm_is_ucontrol(kvm)) gmap_free(kvm->arch.gmap); } /* Section: vcpu related */ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { if (kvm_is_ucontrol(vcpu->kvm)) { vcpu->arch.gmap = gmap_alloc(current->mm); if (!vcpu->arch.gmap) return -ENOMEM; return 0; } vcpu->arch.gmap = vcpu->kvm->arch.gmap; vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS; return 0; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { /* Nothing todo */ } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { save_fp_regs(&vcpu->arch.host_fpregs); save_access_regs(vcpu->arch.host_acrs); vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK; restore_fp_regs(&vcpu->arch.guest_fpregs); restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); 
save_fp_regs(&vcpu->arch.guest_fpregs); save_access_regs(vcpu->run->s.regs.acrs); restore_fp_regs(&vcpu->arch.host_fpregs); restore_access_regs(vcpu->arch.host_acrs); } static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) { /* this equals initial cpu reset in pop, but we don't switch to ESA */ vcpu->arch.sie_block->gpsw.mask = 0UL; vcpu->arch.sie_block->gpsw.addr = 0UL; kvm_s390_set_prefix(vcpu, 0); vcpu->arch.sie_block->cputm = 0UL; vcpu->arch.sie_block->ckc = 0UL; vcpu->arch.sie_block->todpr = 0; memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); vcpu->arch.sie_block->gcr[0] = 0xE0UL; vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; vcpu->arch.guest_fpregs.fpc = 0; asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); vcpu->arch.sie_block->gbea = 1; } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM | CPUSTAT_STOPPED); vcpu->arch.sie_block->ecb = 6; vcpu->arch.sie_block->eca = 0xC1002001U; vcpu->arch.sie_block->fac = (int) (long) facilities; hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, (unsigned long) vcpu); vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; get_cpu_id(&vcpu->arch.cpu_id); vcpu->arch.cpu_id.version = 0xff; return 0; } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; int rc = -EINVAL; if (id >= KVM_MAX_VCPUS) goto out; rc = -ENOMEM; vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); if (!vcpu) goto out; vcpu->arch.sie_block = (struct kvm_s390_sie_block *) get_zeroed_page(GFP_KERNEL); if (!vcpu->arch.sie_block) goto out_free_cpu; vcpu->arch.sie_block->icpua = id; if (!kvm_is_ucontrol(kvm)) { if (!kvm->arch.sca) { WARN_ON_ONCE(1); goto out_free_cpu; } if (!kvm->arch.sca->cpu[id].sda) kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block; vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32); 
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); } spin_lock_init(&vcpu->arch.local_int.lock); INIT_LIST_HEAD(&vcpu->arch.local_int.list); vcpu->arch.local_int.float_int = &kvm->arch.float_int; spin_lock(&kvm->arch.float_int.lock); kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int; init_waitqueue_head(&vcpu->arch.local_int.wq); vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; spin_unlock(&kvm->arch.float_int.lock); rc = kvm_vcpu_init(vcpu, kvm, id); if (rc) goto out_free_sie_block; VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, vcpu->arch.sie_block); return vcpu; out_free_sie_block: free_page((unsigned long)(vcpu->arch.sie_block)); out_free_cpu: kfree(vcpu); out: return ERR_PTR(rc); } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { /* kvm common code refers to this, but never calls it */ BUG(); return 0; } static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) { kvm_s390_vcpu_initial_reset(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); return 0; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); return 0; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); restore_access_regs(vcpu->run->s.regs.acrs); return 0; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { memcpy(&vcpu->arch.guest_fpregs.fprs, 
&fpu->fprs, sizeof(fpu->fprs)); vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK; restore_fp_regs(&vcpu->arch.guest_fpregs); return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); fpu->fpc = vcpu->arch.guest_fpregs.fpc; return 0; } static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) { int rc = 0; if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED)) rc = -EBUSY; else { vcpu->run->psw_mask = psw.mask; vcpu->run->psw_addr = psw.addr; } return rc; } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { return -EINVAL; /* not implemented yet */ } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { return -EINVAL; /* not implemented yet */ } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -EINVAL; /* not implemented yet */ } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -EINVAL; /* not implemented yet */ } static int __vcpu_run(struct kvm_vcpu *vcpu) { int rc; memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); if (need_resched()) schedule(); if (test_thread_flag(TIF_MCCK_PENDING)) s390_handle_mcck(); if (!kvm_is_ucontrol(vcpu->kvm)) kvm_s390_deliver_pending_interrupts(vcpu); vcpu->arch.sie_block->icptcode = 0; local_irq_disable(); kvm_guest_enter(); local_irq_enable(); VCPU_EVENT(vcpu, 6, "entering sie flags %x", atomic_read(&vcpu->arch.sie_block->cpuflags)); rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); if (rc) { if (kvm_is_ucontrol(vcpu->kvm)) { rc = SIE_INTERCEPT_UCONTROL; } else { VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); rc = 0; } } VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", vcpu->arch.sie_block->icptcode); local_irq_disable(); 
kvm_guest_exit(); local_irq_enable(); memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); return rc; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int rc; sigset_t sigsaved; rerun_vcpu: if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL); switch (kvm_run->exit_reason) { case KVM_EXIT_S390_SIEIC: case KVM_EXIT_UNKNOWN: case KVM_EXIT_INTR: case KVM_EXIT_S390_RESET: case KVM_EXIT_S390_UCONTROL: break; default: BUG(); } vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) { kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX; kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); } if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS; memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); } might_fault(); do { rc = __vcpu_run(vcpu); if (rc) break; if (kvm_is_ucontrol(vcpu->kvm)) rc = -EOPNOTSUPP; else rc = kvm_handle_sie_intercept(vcpu); } while (!signal_pending(current) && !rc); if (rc == SIE_INTERCEPT_RERUNVCPU) goto rerun_vcpu; if (signal_pending(current) && !rc) { kvm_run->exit_reason = KVM_EXIT_INTR; rc = -EINTR; } #ifdef CONFIG_KVM_S390_UCONTROL if (rc == SIE_INTERCEPT_UCONTROL) { kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL; kvm_run->s390_ucontrol.trans_exc_code = current->thread.gmap_addr; kvm_run->s390_ucontrol.pgm_code = 0x10; rc = 0; } #endif if (rc == -EOPNOTSUPP) { /* intercept cannot be handled in-kernel, prepare kvm-run */ kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; rc = 0; } if (rc == -EREMOTE) { /* 
intercept was handled, but userspace support is needed * kvm_run has been prepared by the handler */ rc = 0; } kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix; memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); vcpu->stat.exit_userspace++; return rc; } static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from, unsigned long n, int prefix) { if (prefix) return copy_to_guest(vcpu, guestdest, from, n); else return copy_to_guest_absolute(vcpu, guestdest, from, n); } /* * store status at address * we use have two special cases: * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit * KVM_S390_STORE_STATUS_PREFIXED: -> prefix */ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) { unsigned char archmode = 1; int prefix; if (addr == KVM_S390_STORE_STATUS_NOADDR) { if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1)) return -EFAULT; addr = SAVE_AREA_BASE; prefix = 0; } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) { if (copy_to_guest(vcpu, 163ul, &archmode, 1)) return -EFAULT; addr = SAVE_AREA_BASE; prefix = 1; } else prefix = 0; if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), vcpu->arch.guest_fpregs.fprs, 128, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs), vcpu->run->s.regs.gprs, 128, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw), &vcpu->arch.sie_block->gpsw, 16, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg), &vcpu->arch.sie_block->prefix, 4, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_ctrl_reg), &vcpu->arch.guest_fpregs.fpc, 4, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg), &vcpu->arch.sie_block->todpr, 4, prefix)) 
return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer), &vcpu->arch.sie_block->cputm, 8, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp), &vcpu->arch.sie_block->ckc, 8, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs), &vcpu->run->s.regs.acrs, 64, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, ctrl_regs), &vcpu->arch.sie_block->gcr, 128, prefix)) return -EFAULT; return 0; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; r = -EFAULT; if (copy_from_user(&s390int, argp, sizeof(s390int))) break; r = kvm_s390_inject_vcpu(vcpu, &s390int); break; } case KVM_S390_STORE_STATUS: r = kvm_s390_vcpu_store_status(vcpu, arg); break; case KVM_S390_SET_INITIAL_PSW: { psw_t psw; r = -EFAULT; if (copy_from_user(&psw, argp, sizeof(psw))) break; r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); break; } case KVM_S390_INITIAL_RESET: r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); break; #ifdef CONFIG_KVM_S390_UCONTROL case KVM_S390_UCAS_MAP: { struct kvm_s390_ucas_mapping ucasmap; if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { r = -EFAULT; break; } if (!kvm_is_ucontrol(vcpu->kvm)) { r = -EINVAL; break; } r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, ucasmap.vcpu_addr, ucasmap.length); break; } case KVM_S390_UCAS_UNMAP: { struct kvm_s390_ucas_mapping ucasmap; if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { r = -EFAULT; break; } if (!kvm_is_ucontrol(vcpu->kvm)) { r = -EINVAL; break; } r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, ucasmap.length); break; } #endif case KVM_S390_VCPU_FAULT: { r = gmap_fault(arg, vcpu->arch.gmap); if (!IS_ERR_VALUE(r)) r = 0; break; } default: r = -ENOTTY; } return r; } int 
kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { #ifdef CONFIG_KVM_S390_UCONTROL if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) && (kvm_is_ucontrol(vcpu->kvm))) { vmf->page = virt_to_page(vcpu->arch.sie_block); get_page(vmf->page); return 0; } #endif return VM_FAULT_SIGBUS; } void kvm_arch_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { } int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) { return 0; } /* Section: memory related */ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, int user_alloc) { /* A few sanity checks. We can have exactly one memory slot which has to start at guest virtual zero and which has to be located at a page boundary in userland and which has to end at a page boundary. The memory in userland is ok to be fragmented into various different vmas. It is okay to mmap() and munmap() stuff in this slot after doing this call at any time */ if (mem->slot) return -EINVAL; if (mem->guest_phys_addr) return -EINVAL; if (mem->userspace_addr & 0xffffful) return -EINVAL; if (mem->memory_size & 0xffffful) return -EINVAL; if (!user_alloc) return -EINVAL; return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, int user_alloc) { int rc; rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, mem->guest_phys_addr, mem->memory_size); if (rc) printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"); return; } void kvm_arch_flush_shadow(struct kvm *kvm) { } static int __init kvm_s390_init(void) { int ret; ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); if (ret) return ret; /* * guests can ask for up to 255+1 double words, we need a full page * to hold the maximum amount of facilities. On the other hand, we * only set facilities that are known to work in KVM. 
*/ facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); if (!facilities) { kvm_exit(); return -ENOMEM; } memcpy(facilities, S390_lowcore.stfle_fac_list, 16); facilities[0] &= 0xff00fff3f47c0000ULL; facilities[1] &= 0x201c000000000000ULL; return 0; } static void __exit kvm_s390_exit(void) { free_page((unsigned long) facilities); kvm_exit(); } module_init(kvm_s390_init); module_exit(kvm_s390_exit);
gpl-2.0
ghsr/android_kernel_samsung_galaxys2plus-common
drivers/scsi/advansys.c
3181
383611
#define DRV_NAME "advansys" #define ASC_VERSION "3.4" /* AdvanSys Driver Version */ /* * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters * * Copyright (c) 1995-2000 Advanced System Products, Inc. * Copyright (c) 2000-2001 ConnectCom Solutions, Inc. * Copyright (c) 2007 Matthew Wilcox <matthew@wil.cx> * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* * As of March 8, 2000 Advanced System Products, Inc. (AdvanSys) * changed its name to ConnectCom Solutions, Inc. * On June 18, 2001 Initio Corp. acquired ConnectCom's SCSI assets */ #include <linux/module.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/isa.h> #include <linux/eisa.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> #include <asm/io.h> #include <asm/system.h> #include <asm/dma.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> /* FIXME: * * 1. Although all of the necessary command mapping places have the * appropriate dma_map.. APIs, the driver still processes its internal * queue using bus_to_virt() and virt_to_bus() which are illegal under * the API. The entire queue processing structure will need to be * altered to fix this. * 2. Need to add memory mapping workaround. Test the memory mapping. * If it doesn't work revert to I/O port access. Can a test be done * safely? * 3. Handle an interrupt not working. Keep an interrupt counter in * the interrupt handler. 
In the timeout function if the interrupt * has not occurred then print a message and run in polled mode. * 4. Need to add support for target mode commands, cf. CAM XPT. * 5. check DMA mapping functions for failure * 6. Use scsi_transport_spi * 7. advansys_info is not safe against multiple simultaneous callers * 8. Add module_param to override ISA/VLB ioport array */ #warning this driver is still not properly converted to the DMA API /* Enable driver /proc statistics. */ #define ADVANSYS_STATS /* Enable driver tracing. */ #undef ADVANSYS_DEBUG /* * Portable Data Types * * Any instance where a 32-bit long or pointer type is assumed * for precision or HW defined structures, the following define * types must be used. In Linux the char, short, and int types * are all consistent at 8, 16, and 32 bits respectively. Pointers * and long types are 64 bits on Alpha and UltraSPARC. */ #define ASC_PADDR __u32 /* Physical/Bus address data type. */ #define ASC_VADDR __u32 /* Virtual address data type. */ #define ASC_DCNT __u32 /* Unsigned Data count type. */ #define ASC_SDCNT __s32 /* Signed Data count type. */ typedef unsigned char uchar; #ifndef TRUE #define TRUE (1) #endif #ifndef FALSE #define FALSE (0) #endif #define ERR (-1) #define UW_ERR (uint)(0xFFFF) #define isodd_word(val) ((((uint)val) & (uint)0x0001) != 0) #define PCI_VENDOR_ID_ASP 0x10cd #define PCI_DEVICE_ID_ASP_1200A 0x1100 #define PCI_DEVICE_ID_ASP_ABP940 0x1200 #define PCI_DEVICE_ID_ASP_ABP940U 0x1300 #define PCI_DEVICE_ID_ASP_ABP940UW 0x2300 #define PCI_DEVICE_ID_38C0800_REV1 0x2500 #define PCI_DEVICE_ID_38C1600_REV1 0x2700 /* * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists. * The SRB structure will have to be changed and the ASC_SRB2SCSIQ() * macro re-defined to be able to obtain a ASC_SCSI_Q pointer from the * SRB structure. 
*/ #define CC_VERY_LONG_SG_LIST 0 #define ASC_SRB2SCSIQ(srb_ptr) (srb_ptr) #define PortAddr unsigned int /* port address size */ #define inp(port) inb(port) #define outp(port, byte) outb((byte), (port)) #define inpw(port) inw(port) #define outpw(port, word) outw((word), (port)) #define ASC_MAX_SG_QUEUE 7 #define ASC_MAX_SG_LIST 255 #define ASC_CS_TYPE unsigned short #define ASC_IS_ISA (0x0001) #define ASC_IS_ISAPNP (0x0081) #define ASC_IS_EISA (0x0002) #define ASC_IS_PCI (0x0004) #define ASC_IS_PCI_ULTRA (0x0104) #define ASC_IS_PCMCIA (0x0008) #define ASC_IS_MCA (0x0020) #define ASC_IS_VL (0x0040) #define ASC_IS_WIDESCSI_16 (0x0100) #define ASC_IS_WIDESCSI_32 (0x0200) #define ASC_IS_BIG_ENDIAN (0x8000) #define ASC_CHIP_MIN_VER_VL (0x01) #define ASC_CHIP_MAX_VER_VL (0x07) #define ASC_CHIP_MIN_VER_PCI (0x09) #define ASC_CHIP_MAX_VER_PCI (0x0F) #define ASC_CHIP_VER_PCI_BIT (0x08) #define ASC_CHIP_MIN_VER_ISA (0x11) #define ASC_CHIP_MIN_VER_ISA_PNP (0x21) #define ASC_CHIP_MAX_VER_ISA (0x27) #define ASC_CHIP_VER_ISA_BIT (0x30) #define ASC_CHIP_VER_ISAPNP_BIT (0x20) #define ASC_CHIP_VER_ASYN_BUG (0x21) #define ASC_CHIP_VER_PCI 0x08 #define ASC_CHIP_VER_PCI_ULTRA_3150 (ASC_CHIP_VER_PCI | 0x02) #define ASC_CHIP_VER_PCI_ULTRA_3050 (ASC_CHIP_VER_PCI | 0x03) #define ASC_CHIP_MIN_VER_EISA (0x41) #define ASC_CHIP_MAX_VER_EISA (0x47) #define ASC_CHIP_VER_EISA_BIT (0x40) #define ASC_CHIP_LATEST_VER_EISA ((ASC_CHIP_MIN_VER_EISA - 1) + 3) #define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL) #define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL) #define ASC_MAX_ISA_DMA_COUNT (0x00FFFFFFL) #define ASC_SCSI_ID_BITS 3 #define ASC_SCSI_TIX_TYPE uchar #define ASC_ALL_DEVICE_BIT_SET 0xFF #define ASC_SCSI_BIT_ID_TYPE uchar #define ASC_MAX_TID 7 #define ASC_MAX_LUN 7 #define ASC_SCSI_WIDTH_BIT_SET 0xFF #define ASC_MAX_SENSE_LEN 32 #define ASC_MIN_SENSE_LEN 14 #define ASC_SCSI_RESET_HOLD_TIME_US 60 /* * Narrow boards only support 12-byte commands, while wide boards * extend to 16-byte commands. 
*/ #define ASC_MAX_CDB_LEN 12 #define ADV_MAX_CDB_LEN 16 #define MS_SDTR_LEN 0x03 #define MS_WDTR_LEN 0x02 #define ASC_SG_LIST_PER_Q 7 #define QS_FREE 0x00 #define QS_READY 0x01 #define QS_DISC1 0x02 #define QS_DISC2 0x04 #define QS_BUSY 0x08 #define QS_ABORTED 0x40 #define QS_DONE 0x80 #define QC_NO_CALLBACK 0x01 #define QC_SG_SWAP_QUEUE 0x02 #define QC_SG_HEAD 0x04 #define QC_DATA_IN 0x08 #define QC_DATA_OUT 0x10 #define QC_URGENT 0x20 #define QC_MSG_OUT 0x40 #define QC_REQ_SENSE 0x80 #define QCSG_SG_XFER_LIST 0x02 #define QCSG_SG_XFER_MORE 0x04 #define QCSG_SG_XFER_END 0x08 #define QD_IN_PROGRESS 0x00 #define QD_NO_ERROR 0x01 #define QD_ABORTED_BY_HOST 0x02 #define QD_WITH_ERROR 0x04 #define QD_INVALID_REQUEST 0x80 #define QD_INVALID_HOST_NUM 0x81 #define QD_INVALID_DEVICE 0x82 #define QD_ERR_INTERNAL 0xFF #define QHSTA_NO_ERROR 0x00 #define QHSTA_M_SEL_TIMEOUT 0x11 #define QHSTA_M_DATA_OVER_RUN 0x12 #define QHSTA_M_DATA_UNDER_RUN 0x12 #define QHSTA_M_UNEXPECTED_BUS_FREE 0x13 #define QHSTA_M_BAD_BUS_PHASE_SEQ 0x14 #define QHSTA_D_QDONE_SG_LIST_CORRUPTED 0x21 #define QHSTA_D_ASC_DVC_ERROR_CODE_SET 0x22 #define QHSTA_D_HOST_ABORT_FAILED 0x23 #define QHSTA_D_EXE_SCSI_Q_FAILED 0x24 #define QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT 0x25 #define QHSTA_D_ASPI_NO_BUF_POOL 0x26 #define QHSTA_M_WTM_TIMEOUT 0x41 #define QHSTA_M_BAD_CMPL_STATUS_IN 0x42 #define QHSTA_M_NO_AUTO_REQ_SENSE 0x43 #define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44 #define QHSTA_M_TARGET_STATUS_BUSY 0x45 #define QHSTA_M_BAD_TAG_CODE 0x46 #define QHSTA_M_BAD_QUEUE_FULL_OR_BUSY 0x47 #define QHSTA_M_HUNG_REQ_SCSI_BUS_RESET 0x48 #define QHSTA_D_LRAM_CMP_ERROR 0x81 #define QHSTA_M_MICRO_CODE_ERROR_HALT 0xA1 #define ASC_FLAG_SCSIQ_REQ 0x01 #define ASC_FLAG_BIOS_SCSIQ_REQ 0x02 #define ASC_FLAG_BIOS_ASYNC_IO 0x04 #define ASC_FLAG_SRB_LINEAR_ADDR 0x08 #define ASC_FLAG_WIN16 0x10 #define ASC_FLAG_WIN32 0x20 #define ASC_FLAG_ISA_OVER_16MB 0x40 #define ASC_FLAG_DOS_VM_CALLBACK 0x80 #define ASC_TAG_FLAG_EXTRA_BYTES 0x10 #define 
ASC_TAG_FLAG_DISABLE_DISCONNECT 0x04 #define ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX 0x08 #define ASC_TAG_FLAG_DISABLE_CHK_COND_INT_HOST 0x40 #define ASC_SCSIQ_CPY_BEG 4 #define ASC_SCSIQ_SGHD_CPY_BEG 2 #define ASC_SCSIQ_B_FWD 0 #define ASC_SCSIQ_B_BWD 1 #define ASC_SCSIQ_B_STATUS 2 #define ASC_SCSIQ_B_QNO 3 #define ASC_SCSIQ_B_CNTL 4 #define ASC_SCSIQ_B_SG_QUEUE_CNT 5 #define ASC_SCSIQ_D_DATA_ADDR 8 #define ASC_SCSIQ_D_DATA_CNT 12 #define ASC_SCSIQ_B_SENSE_LEN 20 #define ASC_SCSIQ_DONE_INFO_BEG 22 #define ASC_SCSIQ_D_SRBPTR 22 #define ASC_SCSIQ_B_TARGET_IX 26 #define ASC_SCSIQ_B_CDB_LEN 28 #define ASC_SCSIQ_B_TAG_CODE 29 #define ASC_SCSIQ_W_VM_ID 30 #define ASC_SCSIQ_DONE_STATUS 32 #define ASC_SCSIQ_HOST_STATUS 33 #define ASC_SCSIQ_SCSI_STATUS 34 #define ASC_SCSIQ_CDB_BEG 36 #define ASC_SCSIQ_DW_REMAIN_XFER_ADDR 56 #define ASC_SCSIQ_DW_REMAIN_XFER_CNT 60 #define ASC_SCSIQ_B_FIRST_SG_WK_QP 48 #define ASC_SCSIQ_B_SG_WK_QP 49 #define ASC_SCSIQ_B_SG_WK_IX 50 #define ASC_SCSIQ_W_ALT_DC1 52 #define ASC_SCSIQ_B_LIST_CNT 6 #define ASC_SCSIQ_B_CUR_LIST_CNT 7 #define ASC_SGQ_B_SG_CNTL 4 #define ASC_SGQ_B_SG_HEAD_QP 5 #define ASC_SGQ_B_SG_LIST_CNT 6 #define ASC_SGQ_B_SG_CUR_LIST_CNT 7 #define ASC_SGQ_LIST_BEG 8 #define ASC_DEF_SCSI1_QNG 4 #define ASC_MAX_SCSI1_QNG 4 #define ASC_DEF_SCSI2_QNG 16 #define ASC_MAX_SCSI2_QNG 32 #define ASC_TAG_CODE_MASK 0x23 #define ASC_STOP_REQ_RISC_STOP 0x01 #define ASC_STOP_ACK_RISC_STOP 0x03 #define ASC_STOP_CLEAN_UP_BUSY_Q 0x10 #define ASC_STOP_CLEAN_UP_DISC_Q 0x20 #define ASC_STOP_HOST_REQ_RISC_HALT 0x40 #define ASC_TIDLUN_TO_IX(tid, lun) (ASC_SCSI_TIX_TYPE)((tid) + ((lun)<<ASC_SCSI_ID_BITS)) #define ASC_TID_TO_TARGET_ID(tid) (ASC_SCSI_BIT_ID_TYPE)(0x01 << (tid)) #define ASC_TIX_TO_TARGET_ID(tix) (0x01 << ((tix) & ASC_MAX_TID)) #define ASC_TIX_TO_TID(tix) ((tix) & ASC_MAX_TID) #define ASC_TID_TO_TIX(tid) ((tid) & ASC_MAX_TID) #define ASC_TIX_TO_LUN(tix) (((tix) >> ASC_SCSI_ID_BITS) & ASC_MAX_LUN) #define ASC_QNO_TO_QADDR(q_no) 
((ASC_QADR_BEG)+((int)(q_no) << 6)) typedef struct asc_scsiq_1 { uchar status; uchar q_no; uchar cntl; uchar sg_queue_cnt; uchar target_id; uchar target_lun; ASC_PADDR data_addr; ASC_DCNT data_cnt; ASC_PADDR sense_addr; uchar sense_len; uchar extra_bytes; } ASC_SCSIQ_1; typedef struct asc_scsiq_2 { ASC_VADDR srb_ptr; uchar target_ix; uchar flag; uchar cdb_len; uchar tag_code; ushort vm_id; } ASC_SCSIQ_2; typedef struct asc_scsiq_3 { uchar done_stat; uchar host_stat; uchar scsi_stat; uchar scsi_msg; } ASC_SCSIQ_3; typedef struct asc_scsiq_4 { uchar cdb[ASC_MAX_CDB_LEN]; uchar y_first_sg_list_qp; uchar y_working_sg_qp; uchar y_working_sg_ix; uchar y_res; ushort x_req_count; ushort x_reconnect_rtn; ASC_PADDR x_saved_data_addr; ASC_DCNT x_saved_data_cnt; } ASC_SCSIQ_4; typedef struct asc_q_done_info { ASC_SCSIQ_2 d2; ASC_SCSIQ_3 d3; uchar q_status; uchar q_no; uchar cntl; uchar sense_len; uchar extra_bytes; uchar res; ASC_DCNT remain_bytes; } ASC_QDONE_INFO; typedef struct asc_sg_list { ASC_PADDR addr; ASC_DCNT bytes; } ASC_SG_LIST; typedef struct asc_sg_head { ushort entry_cnt; ushort queue_cnt; ushort entry_to_copy; ushort res; ASC_SG_LIST sg_list[0]; } ASC_SG_HEAD; typedef struct asc_scsi_q { ASC_SCSIQ_1 q1; ASC_SCSIQ_2 q2; uchar *cdbptr; ASC_SG_HEAD *sg_head; ushort remain_sg_entry_cnt; ushort next_sg_index; } ASC_SCSI_Q; typedef struct asc_scsi_req_q { ASC_SCSIQ_1 r1; ASC_SCSIQ_2 r2; uchar *cdbptr; ASC_SG_HEAD *sg_head; uchar *sense_ptr; ASC_SCSIQ_3 r3; uchar cdb[ASC_MAX_CDB_LEN]; uchar sense[ASC_MIN_SENSE_LEN]; } ASC_SCSI_REQ_Q; typedef struct asc_scsi_bios_req_q { ASC_SCSIQ_1 r1; ASC_SCSIQ_2 r2; uchar *cdbptr; ASC_SG_HEAD *sg_head; uchar *sense_ptr; ASC_SCSIQ_3 r3; uchar cdb[ASC_MAX_CDB_LEN]; uchar sense[ASC_MIN_SENSE_LEN]; } ASC_SCSI_BIOS_REQ_Q; typedef struct asc_risc_q { uchar fwd; uchar bwd; ASC_SCSIQ_1 i1; ASC_SCSIQ_2 i2; ASC_SCSIQ_3 i3; ASC_SCSIQ_4 i4; } ASC_RISC_Q; typedef struct asc_sg_list_q { uchar seq_no; uchar q_no; uchar cntl; uchar sg_head_qp; 
uchar sg_list_cnt; uchar sg_cur_list_cnt; } ASC_SG_LIST_Q; typedef struct asc_risc_sg_list_q { uchar fwd; uchar bwd; ASC_SG_LIST_Q sg; ASC_SG_LIST sg_list[7]; } ASC_RISC_SG_LIST_Q; #define ASCQ_ERR_Q_STATUS 0x0D #define ASCQ_ERR_CUR_QNG 0x17 #define ASCQ_ERR_SG_Q_LINKS 0x18 #define ASCQ_ERR_ISR_RE_ENTRY 0x1A #define ASCQ_ERR_CRITICAL_RE_ENTRY 0x1B #define ASCQ_ERR_ISR_ON_CRITICAL 0x1C /* * Warning code values are set in ASC_DVC_VAR 'warn_code'. */ #define ASC_WARN_NO_ERROR 0x0000 #define ASC_WARN_IO_PORT_ROTATE 0x0001 #define ASC_WARN_EEPROM_CHKSUM 0x0002 #define ASC_WARN_IRQ_MODIFIED 0x0004 #define ASC_WARN_AUTO_CONFIG 0x0008 #define ASC_WARN_CMD_QNG_CONFLICT 0x0010 #define ASC_WARN_EEPROM_RECOVER 0x0020 #define ASC_WARN_CFG_MSW_RECOVER 0x0040 /* * Error code values are set in {ASC/ADV}_DVC_VAR 'err_code'. */ #define ASC_IERR_NO_CARRIER 0x0001 /* No more carrier memory */ #define ASC_IERR_MCODE_CHKSUM 0x0002 /* micro code check sum error */ #define ASC_IERR_SET_PC_ADDR 0x0004 #define ASC_IERR_START_STOP_CHIP 0x0008 /* start/stop chip failed */ #define ASC_IERR_ILLEGAL_CONNECTION 0x0010 /* Illegal cable connection */ #define ASC_IERR_SINGLE_END_DEVICE 0x0020 /* SE device on DIFF bus */ #define ASC_IERR_REVERSED_CABLE 0x0040 /* Narrow flat cable reversed */ #define ASC_IERR_SET_SCSI_ID 0x0080 /* set SCSI ID failed */ #define ASC_IERR_HVD_DEVICE 0x0100 /* HVD device on LVD port */ #define ASC_IERR_BAD_SIGNATURE 0x0200 /* signature not found */ #define ASC_IERR_NO_BUS_TYPE 0x0400 #define ASC_IERR_BIST_PRE_TEST 0x0800 /* BIST pre-test error */ #define ASC_IERR_BIST_RAM_TEST 0x1000 /* BIST RAM test error */ #define ASC_IERR_BAD_CHIPTYPE 0x2000 /* Invalid chip_type setting */ #define ASC_DEF_MAX_TOTAL_QNG (0xF0) #define ASC_MIN_TAG_Q_PER_DVC (0x04) #define ASC_MIN_FREE_Q (0x02) #define ASC_MIN_TOTAL_QNG ((ASC_MAX_SG_QUEUE)+(ASC_MIN_FREE_Q)) #define ASC_MAX_TOTAL_QNG 240 #define ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG 16 #define ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG 8 #define 
ASC_MAX_PCI_INRAM_TOTAL_QNG 20 #define ASC_MAX_INRAM_TAG_QNG 16 #define ASC_IOADR_GAP 0x10 #define ASC_SYN_MAX_OFFSET 0x0F #define ASC_DEF_SDTR_OFFSET 0x0F #define ASC_SDTR_ULTRA_PCI_10MB_INDEX 0x02 #define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41 /* The narrow chip only supports a limited selection of transfer rates. * These are encoded in the range 0..7 or 0..15 depending whether the chip * is Ultra-capable or not. These tables let us convert from one to the other. */ static const unsigned char asc_syn_xfer_period[8] = { 25, 30, 35, 40, 50, 60, 70, 85 }; static const unsigned char asc_syn_ultra_xfer_period[16] = { 12, 19, 25, 32, 38, 44, 50, 57, 63, 69, 75, 82, 88, 94, 100, 107 }; typedef struct ext_msg { uchar msg_type; uchar msg_len; uchar msg_req; union { struct { uchar sdtr_xfer_period; uchar sdtr_req_ack_offset; } sdtr; struct { uchar wdtr_width; } wdtr; struct { uchar mdp_b3; uchar mdp_b2; uchar mdp_b1; uchar mdp_b0; } mdp; } u_ext_msg; uchar res; } EXT_MSG; #define xfer_period u_ext_msg.sdtr.sdtr_xfer_period #define req_ack_offset u_ext_msg.sdtr.sdtr_req_ack_offset #define wdtr_width u_ext_msg.wdtr.wdtr_width #define mdp_b3 u_ext_msg.mdp_b3 #define mdp_b2 u_ext_msg.mdp_b2 #define mdp_b1 u_ext_msg.mdp_b1 #define mdp_b0 u_ext_msg.mdp_b0 typedef struct asc_dvc_cfg { ASC_SCSI_BIT_ID_TYPE can_tagged_qng; ASC_SCSI_BIT_ID_TYPE cmd_qng_enabled; ASC_SCSI_BIT_ID_TYPE disc_enable; ASC_SCSI_BIT_ID_TYPE sdtr_enable; uchar chip_scsi_id; uchar isa_dma_speed; uchar isa_dma_channel; uchar chip_version; ushort mcode_date; ushort mcode_version; uchar max_tag_qng[ASC_MAX_TID + 1]; uchar sdtr_period_offset[ASC_MAX_TID + 1]; uchar adapter_info[6]; } ASC_DVC_CFG; #define ASC_DEF_DVC_CNTL 0xFFFF #define ASC_DEF_CHIP_SCSI_ID 7 #define ASC_DEF_ISA_DMA_SPEED 4 #define ASC_INIT_STATE_BEG_GET_CFG 0x0001 #define ASC_INIT_STATE_END_GET_CFG 0x0002 #define ASC_INIT_STATE_BEG_SET_CFG 0x0004 #define ASC_INIT_STATE_END_SET_CFG 0x0008 #define ASC_INIT_STATE_BEG_LOAD_MC 0x0010 #define 
ASC_INIT_STATE_END_LOAD_MC 0x0020 #define ASC_INIT_STATE_BEG_INQUIRY 0x0040 #define ASC_INIT_STATE_END_INQUIRY 0x0080 #define ASC_INIT_RESET_SCSI_DONE 0x0100 #define ASC_INIT_STATE_WITHOUT_EEP 0x8000 #define ASC_BUG_FIX_IF_NOT_DWB 0x0001 #define ASC_BUG_FIX_ASYN_USE_SYN 0x0002 #define ASC_MIN_TAGGED_CMD 7 #define ASC_MAX_SCSI_RESET_WAIT 30 #define ASC_OVERRUN_BSIZE 64 struct asc_dvc_var; /* Forward Declaration. */ typedef struct asc_dvc_var { PortAddr iop_base; ushort err_code; ushort dvc_cntl; ushort bug_fix_cntl; ushort bus_type; ASC_SCSI_BIT_ID_TYPE init_sdtr; ASC_SCSI_BIT_ID_TYPE sdtr_done; ASC_SCSI_BIT_ID_TYPE use_tagged_qng; ASC_SCSI_BIT_ID_TYPE unit_not_ready; ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; ASC_SCSI_BIT_ID_TYPE start_motor; uchar *overrun_buf; dma_addr_t overrun_dma; uchar scsi_reset_wait; uchar chip_no; char is_in_int; uchar max_total_qng; uchar cur_total_qng; uchar in_critical_cnt; uchar last_q_shortage; ushort init_state; uchar cur_dvc_qng[ASC_MAX_TID + 1]; uchar max_dvc_qng[ASC_MAX_TID + 1]; ASC_SCSI_Q *scsiq_busy_head[ASC_MAX_TID + 1]; ASC_SCSI_Q *scsiq_busy_tail[ASC_MAX_TID + 1]; const uchar *sdtr_period_tbl; ASC_DVC_CFG *cfg; ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer_always; char redo_scam; ushort res2; uchar dos_int13_table[ASC_MAX_TID + 1]; ASC_DCNT max_dma_count; ASC_SCSI_BIT_ID_TYPE no_scam; ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer; uchar min_sdtr_index; uchar max_sdtr_index; struct asc_board *drv_ptr; int ptr_map_count; void **ptr_map; ASC_DCNT uc_break; } ASC_DVC_VAR; typedef struct asc_dvc_inq_info { uchar type[ASC_MAX_TID + 1][ASC_MAX_LUN + 1]; } ASC_DVC_INQ_INFO; typedef struct asc_cap_info { ASC_DCNT lba; ASC_DCNT blk_size; } ASC_CAP_INFO; typedef struct asc_cap_info_array { ASC_CAP_INFO cap_info[ASC_MAX_TID + 1][ASC_MAX_LUN + 1]; } ASC_CAP_INFO_ARRAY; #define ASC_MCNTL_NO_SEL_TIMEOUT (ushort)0x0001 #define ASC_MCNTL_NULL_TARGET (ushort)0x0002 #define ASC_CNTL_INITIATOR (ushort)0x0001 #define ASC_CNTL_BIOS_GT_1GB (ushort)0x0002 #define 
ASC_CNTL_BIOS_GT_2_DISK (ushort)0x0004
#define ASC_CNTL_BIOS_REMOVABLE (ushort)0x0008
#define ASC_CNTL_NO_SCAM (ushort)0x0010
#define ASC_CNTL_INT_MULTI_Q (ushort)0x0080
#define ASC_CNTL_NO_LUN_SUPPORT (ushort)0x0040
#define ASC_CNTL_NO_VERIFY_COPY (ushort)0x0100
#define ASC_CNTL_RESET_SCSI (ushort)0x0200
#define ASC_CNTL_INIT_INQUIRY (ushort)0x0400
#define ASC_CNTL_INIT_VERBOSE (ushort)0x0800
#define ASC_CNTL_SCSI_PARITY (ushort)0x1000
#define ASC_CNTL_BURST_MODE (ushort)0x2000
#define ASC_CNTL_SDTR_ENABLE_ULTRA (ushort)0x4000

/* EEPROM layout bounds ("_VL" presumably the VLB variant — confirm). */
#define ASC_EEP_DVC_CFG_BEG_VL 2
#define ASC_EEP_MAX_DVC_ADDR_VL 15
#define ASC_EEP_DVC_CFG_BEG 32
#define ASC_EEP_MAX_DVC_ADDR 45
#define ASC_EEP_MAX_RETRY 20

/*
 * These macros keep the chip SCSI id and ISA DMA speed
 * bitfields in board order. C bitfields aren't portable
 * between big and little-endian platforms so they are
 * not used.
 */
#define ASC_EEP_GET_CHIP_ID(cfg) ((cfg)->id_speed & 0x0f)
#define ASC_EEP_GET_DMA_SPD(cfg) (((cfg)->id_speed & 0xf0) >> 4)
#define ASC_EEP_SET_CHIP_ID(cfg, sid) \
	((cfg)->id_speed = ((cfg)->id_speed & 0xf0) | ((sid) & ASC_MAX_TID))
#define ASC_EEP_SET_DMA_SPD(cfg, spd) \
	((cfg)->id_speed = ((cfg)->id_speed & 0x0f) | ((spd) & 0x0f) << 4)

/* In-memory image of the narrow-chip EEPROM configuration. */
typedef struct asceep_config {
	ushort cfg_lsw;
	ushort cfg_msw;
	uchar init_sdtr;
	uchar disc_enable;
	uchar use_cmd_qng;
	uchar start_motor;
	uchar max_total_qng;
	uchar max_tag_qng;
	uchar bios_scan;
	uchar power_up_wait;
	uchar no_scam;
	uchar id_speed;		/* low order 4 bits is chip scsi id */
				/* high order 4 bits is isa dma speed */
	uchar dos_int13_table[ASC_MAX_TID + 1];
	uchar adapter_info[6];
	ushort cntl;
	ushort chksum;
} ASCEEP_CONFIG;

/* EEPROM serial command opcodes. */
#define ASC_EEP_CMD_READ 0x80
#define ASC_EEP_CMD_WRITE 0x40
#define ASC_EEP_CMD_WRITE_ABLE 0x30
#define ASC_EEP_CMD_WRITE_DISABLE 0x00

/* LRAM offsets of the microcode message-out buffer. */
#define ASCV_MSGOUT_BEG 0x0000
#define ASCV_MSGOUT_SDTR_PERIOD (ASCV_MSGOUT_BEG+3)
#define ASCV_MSGOUT_SDTR_OFFSET (ASCV_MSGOUT_BEG+4)
#define ASCV_BREAK_SAVED_CODE (ushort)0x0006
#define ASCV_MSGIN_BEG
(ASCV_MSGOUT_BEG+8) #define ASCV_MSGIN_SDTR_PERIOD (ASCV_MSGIN_BEG+3) #define ASCV_MSGIN_SDTR_OFFSET (ASCV_MSGIN_BEG+4) #define ASCV_SDTR_DATA_BEG (ASCV_MSGIN_BEG+8) #define ASCV_SDTR_DONE_BEG (ASCV_SDTR_DATA_BEG+8) #define ASCV_MAX_DVC_QNG_BEG (ushort)0x0020 #define ASCV_BREAK_ADDR (ushort)0x0028 #define ASCV_BREAK_NOTIFY_COUNT (ushort)0x002A #define ASCV_BREAK_CONTROL (ushort)0x002C #define ASCV_BREAK_HIT_COUNT (ushort)0x002E #define ASCV_ASCDVC_ERR_CODE_W (ushort)0x0030 #define ASCV_MCODE_CHKSUM_W (ushort)0x0032 #define ASCV_MCODE_SIZE_W (ushort)0x0034 #define ASCV_STOP_CODE_B (ushort)0x0036 #define ASCV_DVC_ERR_CODE_B (ushort)0x0037 #define ASCV_OVERRUN_PADDR_D (ushort)0x0038 #define ASCV_OVERRUN_BSIZE_D (ushort)0x003C #define ASCV_HALTCODE_W (ushort)0x0040 #define ASCV_CHKSUM_W (ushort)0x0042 #define ASCV_MC_DATE_W (ushort)0x0044 #define ASCV_MC_VER_W (ushort)0x0046 #define ASCV_NEXTRDY_B (ushort)0x0048 #define ASCV_DONENEXT_B (ushort)0x0049 #define ASCV_USE_TAGGED_QNG_B (ushort)0x004A #define ASCV_SCSIBUSY_B (ushort)0x004B #define ASCV_Q_DONE_IN_PROGRESS_B (ushort)0x004C #define ASCV_CURCDB_B (ushort)0x004D #define ASCV_RCLUN_B (ushort)0x004E #define ASCV_BUSY_QHEAD_B (ushort)0x004F #define ASCV_DISC1_QHEAD_B (ushort)0x0050 #define ASCV_DISC_ENABLE_B (ushort)0x0052 #define ASCV_CAN_TAGGED_QNG_B (ushort)0x0053 #define ASCV_HOSTSCSI_ID_B (ushort)0x0055 #define ASCV_MCODE_CNTL_B (ushort)0x0056 #define ASCV_NULL_TARGET_B (ushort)0x0057 #define ASCV_FREE_Q_HEAD_W (ushort)0x0058 #define ASCV_DONE_Q_TAIL_W (ushort)0x005A #define ASCV_FREE_Q_HEAD_B (ushort)(ASCV_FREE_Q_HEAD_W+1) #define ASCV_DONE_Q_TAIL_B (ushort)(ASCV_DONE_Q_TAIL_W+1) #define ASCV_HOST_FLAG_B (ushort)0x005D #define ASCV_TOTAL_READY_Q_B (ushort)0x0064 #define ASCV_VER_SERIAL_B (ushort)0x0065 #define ASCV_HALTCODE_SAVED_W (ushort)0x0066 #define ASCV_WTM_FLAG_B (ushort)0x0068 #define ASCV_RISC_FLAG_B (ushort)0x006A #define ASCV_REQ_SG_LIST_QP (ushort)0x006B #define ASC_HOST_FLAG_IN_ISR 0x01 #define 
ASC_HOST_FLAG_ACK_INT 0x02 #define ASC_RISC_FLAG_GEN_INT 0x01 #define ASC_RISC_FLAG_REQ_SG_LIST 0x02 #define IOP_CTRL (0x0F) #define IOP_STATUS (0x0E) #define IOP_INT_ACK IOP_STATUS #define IOP_REG_IFC (0x0D) #define IOP_SYN_OFFSET (0x0B) #define IOP_EXTRA_CONTROL (0x0D) #define IOP_REG_PC (0x0C) #define IOP_RAM_ADDR (0x0A) #define IOP_RAM_DATA (0x08) #define IOP_EEP_DATA (0x06) #define IOP_EEP_CMD (0x07) #define IOP_VERSION (0x03) #define IOP_CONFIG_HIGH (0x04) #define IOP_CONFIG_LOW (0x02) #define IOP_SIG_BYTE (0x01) #define IOP_SIG_WORD (0x00) #define IOP_REG_DC1 (0x0E) #define IOP_REG_DC0 (0x0C) #define IOP_REG_SB (0x0B) #define IOP_REG_DA1 (0x0A) #define IOP_REG_DA0 (0x08) #define IOP_REG_SC (0x09) #define IOP_DMA_SPEED (0x07) #define IOP_REG_FLAG (0x07) #define IOP_FIFO_H (0x06) #define IOP_FIFO_L (0x04) #define IOP_REG_ID (0x05) #define IOP_REG_QP (0x03) #define IOP_REG_IH (0x02) #define IOP_REG_IX (0x01) #define IOP_REG_AX (0x00) #define IFC_REG_LOCK (0x00) #define IFC_REG_UNLOCK (0x09) #define IFC_WR_EN_FILTER (0x10) #define IFC_RD_NO_EEPROM (0x10) #define IFC_SLEW_RATE (0x20) #define IFC_ACT_NEG (0x40) #define IFC_INP_FILTER (0x80) #define IFC_INIT_DEFAULT (IFC_ACT_NEG | IFC_REG_UNLOCK) #define SC_SEL (uchar)(0x80) #define SC_BSY (uchar)(0x40) #define SC_ACK (uchar)(0x20) #define SC_REQ (uchar)(0x10) #define SC_ATN (uchar)(0x08) #define SC_IO (uchar)(0x04) #define SC_CD (uchar)(0x02) #define SC_MSG (uchar)(0x01) #define SEC_SCSI_CTL (uchar)(0x80) #define SEC_ACTIVE_NEGATE (uchar)(0x40) #define SEC_SLEW_RATE (uchar)(0x20) #define SEC_ENABLE_FILTER (uchar)(0x10) #define ASC_HALT_EXTMSG_IN (ushort)0x8000 #define ASC_HALT_CHK_CONDITION (ushort)0x8100 #define ASC_HALT_SS_QUEUE_FULL (ushort)0x8200 #define ASC_HALT_DISABLE_ASYN_USE_SYN_FIX (ushort)0x8300 #define ASC_HALT_ENABLE_ASYN_USE_SYN_FIX (ushort)0x8400 #define ASC_HALT_SDTR_REJECTED (ushort)0x4000 #define ASC_HALT_HOST_COPY_SG_LIST_TO_RISC ( ushort )0x2000 #define ASC_MAX_QNO 0xF8 #define ASC_DATA_SEC_BEG 
(ushort)0x0080 #define ASC_DATA_SEC_END (ushort)0x0080 #define ASC_CODE_SEC_BEG (ushort)0x0080 #define ASC_CODE_SEC_END (ushort)0x0080 #define ASC_QADR_BEG (0x4000) #define ASC_QADR_USED (ushort)(ASC_MAX_QNO * 64) #define ASC_QADR_END (ushort)0x7FFF #define ASC_QLAST_ADR (ushort)0x7FC0 #define ASC_QBLK_SIZE 0x40 #define ASC_BIOS_DATA_QBEG 0xF8 #define ASC_MIN_ACTIVE_QNO 0x01 #define ASC_QLINK_END 0xFF #define ASC_EEPROM_WORDS 0x10 #define ASC_MAX_MGS_LEN 0x10 #define ASC_BIOS_ADDR_DEF 0xDC00 #define ASC_BIOS_SIZE 0x3800 #define ASC_BIOS_RAM_OFF 0x3800 #define ASC_BIOS_RAM_SIZE 0x800 #define ASC_BIOS_MIN_ADDR 0xC000 #define ASC_BIOS_MAX_ADDR 0xEC00 #define ASC_BIOS_BANK_SIZE 0x0400 #define ASC_MCODE_START_ADDR 0x0080 #define ASC_CFG0_HOST_INT_ON 0x0020 #define ASC_CFG0_BIOS_ON 0x0040 #define ASC_CFG0_VERA_BURST_ON 0x0080 #define ASC_CFG0_SCSI_PARITY_ON 0x0800 #define ASC_CFG1_SCSI_TARGET_ON 0x0080 #define ASC_CFG1_LRAM_8BITS_ON 0x0800 #define ASC_CFG_MSW_CLR_MASK 0x3080 #define CSW_TEST1 (ASC_CS_TYPE)0x8000 #define CSW_AUTO_CONFIG (ASC_CS_TYPE)0x4000 #define CSW_RESERVED1 (ASC_CS_TYPE)0x2000 #define CSW_IRQ_WRITTEN (ASC_CS_TYPE)0x1000 #define CSW_33MHZ_SELECTED (ASC_CS_TYPE)0x0800 #define CSW_TEST2 (ASC_CS_TYPE)0x0400 #define CSW_TEST3 (ASC_CS_TYPE)0x0200 #define CSW_RESERVED2 (ASC_CS_TYPE)0x0100 #define CSW_DMA_DONE (ASC_CS_TYPE)0x0080 #define CSW_FIFO_RDY (ASC_CS_TYPE)0x0040 #define CSW_EEP_READ_DONE (ASC_CS_TYPE)0x0020 #define CSW_HALTED (ASC_CS_TYPE)0x0010 #define CSW_SCSI_RESET_ACTIVE (ASC_CS_TYPE)0x0008 #define CSW_PARITY_ERR (ASC_CS_TYPE)0x0004 #define CSW_SCSI_RESET_LATCH (ASC_CS_TYPE)0x0002 #define CSW_INT_PENDING (ASC_CS_TYPE)0x0001 #define CIW_CLR_SCSI_RESET_INT (ASC_CS_TYPE)0x1000 #define CIW_INT_ACK (ASC_CS_TYPE)0x0100 #define CIW_TEST1 (ASC_CS_TYPE)0x0200 #define CIW_TEST2 (ASC_CS_TYPE)0x0400 #define CIW_SEL_33MHZ (ASC_CS_TYPE)0x0800 #define CIW_IRQ_ACT (ASC_CS_TYPE)0x1000 #define CC_CHIP_RESET (uchar)0x80 #define CC_SCSI_RESET (uchar)0x40 #define 
CC_HALT (uchar)0x20 #define CC_SINGLE_STEP (uchar)0x10 #define CC_DMA_ABLE (uchar)0x08 #define CC_TEST (uchar)0x04 #define CC_BANK_ONE (uchar)0x02 #define CC_DIAG (uchar)0x01 #define ASC_1000_ID0W 0x04C1 #define ASC_1000_ID0W_FIX 0x00C1 #define ASC_1000_ID1B 0x25 #define ASC_EISA_REV_IOP_MASK (0x0C83) #define ASC_EISA_CFG_IOP_MASK (0x0C86) #define ASC_GET_EISA_SLOT(iop) (PortAddr)((iop) & 0xF000) #define INS_HALTINT (ushort)0x6281 #define INS_HALT (ushort)0x6280 #define INS_SINT (ushort)0x6200 #define INS_RFLAG_WTM (ushort)0x7380 #define ASC_MC_SAVE_CODE_WSIZE 0x500 #define ASC_MC_SAVE_DATA_WSIZE 0x40 typedef struct asc_mc_saved { ushort data[ASC_MC_SAVE_DATA_WSIZE]; ushort code[ASC_MC_SAVE_CODE_WSIZE]; } ASC_MC_SAVED; #define AscGetQDoneInProgress(port) AscReadLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B) #define AscPutQDoneInProgress(port, val) AscWriteLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B, val) #define AscGetVarFreeQHead(port) AscReadLramWord((port), ASCV_FREE_Q_HEAD_W) #define AscGetVarDoneQTail(port) AscReadLramWord((port), ASCV_DONE_Q_TAIL_W) #define AscPutVarFreeQHead(port, val) AscWriteLramWord((port), ASCV_FREE_Q_HEAD_W, val) #define AscPutVarDoneQTail(port, val) AscWriteLramWord((port), ASCV_DONE_Q_TAIL_W, val) #define AscGetRiscVarFreeQHead(port) AscReadLramByte((port), ASCV_NEXTRDY_B) #define AscGetRiscVarDoneQTail(port) AscReadLramByte((port), ASCV_DONENEXT_B) #define AscPutRiscVarFreeQHead(port, val) AscWriteLramByte((port), ASCV_NEXTRDY_B, val) #define AscPutRiscVarDoneQTail(port, val) AscWriteLramByte((port), ASCV_DONENEXT_B, val) #define AscPutMCodeSDTRDoneAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id), (data)) #define AscGetMCodeSDTRDoneAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)id)) #define AscPutMCodeInitSDTRAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id), data) #define AscGetMCodeInitSDTRAtID(port, id) 
AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)id)) #define AscGetChipSignatureByte(port) (uchar)inp((port)+IOP_SIG_BYTE) #define AscGetChipSignatureWord(port) (ushort)inpw((port)+IOP_SIG_WORD) #define AscGetChipVerNo(port) (uchar)inp((port)+IOP_VERSION) #define AscGetChipCfgLsw(port) (ushort)inpw((port)+IOP_CONFIG_LOW) #define AscGetChipCfgMsw(port) (ushort)inpw((port)+IOP_CONFIG_HIGH) #define AscSetChipCfgLsw(port, data) outpw((port)+IOP_CONFIG_LOW, data) #define AscSetChipCfgMsw(port, data) outpw((port)+IOP_CONFIG_HIGH, data) #define AscGetChipEEPCmd(port) (uchar)inp((port)+IOP_EEP_CMD) #define AscSetChipEEPCmd(port, data) outp((port)+IOP_EEP_CMD, data) #define AscGetChipEEPData(port) (ushort)inpw((port)+IOP_EEP_DATA) #define AscSetChipEEPData(port, data) outpw((port)+IOP_EEP_DATA, data) #define AscGetChipLramAddr(port) (ushort)inpw((PortAddr)((port)+IOP_RAM_ADDR)) #define AscSetChipLramAddr(port, addr) outpw((PortAddr)((port)+IOP_RAM_ADDR), addr) #define AscGetChipLramData(port) (ushort)inpw((port)+IOP_RAM_DATA) #define AscSetChipLramData(port, data) outpw((port)+IOP_RAM_DATA, data) #define AscGetChipIFC(port) (uchar)inp((port)+IOP_REG_IFC) #define AscSetChipIFC(port, data) outp((port)+IOP_REG_IFC, data) #define AscGetChipStatus(port) (ASC_CS_TYPE)inpw((port)+IOP_STATUS) #define AscSetChipStatus(port, cs_val) outpw((port)+IOP_STATUS, cs_val) #define AscGetChipControl(port) (uchar)inp((port)+IOP_CTRL) #define AscSetChipControl(port, cc_val) outp((port)+IOP_CTRL, cc_val) #define AscGetChipSyn(port) (uchar)inp((port)+IOP_SYN_OFFSET) #define AscSetChipSyn(port, data) outp((port)+IOP_SYN_OFFSET, data) #define AscSetPCAddr(port, data) outpw((port)+IOP_REG_PC, data) #define AscGetPCAddr(port) (ushort)inpw((port)+IOP_REG_PC) #define AscIsIntPending(port) (AscGetChipStatus(port) & (CSW_INT_PENDING | CSW_SCSI_RESET_LATCH)) #define AscGetChipScsiID(port) ((AscGetChipCfgLsw(port) >> 8) & ASC_MAX_TID) #define AscGetExtraControl(port) 
(uchar)inp((port)+IOP_EXTRA_CONTROL) #define AscSetExtraControl(port, data) outp((port)+IOP_EXTRA_CONTROL, data) #define AscReadChipAX(port) (ushort)inpw((port)+IOP_REG_AX) #define AscWriteChipAX(port, data) outpw((port)+IOP_REG_AX, data) #define AscReadChipIX(port) (uchar)inp((port)+IOP_REG_IX) #define AscWriteChipIX(port, data) outp((port)+IOP_REG_IX, data) #define AscReadChipIH(port) (ushort)inpw((port)+IOP_REG_IH) #define AscWriteChipIH(port, data) outpw((port)+IOP_REG_IH, data) #define AscReadChipQP(port) (uchar)inp((port)+IOP_REG_QP) #define AscWriteChipQP(port, data) outp((port)+IOP_REG_QP, data) #define AscReadChipFIFO_L(port) (ushort)inpw((port)+IOP_REG_FIFO_L) #define AscWriteChipFIFO_L(port, data) outpw((port)+IOP_REG_FIFO_L, data) #define AscReadChipFIFO_H(port) (ushort)inpw((port)+IOP_REG_FIFO_H) #define AscWriteChipFIFO_H(port, data) outpw((port)+IOP_REG_FIFO_H, data) #define AscReadChipDmaSpeed(port) (uchar)inp((port)+IOP_DMA_SPEED) #define AscWriteChipDmaSpeed(port, data) outp((port)+IOP_DMA_SPEED, data) #define AscReadChipDA0(port) (ushort)inpw((port)+IOP_REG_DA0) #define AscWriteChipDA0(port) outpw((port)+IOP_REG_DA0, data) #define AscReadChipDA1(port) (ushort)inpw((port)+IOP_REG_DA1) #define AscWriteChipDA1(port) outpw((port)+IOP_REG_DA1, data) #define AscReadChipDC0(port) (ushort)inpw((port)+IOP_REG_DC0) #define AscWriteChipDC0(port) outpw((port)+IOP_REG_DC0, data) #define AscReadChipDC1(port) (ushort)inpw((port)+IOP_REG_DC1) #define AscWriteChipDC1(port) outpw((port)+IOP_REG_DC1, data) #define AscReadChipDvcID(port) (uchar)inp((port)+IOP_REG_ID) #define AscWriteChipDvcID(port, data) outp((port)+IOP_REG_ID, data) /* * Portable Data Types * * Any instance where a 32-bit long or pointer type is assumed * for precision or HW defined structures, the following define * types must be used. In Linux the char, short, and int types * are all consistent at 8, 16, and 32 bits respectively. Pointers * and long types are 64 bits on Alpha and UltraSPARC. 
*/ #define ADV_PADDR __u32 /* Physical address data type. */ #define ADV_VADDR __u32 /* Virtual address data type. */ #define ADV_DCNT __u32 /* Unsigned Data count type. */ #define ADV_SDCNT __s32 /* Signed Data count type. */ /* * These macros are used to convert a virtual address to a * 32-bit value. This currently can be used on Linux Alpha * which uses 64-bit virtual address but a 32-bit bus address. * This is likely to break in the future, but doing this now * will give us time to change the HW and FW to handle 64-bit * addresses. */ #define ADV_VADDR_TO_U32 virt_to_bus #define ADV_U32_TO_VADDR bus_to_virt #define AdvPortAddr void __iomem * /* Virtual memory address size */ /* * Define Adv Library required memory access macros. */ #define ADV_MEM_READB(addr) readb(addr) #define ADV_MEM_READW(addr) readw(addr) #define ADV_MEM_WRITEB(addr, byte) writeb(byte, addr) #define ADV_MEM_WRITEW(addr, word) writew(word, addr) #define ADV_MEM_WRITEDW(addr, dword) writel(dword, addr) #define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 15) /* * Define total number of simultaneous maximum element scatter-gather * request blocks per wide adapter. ASC_DEF_MAX_HOST_QNG (253) is the * maximum number of outstanding commands per wide host adapter. Each * command uses one or more ADV_SG_BLOCK each with 15 scatter-gather * elements. Allow each command to have at least one ADV_SG_BLOCK structure. * This allows about 15 commands to have the maximum 17 ADV_SG_BLOCK * structures or 255 scatter-gather elements. */ #define ADV_TOT_SG_BLOCK ASC_DEF_MAX_HOST_QNG /* * Define maximum number of scatter-gather elements per request. 
*/ #define ADV_MAX_SG_LIST 255 #define NO_OF_SG_PER_BLOCK 15 #define ADV_EEP_DVC_CFG_BEGIN (0x00) #define ADV_EEP_DVC_CFG_END (0x15) #define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */ #define ADV_EEP_MAX_WORD_ADDR (0x1E) #define ADV_EEP_DELAY_MS 100 #define ADV_EEPROM_BIG_ENDIAN 0x8000 /* EEPROM Bit 15 */ #define ADV_EEPROM_BIOS_ENABLE 0x4000 /* EEPROM Bit 14 */ /* * For the ASC3550 Bit 13 is Termination Polarity control bit. * For later ICs Bit 13 controls whether the CIS (Card Information * Service Section) is loaded from EEPROM. */ #define ADV_EEPROM_TERM_POL 0x2000 /* EEPROM Bit 13 */ #define ADV_EEPROM_CIS_LD 0x2000 /* EEPROM Bit 13 */ /* * ASC38C1600 Bit 11 * * If EEPROM Bit 11 is 0 for Function 0, then Function 0 will specify * INT A in the PCI Configuration Space Int Pin field. If it is 1, then * Function 0 will specify INT B. * * If EEPROM Bit 11 is 0 for Function 1, then Function 1 will specify * INT B in the PCI Configuration Space Int Pin field. If it is 1, then * Function 1 will specify INT A. 
*/ #define ADV_EEPROM_INTAB 0x0800 /* EEPROM Bit 11 */ typedef struct adveep_3550_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 13 set - Term Polarity Control */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_able; /* 04 Synchronous DTR able */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ uchar reserved1; /* reserved byte (not used) */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 */ /* bit 11 No verbose initialization. 
*/ /* bit 12 SCSI parity enabled */ /* bit 13 */ /* bit 14 */ /* bit 15 */ ushort ultra_able; /* 13 ULTRA speed able */ ushort reserved2; /* 14 reserved */ uchar max_host_qng; /* 15 maximum host queuing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort bug_fix; /* 17 control bit for bug fix */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort num_of_err; /* 36 number of error */ } ADVEEP_3550_CONFIG; typedef struct adveep_38C0800_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 13 set - Load CIS */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination_se; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / 
high on */ /* There is no low on / high off */ uchar termination_lvd; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 */ /* bit 11 No verbose initialization. */ /* bit 12 SCSI parity enabled */ /* bit 13 */ /* bit 14 */ /* bit 15 */ ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */ ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */ uchar max_host_qng; /* 15 maximum host queueing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. 
driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort reserved36; /* 36 reserved */ ushort reserved37; /* 37 reserved */ ushort reserved38; /* 38 reserved */ ushort reserved39; /* 39 reserved */ ushort reserved40; /* 40 reserved */ ushort reserved41; /* 41 reserved */ ushort reserved42; /* 42 reserved */ ushort reserved43; /* 43 reserved */ ushort reserved44; /* 44 reserved */ ushort reserved45; /* 45 reserved */ ushort reserved46; /* 46 reserved */ ushort reserved47; /* 47 reserved */ ushort reserved48; /* 48 reserved */ ushort reserved49; /* 49 reserved */ ushort reserved50; /* 50 reserved */ ushort reserved51; /* 51 reserved */ ushort reserved52; /* 52 reserved */ ushort reserved53; /* 53 reserved */ ushort reserved54; /* 54 reserved */ ushort reserved55; /* 55 reserved */ ushort cisptr_lsw; /* 56 CIS PTR LSW */ ushort cisprt_msw; /* 57 CIS PTR MSW */ ushort subsysvid; /* 58 SubSystem Vendor ID */ ushort subsysid; /* 59 SubSystem ID */ ushort reserved60; /* 60 reserved */ ushort reserved61; /* 61 reserved */ ushort reserved62; /* 62 reserved */ ushort reserved63; /* 63 reserved */ } ADVEEP_38C0800_CONFIG; typedef struct adveep_38C1600_config { /* Word Offset, Description */ ushort cfg_lsw; /* 00 power up initialization */ /* bit 11 set - Func. 0 INTB, Func. 1 INTA */ /* clear - Func. 0 INTA, Func. 
1 INTB */ /* bit 13 set - Load CIS */ /* bit 14 set - BIOS Enable */ /* bit 15 set - Big Endian Mode */ ushort cfg_msw; /* 01 unused */ ushort disc_enable; /* 02 disconnect enable */ ushort wdtr_able; /* 03 Wide DTR able */ ushort sdtr_speed1; /* 04 SDTR Speed TID 0-3 */ ushort start_motor; /* 05 send start up motor */ ushort tagqng_able; /* 06 tag queuing able */ ushort bios_scan; /* 07 BIOS device control */ ushort scam_tolerant; /* 08 no scam */ uchar adapter_scsi_id; /* 09 Host Adapter ID */ uchar bios_boot_delay; /* power up wait */ uchar scsi_reset_delay; /* 10 reset delay */ uchar bios_id_lun; /* first boot device scsi id & lun */ /* high nibble is lun */ /* low nibble is scsi id */ uchar termination_se; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ uchar termination_lvd; /* 11 0 - automatic */ /* 1 - low off / high off */ /* 2 - low off / high on */ /* 3 - low on / high on */ /* There is no low on / high off */ ushort bios_ctrl; /* 12 BIOS control bits */ /* bit 0 BIOS don't act as initiator. */ /* bit 1 BIOS > 1 GB support */ /* bit 2 BIOS > 2 Disk Support */ /* bit 3 BIOS don't support removables */ /* bit 4 BIOS support bootable CD */ /* bit 5 BIOS scan enabled */ /* bit 6 BIOS support multiple LUNs */ /* bit 7 BIOS display of message */ /* bit 8 SCAM disabled */ /* bit 9 Reset SCSI bus during init. */ /* bit 10 Basic Integrity Checking disabled */ /* bit 11 No verbose initialization. */ /* bit 12 SCSI parity enabled */ /* bit 13 AIPP (Asyn. Info. Ph. Prot.) dis. 
*/ /* bit 14 */ /* bit 15 */ ushort sdtr_speed2; /* 13 SDTR speed TID 4-7 */ ushort sdtr_speed3; /* 14 SDTR speed TID 8-11 */ uchar max_host_qng; /* 15 maximum host queueing */ uchar max_dvc_qng; /* maximum per device queuing */ ushort dvc_cntl; /* 16 control bit for driver */ ushort sdtr_speed4; /* 17 SDTR speed 4 TID 12-15 */ ushort serial_number_word1; /* 18 Board serial number word 1 */ ushort serial_number_word2; /* 19 Board serial number word 2 */ ushort serial_number_word3; /* 20 Board serial number word 3 */ ushort check_sum; /* 21 EEP check sum */ uchar oem_name[16]; /* 22 OEM name */ ushort dvc_err_code; /* 30 last device driver error code */ ushort adv_err_code; /* 31 last uc and Adv Lib error code */ ushort adv_err_addr; /* 32 last uc error address */ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */ ushort saved_adv_err_addr; /* 35 saved last uc error address */ ushort reserved36; /* 36 reserved */ ushort reserved37; /* 37 reserved */ ushort reserved38; /* 38 reserved */ ushort reserved39; /* 39 reserved */ ushort reserved40; /* 40 reserved */ ushort reserved41; /* 41 reserved */ ushort reserved42; /* 42 reserved */ ushort reserved43; /* 43 reserved */ ushort reserved44; /* 44 reserved */ ushort reserved45; /* 45 reserved */ ushort reserved46; /* 46 reserved */ ushort reserved47; /* 47 reserved */ ushort reserved48; /* 48 reserved */ ushort reserved49; /* 49 reserved */ ushort reserved50; /* 50 reserved */ ushort reserved51; /* 51 reserved */ ushort reserved52; /* 52 reserved */ ushort reserved53; /* 53 reserved */ ushort reserved54; /* 54 reserved */ ushort reserved55; /* 55 reserved */ ushort cisptr_lsw; /* 56 CIS PTR LSW */ ushort cisprt_msw; /* 57 CIS PTR MSW */ ushort subsysvid; /* 58 SubSystem Vendor ID */ ushort subsysid; /* 59 SubSystem ID */ ushort reserved60; /* 60 reserved */ ushort reserved61; /* 61 reserved */ ushort reserved62; /* 62 reserved */ 
ushort reserved63; /* 63 reserved */ } ADVEEP_38C1600_CONFIG; /* * EEPROM Commands */ #define ASC_EEP_CMD_DONE 0x0200 /* bios_ctrl */ #define BIOS_CTRL_BIOS 0x0001 #define BIOS_CTRL_EXTENDED_XLAT 0x0002 #define BIOS_CTRL_GT_2_DISK 0x0004 #define BIOS_CTRL_BIOS_REMOVABLE 0x0008 #define BIOS_CTRL_BOOTABLE_CD 0x0010 #define BIOS_CTRL_MULTIPLE_LUN 0x0040 #define BIOS_CTRL_DISPLAY_MSG 0x0080 #define BIOS_CTRL_NO_SCAM 0x0100 #define BIOS_CTRL_RESET_SCSI_BUS 0x0200 #define BIOS_CTRL_INIT_VERBOSE 0x0800 #define BIOS_CTRL_SCSI_PARITY 0x1000 #define BIOS_CTRL_AIPP_DIS 0x2000 #define ADV_3550_MEMSIZE 0x2000 /* 8 KB Internal Memory */ #define ADV_38C0800_MEMSIZE 0x4000 /* 16 KB Internal Memory */ /* * XXX - Since ASC38C1600 Rev.3 has a local RAM failure issue, there is * a special 16K Adv Library and Microcode version. After the issue is * resolved, should restore 32K support. * * #define ADV_38C1600_MEMSIZE 0x8000L * 32 KB Internal Memory * */ #define ADV_38C1600_MEMSIZE 0x4000 /* 16 KB Internal Memory */ /* * Byte I/O register address from base of 'iop_base'. 
 */
#define IOPB_INTR_STATUS_REG	0x00
#define IOPB_CHIP_ID_1		0x01
#define IOPB_INTR_ENABLES	0x02
#define IOPB_CHIP_TYPE_REV	0x03
#define IOPB_RES_ADDR_4		0x04
#define IOPB_RES_ADDR_5		0x05
#define IOPB_RAM_DATA		0x06
#define IOPB_RES_ADDR_7		0x07
#define IOPB_FLAG_REG		0x08
#define IOPB_RES_ADDR_9		0x09
#define IOPB_RISC_CSR		0x0A
#define IOPB_RES_ADDR_B		0x0B
#define IOPB_RES_ADDR_C		0x0C
#define IOPB_RES_ADDR_D		0x0D
#define IOPB_SOFT_OVER_WR	0x0E
#define IOPB_RES_ADDR_F		0x0F
#define IOPB_MEM_CFG		0x10
#define IOPB_RES_ADDR_11	0x11
#define IOPB_GPIO_DATA		0x12
#define IOPB_RES_ADDR_13	0x13
#define IOPB_FLASH_PAGE		0x14
#define IOPB_RES_ADDR_15	0x15
#define IOPB_GPIO_CNTL		0x16
#define IOPB_RES_ADDR_17	0x17
#define IOPB_FLASH_DATA		0x18
#define IOPB_RES_ADDR_19	0x19
#define IOPB_RES_ADDR_1A	0x1A
#define IOPB_RES_ADDR_1B	0x1B
#define IOPB_RES_ADDR_1C	0x1C
#define IOPB_RES_ADDR_1D	0x1D
#define IOPB_RES_ADDR_1E	0x1E
#define IOPB_RES_ADDR_1F	0x1F
#define IOPB_DMA_CFG0		0x20
#define IOPB_DMA_CFG1		0x21
#define IOPB_TICKLE		0x22
#define IOPB_DMA_REG_WR		0x23
#define IOPB_SDMA_STATUS	0x24
#define IOPB_SCSI_BYTE_CNT	0x25
#define IOPB_HOST_BYTE_CNT	0x26
#define IOPB_BYTE_LEFT_TO_XFER	0x27
#define IOPB_BYTE_TO_XFER_0	0x28
#define IOPB_BYTE_TO_XFER_1	0x29
#define IOPB_BYTE_TO_XFER_2	0x2A
#define IOPB_BYTE_TO_XFER_3	0x2B
#define IOPB_ACC_GRP		0x2C
#define IOPB_RES_ADDR_2D	0x2D
#define IOPB_DEV_ID		0x2E
#define IOPB_RES_ADDR_2F	0x2F
#define IOPB_SCSI_DATA		0x30
#define IOPB_RES_ADDR_31	0x31
#define IOPB_RES_ADDR_32	0x32
#define IOPB_SCSI_DATA_HSHK	0x33
#define IOPB_SCSI_CTRL		0x34
#define IOPB_RES_ADDR_35	0x35
#define IOPB_RES_ADDR_36	0x36
#define IOPB_RES_ADDR_37	0x37
#define IOPB_RAM_BIST		0x38
#define IOPB_PLL_TEST		0x39
#define IOPB_PCI_INT_CFG	0x3A
#define IOPB_RES_ADDR_3B	0x3B
#define IOPB_RFIFO_CNT		0x3C
#define IOPB_RES_ADDR_3D	0x3D
#define IOPB_RES_ADDR_3E	0x3E
#define IOPB_RES_ADDR_3F	0x3F

/*
 * Word I/O register address from base of 'iop_base'.
 */
#define IOPW_CHIP_ID_0		0x00	/* CID0  */
#define IOPW_CTRL_REG		0x02	/* CC    */
#define IOPW_RAM_ADDR		0x04	/* LA    */
#define IOPW_RAM_DATA		0x06	/* LD    */
#define IOPW_RES_ADDR_08	0x08
#define IOPW_RISC_CSR		0x0A	/* CSR   */
#define IOPW_SCSI_CFG0		0x0C	/* CFG0  */
#define IOPW_SCSI_CFG1		0x0E	/* CFG1  */
#define IOPW_RES_ADDR_10	0x10
#define IOPW_SEL_MASK		0x12	/* SM    */
#define IOPW_RES_ADDR_14	0x14
#define IOPW_FLASH_ADDR		0x16	/* FA    */
#define IOPW_RES_ADDR_18	0x18
#define IOPW_EE_CMD		0x1A	/* EC    */
#define IOPW_EE_DATA		0x1C	/* ED    */
#define IOPW_SFIFO_CNT		0x1E	/* SFC   */
#define IOPW_RES_ADDR_20	0x20
#define IOPW_Q_BASE		0x22	/* QB    */
#define IOPW_QP			0x24	/* QP    */
#define IOPW_IX			0x26	/* IX    */
#define IOPW_SP			0x28	/* SP    */
#define IOPW_PC			0x2A	/* PC    */
#define IOPW_RES_ADDR_2C	0x2C
#define IOPW_RES_ADDR_2E	0x2E
#define IOPW_SCSI_DATA		0x30	/* SD    */
#define IOPW_SCSI_DATA_HSHK	0x32	/* SDH   */
#define IOPW_SCSI_CTRL		0x34	/* SC    */
/* NOTE: HSHK_CFG and SXFR_STATUS share word offset 0x36. */
#define IOPW_HSHK_CFG		0x36	/* HCFG  */
#define IOPW_SXFR_STATUS	0x36	/* SXS   */
#define IOPW_SXFR_CNTL		0x38	/* SXL   */
#define IOPW_SXFR_CNTH		0x3A	/* SXH   */
#define IOPW_RES_ADDR_3C	0x3C
#define IOPW_RFIFO_DATA		0x3E	/* RFD   */

/*
 * Doubleword I/O register address from base of 'iop_base'.
 */
#define IOPDW_RES_ADDR_0	0x00
#define IOPDW_RAM_DATA		0x04
#define IOPDW_RES_ADDR_8	0x08
#define IOPDW_RES_ADDR_C	0x0C
#define IOPDW_RES_ADDR_10	0x10
#define IOPDW_COMMA		0x14
#define IOPDW_COMMB		0x18
#define IOPDW_RES_ADDR_1C	0x1C
#define IOPDW_SDMA_ADDR0	0x20
#define IOPDW_SDMA_ADDR1	0x24
#define IOPDW_SDMA_COUNT	0x28
#define IOPDW_SDMA_ERROR	0x2C
#define IOPDW_RDMA_ADDR0	0x30
#define IOPDW_RDMA_ADDR1	0x34
#define IOPDW_RDMA_COUNT	0x38
#define IOPDW_RDMA_ERROR	0x3C

#define ADV_CHIP_ID_BYTE	0x25
#define ADV_CHIP_ID_WORD	0x04C1

/* IOPB_INTR_ENABLES bit definitions */
#define ADV_INTR_ENABLE_HOST_INTR	0x01
#define ADV_INTR_ENABLE_SEL_INTR	0x02
#define ADV_INTR_ENABLE_DPR_INTR	0x04
#define ADV_INTR_ENABLE_RTA_INTR	0x08
#define ADV_INTR_ENABLE_RMA_INTR	0x10
#define ADV_INTR_ENABLE_RST_INTR	0x20
#define ADV_INTR_ENABLE_DPE_INTR	0x40
#define ADV_INTR_ENABLE_GLOBAL_INTR	0x80

/* IOPB_INTR_STATUS_REG bit definitions */
#define ADV_INTR_STATUS_INTRA	0x01
#define ADV_INTR_STATUS_INTRB	0x02
#define ADV_INTR_STATUS_INTRC	0x04

/* IOPW_RISC_CSR values */
#define ADV_RISC_CSR_STOP	(0x0000)
#define ADV_RISC_TEST_COND	(0x2000)
#define ADV_RISC_CSR_RUN	(0x4000)
#define ADV_RISC_CSR_SINGLE_STEP	(0x8000)

/* IOPW_CTRL_REG bit definitions */
#define ADV_CTRL_REG_HOST_INTR		0x0100
#define ADV_CTRL_REG_SEL_INTR		0x0200
#define ADV_CTRL_REG_DPR_INTR		0x0400
#define ADV_CTRL_REG_RTA_INTR		0x0800
#define ADV_CTRL_REG_RMA_INTR		0x1000
#define ADV_CTRL_REG_RES_BIT14		0x2000
#define ADV_CTRL_REG_DPE_INTR		0x4000
#define ADV_CTRL_REG_POWER_DONE		0x8000
#define ADV_CTRL_REG_ANY_INTR		0xFF00

/* IOPW_CTRL_REG commands */
#define ADV_CTRL_REG_CMD_RESET		0x00C6
#define ADV_CTRL_REG_CMD_WR_IO_REG	0x00C5
#define ADV_CTRL_REG_CMD_RD_IO_REG	0x00C4
#define ADV_CTRL_REG_CMD_WR_PCI_CFG_SPACE	0x00C3
#define ADV_CTRL_REG_CMD_RD_PCI_CFG_SPACE	0x00C2

/* IOPB_TICKLE values */
#define ADV_TICKLE_NOP	0x00
#define ADV_TICKLE_A	0x01
#define ADV_TICKLE_B	0x02
#define ADV_TICKLE_C	0x03

/* Evaluate to non-zero when the chip has a host interrupt pending. */
#define AdvIsIntPending(port) \
	(AdvReadWordRegister(port, IOPW_CTRL_REG) & ADV_CTRL_REG_HOST_INTR)

/*
 * SCSI_CFG0 Register bit definitions
 */
#define TIMER_MODEAB	0xC000	/* Watchdog, Second, and Select. Timer Ctrl.
 */
#define PARITY_EN	0x2000	/* Enable SCSI Parity Error detection */
#define EVEN_PARITY	0x1000	/* Select Even Parity */
#define WD_LONG		0x0800	/* Watchdog Interval, 1: 57 min, 0: 13 sec */
#define QUEUE_128	0x0400	/* Queue Size, 1: 128 byte, 0: 64 byte */
#define PRIM_MODE	0x0100	/* Primitive SCSI mode */
#define SCAM_EN		0x0080	/* Enable SCAM selection */
#define SEL_TMO_LONG	0x0040	/* Sel/Resel Timeout, 1: 400 ms, 0: 1.6 ms */
#define CFRM_ID		0x0020	/* SCAM id sel. confirm., 1: fast, 0: 6.4 ms */
#define OUR_ID_EN	0x0010	/* Enable OUR_ID bits */
#define OUR_ID		0x000F	/* SCSI ID */

/*
 * SCSI_CFG1 Register bit definitions
 */
#define BIG_ENDIAN	0x8000	/* Enable Big Endian Mode MIO:15, EEP:15 */
#define TERM_POL	0x2000	/* Terminator Polarity Ctrl. MIO:13, EEP:13 */
#define SLEW_RATE	0x1000	/* SCSI output buffer slew rate */
#define FILTER_SEL	0x0C00	/* Filter Period Selection */
#define FLTR_DISABLE	0x0000	/* Input Filtering Disabled */
#define FLTR_11_TO_20NS	0x0800	/* Input Filtering 11ns to 20ns */
#define FLTR_21_TO_39NS	0x0C00	/* Input Filtering 21ns to 39ns */
#define ACTIVE_DBL	0x0200	/* Disable Active Negation */
#define DIFF_MODE	0x0100	/* SCSI differential Mode (Read-Only) */
#define DIFF_SENSE	0x0080	/* 1: No SE cables, 0: SE cable (Read-Only) */
#define TERM_CTL_SEL	0x0040	/* Enable TERM_CTL_H and TERM_CTL_L */
#define TERM_CTL	0x0030	/* External SCSI Termination Bits */
#define TERM_CTL_H	0x0020	/* Enable External SCSI Upper Termination */
#define TERM_CTL_L	0x0010	/* Enable External SCSI Lower Termination */
#define CABLE_DETECT	0x000F	/* External SCSI Cable Connection Status */

/*
 * Addendum for ASC-38C0800 Chip
 *
 * The ASC-38C1600 Chip uses the same definitions except that the
 * bus mode override bits [12:10] have been moved to byte register
 * offset 0xE (IOPB_SOFT_OVER_WR) bits [12:10]. The [12:10] bits in
 * SCSI_CFG1 are read-only and always available. Bit 14 (DIS_TERM_DRV)
 * is not needed. The [12:10] bits in IOPB_SOFT_OVER_WR are write-only.
 * Also each ASC-38C1600 function or channel uses only cable bits [5:4]
 * and [1:0]. Bits [14], [7:6], [3:2] are unused.
 */
#define DIS_TERM_DRV	0x4000	/* 1: Read c_det[3:0], 0: cannot read */
#define HVD_LVD_SE	0x1C00	/* Device Detect Bits */
#define HVD		0x1000	/* HVD Device Detect */
#define LVD		0x0800	/* LVD Device Detect */
#define SE		0x0400	/* SE Device Detect */
#define TERM_LVD	0x00C0	/* LVD Termination Bits */
#define TERM_LVD_HI	0x0080	/* Enable LVD Upper Termination */
#define TERM_LVD_LO	0x0040	/* Enable LVD Lower Termination */
#define TERM_SE		0x0030	/* SE Termination Bits */
#define TERM_SE_HI	0x0020	/* Enable SE Upper Termination */
#define TERM_SE_LO	0x0010	/* Enable SE Lower Termination */
#define C_DET_LVD	0x000C	/* LVD Cable Detect Bits */
#define C_DET3		0x0008	/* Cable Detect for LVD External Wide */
#define C_DET2		0x0004	/* Cable Detect for LVD Internal Wide */
#define C_DET_SE	0x0003	/* SE Cable Detect Bits */
#define C_DET1		0x0002	/* Cable Detect for SE Internal Wide */
#define C_DET0		0x0001	/* Cable Detect for SE Internal Narrow */

#define CABLE_ILLEGAL_A	0x7	/* x 0 0 0 | on on | Illegal (all 3 connectors are used) */
#define CABLE_ILLEGAL_B	0xB	/* 0 x 0 0 | on on | Illegal (all 3 connectors are used) */

/*
 * MEM_CFG Register bit definitions
 */
#define BIOS_EN		0x40	/* BIOS Enable MIO:14,EEP:14 */
#define FAST_EE_CLK	0x20	/* Diagnostic Bit */
#define RAM_SZ		0x1C	/* Specify size of RAM to RISC */
#define RAM_SZ_2KB	0x00	/* 2 KB */
#define RAM_SZ_4KB	0x04	/* 4 KB */
#define RAM_SZ_8KB	0x08	/* 8 KB */
#define RAM_SZ_16KB	0x0C	/* 16 KB */
#define RAM_SZ_32KB	0x10	/* 32 KB */
#define RAM_SZ_64KB	0x14	/* 64 KB */

/*
 * DMA_CFG0 Register bit definitions
 *
 * This register is only accessible to the host.
 */
#define BC_THRESH_ENB		0x80	/* PCI DMA Start Conditions */
#define FIFO_THRESH		0x70	/* PCI DMA FIFO Threshold */
#define FIFO_THRESH_16B		0x00	/* 16 bytes */
#define FIFO_THRESH_32B		0x20	/* 32 bytes */
#define FIFO_THRESH_48B		0x30	/* 48 bytes */
#define FIFO_THRESH_64B		0x40	/* 64 bytes */
#define FIFO_THRESH_80B		0x50	/* 80 bytes (default) */
#define FIFO_THRESH_96B		0x60	/* 96 bytes */
#define FIFO_THRESH_112B	0x70	/* 112 bytes */
#define START_CTL		0x0C	/* DMA start conditions */
#define START_CTL_TH		0x00	/* Wait threshold level (default) */
#define START_CTL_ID		0x04	/* Wait SDMA/SBUS idle */
#define START_CTL_THID		0x08	/* Wait threshold and SDMA/SBUS idle */
#define START_CTL_EMFU		0x0C	/* Wait SDMA FIFO empty/full */
#define READ_CMD		0x03	/* Memory Read Method */
#define READ_CMD_MR		0x00	/* Memory Read */
#define READ_CMD_MRL		0x02	/* Memory Read Long */
#define READ_CMD_MRM		0x03	/* Memory Read Multiple (default) */

/*
 * ASC-38C0800 RAM BIST Register bit definitions
 */
#define RAM_TEST_MODE		0x80
#define PRE_TEST_MODE		0x40
#define NORMAL_MODE		0x00
#define RAM_TEST_DONE		0x10
#define RAM_TEST_STATUS		0x0F
#define RAM_TEST_HOST_ERROR	0x08
#define RAM_TEST_INTRAM_ERROR	0x04
#define RAM_TEST_RISC_ERROR	0x02
#define RAM_TEST_SCSI_ERROR	0x01
#define RAM_TEST_SUCCESS	0x00
#define PRE_TEST_VALUE		0x05
#define NORMAL_VALUE		0x00

/*
 * ASC38C1600 Definitions
 *
 * IOPB_PCI_INT_CFG Bit Field Definitions
 */
#define INTAB_LD	0x80	/* Value loaded from EEPROM Bit 11. */

/*
 * Bit 1 can be set to change the interrupt for the Function to operate in
 * Totem Pole mode. By default Bit 1 is 0 and the interrupt operates in
 * Open Drain mode. Both functions of the ASC38C1600 must be set to the same
 * mode, otherwise the operating mode is undefined.
 */
#define TOTEMPOLE	0x02

/*
 * Bit 0 can be used to change the Int Pin for the Function. The value is
 * 0 by default for both Functions with Function 0 using INT A and Function
 * B using INT B. For Function 0 if set, INT B is used. For Function 1 if set,
 * INT A is used.
 *
 * EEPROM Word 0 Bit 11 for each Function may change the initial Int Pin
 * value specified in the PCI Configuration Space.
 */
#define INTAB		0x01

/*
 * Adv Library Status Definitions
 */
#define ADV_TRUE	1
#define ADV_FALSE	0
#define ADV_SUCCESS	1
#define ADV_BUSY	0
#define ADV_ERROR	(-1)

/*
 * ADV_DVC_VAR 'warn_code' values
 */
#define ASC_WARN_BUSRESET_ERROR		0x0001	/* SCSI Bus Reset error */
#define ASC_WARN_EEPROM_CHKSUM		0x0002	/* EEP check sum error */
#define ASC_WARN_EEPROM_TERMINATION	0x0004	/* EEP termination bad field */
#define ASC_WARN_ERROR			0xFFFF	/* ADV_ERROR return */

#define ADV_MAX_TID	15	/* max. target identifier */
#define ADV_MAX_LUN	7	/* max. logical unit number */

/*
 * Fixed locations of microcode operating variables.
 */
#define ASC_MC_CODE_BEGIN_ADDR		0x0028	/* microcode start address */
#define ASC_MC_CODE_END_ADDR		0x002A	/* microcode end address */
#define ASC_MC_CODE_CHK_SUM		0x002C	/* microcode code checksum */
#define ASC_MC_VERSION_DATE		0x0038	/* microcode version */
#define ASC_MC_VERSION_NUM		0x003A	/* microcode number */
#define ASC_MC_BIOSMEM			0x0040	/* BIOS RISC Memory Start */
#define ASC_MC_BIOSLEN			0x0050	/* BIOS RISC Memory Length */
#define ASC_MC_BIOS_SIGNATURE		0x0058	/* BIOS Signature 0x55AA */
#define ASC_MC_BIOS_VERSION		0x005A	/* BIOS Version (2 bytes) */
#define ASC_MC_SDTR_SPEED1		0x0090	/* SDTR Speed for TID 0-3 */
#define ASC_MC_SDTR_SPEED2		0x0092	/* SDTR Speed for TID 4-7 */
#define ASC_MC_SDTR_SPEED3		0x0094	/* SDTR Speed for TID 8-11 */
#define ASC_MC_SDTR_SPEED4		0x0096	/* SDTR Speed for TID 12-15 */
#define ASC_MC_CHIP_TYPE		0x009A
#define ASC_MC_INTRB_CODE		0x009B
#define ASC_MC_WDTR_ABLE		0x009C
#define ASC_MC_SDTR_ABLE		0x009E
#define ASC_MC_TAGQNG_ABLE		0x00A0
#define ASC_MC_DISC_ENABLE		0x00A2
#define ASC_MC_IDLE_CMD_STATUS		0x00A4
#define ASC_MC_IDLE_CMD			0x00A6
#define ASC_MC_IDLE_CMD_PARAMETER	0x00A8
#define ASC_MC_DEFAULT_SCSI_CFG0	0x00AC
#define ASC_MC_DEFAULT_SCSI_CFG1	0x00AE
#define ASC_MC_DEFAULT_MEM_CFG		0x00B0
#define ASC_MC_DEFAULT_SEL_MASK		0x00B2
#define ASC_MC_SDTR_DONE		0x00B6
#define ASC_MC_NUMBER_OF_QUEUED_CMD	0x00C0
#define ASC_MC_NUMBER_OF_MAX_CMD	0x00D0
#define ASC_MC_DEVICE_HSHK_CFG_TABLE	0x0100
#define ASC_MC_CONTROL_FLAG		0x0122	/* Microcode control flag. */
#define ASC_MC_WDTR_DONE		0x0124
#define ASC_MC_CAM_MODE_MASK		0x015E	/* CAM mode TID bitmask. */
#define ASC_MC_ICQ			0x0160
#define ASC_MC_IRQ			0x0164
#define ASC_MC_PPR_ABLE			0x017A

/*
 * BIOS LRAM variable absolute offsets.
 */
#define BIOS_CODESEG	0x54
#define BIOS_CODELEN	0x56
#define BIOS_SIGNATURE	0x58
#define BIOS_VERSION	0x5A

/*
 * Microcode Control Flags
 *
 * Flags set by the Adv Library in RISC variable 'control_flag' (0x122)
 * and handled by the microcode.
 */
#define CONTROL_FLAG_IGNORE_PERR	0x0001	/* Ignore DMA Parity Errors */
#define CONTROL_FLAG_ENABLE_AIPP	0x0002	/* Enabled AIPP checking. */

/*
 * ASC_MC_DEVICE_HSHK_CFG_TABLE microcode table or HSHK_CFG register format
 */
#define HSHK_CFG_WIDE_XFR	0x8000
#define HSHK_CFG_RATE		0x0F00
#define HSHK_CFG_OFFSET		0x001F

#define ASC_DEF_MAX_HOST_QNG	0xFD	/* Max. number of host commands (253) */
#define ASC_DEF_MIN_HOST_QNG	0x10	/* Min. number of host commands (16) */
#define ASC_DEF_MAX_DVC_QNG	0x3F	/* Max. number commands per device (63) */
#define ASC_DEF_MIN_DVC_QNG	0x04	/* Min. number commands per device (4) */

/* ASC_SCSI_REQ_Q 'cntl' queue control flags */
#define ASC_QC_DATA_CHECK	0x01	/* Require ASC_QC_DATA_OUT set or clear. */
#define ASC_QC_DATA_OUT		0x02	/* Data out DMA transfer. */
#define ASC_QC_START_MOTOR	0x04	/* Send auto-start motor before request. */
#define ASC_QC_NO_OVERRUN	0x08	/* Don't report overrun. */
#define ASC_QC_FREEZE_TIDQ	0x10	/* Freeze TID queue after request. XXX TBD */

/* ASC_SCSI_REQ_Q 'scsi_cntl' per-request SCSI control flags */
#define ASC_QSC_NO_DISC		0x01	/* Don't allow disconnect for request. */
#define ASC_QSC_NO_TAGMSG	0x02	/* Don't allow tag queuing for request. */
#define ASC_QSC_NO_SYNC		0x04	/* Don't use Synch. transfer on request. */
#define ASC_QSC_NO_WIDE		0x08	/* Don't use Wide transfer on request. */
#define ASC_QSC_REDO_DTR	0x10	/* Renegotiate WDTR/SDTR before request. */
/*
 * Note: If a Tag Message is to be sent and neither ASC_QSC_HEAD_TAG or
 * ASC_QSC_ORDERED_TAG is set, then a Simple Tag Message (0x20) is used.
 */
#define ASC_QSC_HEAD_TAG	0x40	/* Use Head Tag Message (0x21). */
#define ASC_QSC_ORDERED_TAG	0x80	/* Use Ordered Tag Message (0x22). */

/*
 * All fields here are accessed by the board microcode and need to be
 * little-endian.
 */
typedef struct adv_carr_t {
	ADV_VADDR carr_va;	/* Carrier Virtual Address */
	ADV_PADDR carr_pa;	/* Carrier Physical Address */
	ADV_VADDR areq_vpa;	/* ASC_SCSI_REQ_Q Virtual or Physical Address */
	/*
	 * next_vpa [31:4]	Carrier Virtual or Physical Next Pointer
	 *
	 * next_vpa [3:1]	Reserved Bits
	 * next_vpa [0]		Done Flag set in Response Queue.
	 */
	ADV_VADDR next_vpa;
} ADV_CARR_T;

/*
 * Mask used to eliminate low 4 bits of carrier 'next_vpa' field.
 */
#define ASC_NEXT_VPA_MASK	0xFFFFFFF0

#define ASC_RQ_DONE		0x00000001
#define ASC_RQ_GOOD		0x00000002
#define ASC_CQ_STOPPER		0x00000000

#define ASC_GET_CARRP(carrp) ((carrp) & ASC_NEXT_VPA_MASK)

/* Carriers that may straddle page boundaries when the buffer is carved up. */
#define ADV_CARRIER_NUM_PAGE_CROSSING \
	(((ADV_CARRIER_COUNT * sizeof(ADV_CARR_T)) + (PAGE_SIZE - 1))/PAGE_SIZE)

#define ADV_CARRIER_BUFSIZE \
	((ADV_CARRIER_COUNT + ADV_CARRIER_NUM_PAGE_CROSSING) * sizeof(ADV_CARR_T))

/*
 * ASC_SCSI_REQ_Q 'a_flag' definitions
 *
 * The Adv Library should limit use to the lower nibble (4 bits) of
 * a_flag. Drivers are free to use the upper nibble (4 bits) of a_flag.
 */
#define ADV_POLL_REQUEST	0x01	/* poll for request completion */
#define ADV_SCSIQ_DONE		0x02	/* request done */
#define ADV_DONT_RETRY		0x08	/* don't do retry */

#define ADV_CHIP_ASC3550	0x01	/* Ultra-Wide IC */
#define ADV_CHIP_ASC38C0800	0x02	/* Ultra2-Wide/LVD IC */
#define ADV_CHIP_ASC38C1600	0x03	/* Ultra3-Wide/LVD2 IC */

/*
 * Adapter temporary configuration structure
 *
 * This structure can be discarded after initialization.
 * Don't add
 * fields here needed after initialization.
 *
 * Field naming convention:
 *
 * *_enable indicates the field enables or disables a feature. The
 * value of the field is never reset.
 */
typedef struct adv_dvc_cfg {
	ushort disc_enable;	/* enable disconnection */
	uchar chip_version;	/* chip version */
	uchar termination;	/* Term. Ctrl. bits 6-5 of SCSI_CFG1 register */
	ushort control_flag;	/* Microcode Control Flag */
	ushort mcode_date;	/* Microcode date */
	ushort mcode_version;	/* Microcode version */
	ushort serial1;		/* EEPROM serial number word 1 */
	ushort serial2;		/* EEPROM serial number word 2 */
	ushort serial3;		/* EEPROM serial number word 3 */
} ADV_DVC_CFG;

struct adv_dvc_var;
struct adv_scsi_req_q;

/* One scatter-gather block as consumed by the board microcode. */
typedef struct asc_sg_block {
	uchar reserved1;
	uchar reserved2;
	uchar reserved3;
	uchar sg_cnt;		/* Valid entries in block. */
	ADV_PADDR sg_ptr;	/* Pointer to next sg block. */
	struct {
		ADV_PADDR sg_addr;	/* SG element address. */
		ADV_DCNT sg_count;	/* SG element count. */
	} sg_list[NO_OF_SG_PER_BLOCK];
} ADV_SG_BLOCK;

/*
 * ADV_SCSI_REQ_Q - microcode request structure
 *
 * All fields in this structure up to byte 60 are used by the microcode.
 * The microcode makes assumptions about the size and ordering of fields
 * in this structure. Do not change the structure definition here without
 * coordinating the change with the microcode.
 *
 * All fields accessed by microcode must be maintained in little_endian
 * order.
 */
typedef struct adv_scsi_req_q {
	uchar cntl;		/* Ucode flags and state (ASC_MC_QC_*). */
	uchar target_cmd;
	uchar target_id;	/* Device target identifier. */
	uchar target_lun;	/* Device target logical unit number. */
	ADV_PADDR data_addr;	/* Data buffer physical address. */
	ADV_DCNT data_cnt;	/* Data count. Ucode sets to residual. */
	ADV_PADDR sense_addr;
	ADV_PADDR carr_pa;
	uchar mflag;
	uchar sense_len;
	uchar cdb_len;		/* SCSI CDB length. Must <= 16 bytes. */
	uchar scsi_cntl;
	uchar done_status;	/* Completion status. */
	uchar scsi_status;	/* SCSI status byte. */
	uchar host_status;	/* Ucode host status. */
	uchar sg_working_ix;
	uchar cdb[12];		/* SCSI CDB bytes 0-11. */
	ADV_PADDR sg_real_addr;	/* SG list physical address. */
	ADV_PADDR scsiq_rptr;
	uchar cdb16[4];		/* SCSI CDB bytes 12-15. */
	ADV_VADDR scsiq_ptr;
	ADV_VADDR carr_va;
	/*
	 * End of microcode structure - 60 bytes. The rest of the structure
	 * is used by the Adv Library and ignored by the microcode.
	 */
	ADV_VADDR srb_ptr;
	ADV_SG_BLOCK *sg_list_ptr;	/* SG list virtual address. */
	char *vdata_addr;	/* Data buffer virtual address. */
	uchar a_flag;
	uchar pad[2];		/* Pad out to a word boundary. */
} ADV_SCSI_REQ_Q;

/*
 * The following two structures are used to process Wide Board requests.
 *
 * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library
 * and microcode with the ADV_SCSI_REQ_Q field 'srb_ptr' pointing to the
 * adv_req_t. The adv_req_t structure 'cmndp' field in turn points to the
 * Mid-Level SCSI request structure.
 *
 * Zero or more ADV_SG_BLOCK are used with each ADV_SCSI_REQ_Q. Each
 * ADV_SG_BLOCK structure holds 15 scatter-gather elements. Under Linux
 * up to 255 scatter-gather elements may be used per request or
 * ADV_SCSI_REQ_Q.
 *
 * Both structures must be 32 byte aligned.
 */
typedef struct adv_sgblk {
	ADV_SG_BLOCK sg_block;	/* Sgblock structure. */
	uchar align[32];	/* Sgblock structure padding. */
	struct adv_sgblk *next_sgblkp;	/* Next scatter-gather structure. */
} adv_sgblk_t;

typedef struct adv_req {
	ADV_SCSI_REQ_Q scsi_req_q;	/* Adv Library request structure. */
	uchar align[32];	/* Request structure padding. */
	struct scsi_cmnd *cmndp;	/* Mid-Level SCSI command pointer. */
	adv_sgblk_t *sgblkp;	/* Adv Library scatter-gather pointer. */
	struct adv_req *next_reqp;	/* Next Request Structure. */
} adv_req_t;

/*
 * Adapter operation variable structure.
 *
 * One structure is required per host adapter.
 *
 * Field naming convention:
 *
 * *_able indicates both whether a feature should be enabled or disabled
 * and whether a device is capable of the feature. At initialization
 * this field may be set, but later if a device is found to be incapable
 * of the feature, the field is cleared.
 */
typedef struct adv_dvc_var {
	AdvPortAddr iop_base;	/* I/O port address */
	ushort err_code;	/* fatal error code */
	ushort bios_ctrl;	/* BIOS control word, EEPROM word 12 */
	ushort wdtr_able;	/* try WDTR for a device */
	ushort sdtr_able;	/* try SDTR for a device */
	ushort ultra_able;	/* try SDTR Ultra speed for a device */
	ushort sdtr_speed1;	/* EEPROM SDTR Speed for TID 0-3   */
	ushort sdtr_speed2;	/* EEPROM SDTR Speed for TID 4-7   */
	ushort sdtr_speed3;	/* EEPROM SDTR Speed for TID 8-11  */
	ushort sdtr_speed4;	/* EEPROM SDTR Speed for TID 12-15 */
	ushort tagqng_able;	/* try tagged queuing with a device */
	ushort ppr_able;	/* PPR message capable per TID bitmask. */
	uchar max_dvc_qng;	/* maximum number of tagged commands per device */
	ushort start_motor;	/* start motor command allowed */
	uchar scsi_reset_wait;	/* delay in seconds after scsi bus reset */
	uchar chip_no;		/* should be assigned by caller */
	uchar max_host_qng;	/* maximum number of Q'ed command allowed */
	ushort no_scam;		/* scam_tolerant of EEPROM */
	struct asc_board *drv_ptr;	/* driver pointer to private structure */
	uchar chip_scsi_id;	/* chip SCSI target ID */
	uchar chip_type;
	uchar bist_err_code;
	ADV_CARR_T *carrier_buf;
	ADV_CARR_T *carr_freelist;	/* Carrier free list. */
	ADV_CARR_T *icq_sp;	/* Initiator command queue stopper pointer. */
	ADV_CARR_T *irq_sp;	/* Initiator response queue stopper pointer. */
	ushort carr_pending_cnt;	/* Count of pending carriers. */
	struct adv_req *orig_reqp;	/* adv_req_t memory block. */
	/*
	 * Note: The following fields will not be used after initialization. The
	 * driver may discard the buffer after initialization is done.
	 */
	ADV_DVC_CFG *cfg;	/* temporary configuration structure */
} ADV_DVC_VAR;

/*
 * Microcode idle loop commands
 */
#define IDLE_CMD_COMPLETED		0
#define IDLE_CMD_STOP_CHIP		0x0001
#define IDLE_CMD_STOP_CHIP_SEND_INT	0x0002
#define IDLE_CMD_SEND_INT		0x0004
#define IDLE_CMD_ABORT			0x0008
#define IDLE_CMD_DEVICE_RESET		0x0010
#define IDLE_CMD_SCSI_RESET_START	0x0020	/* Assert SCSI Bus Reset */
#define IDLE_CMD_SCSI_RESET_END		0x0040	/* Deassert SCSI Bus Reset */
#define IDLE_CMD_SCSIREQ		0x0080

#define IDLE_CMD_STATUS_SUCCESS		0x0001
#define IDLE_CMD_STATUS_FAILURE		0x0002

/*
 * AdvSendIdleCmd() flag definitions.
 */
#define ADV_NOWAIT	0x01

/*
 * Wait loop time out values.
 */
#define SCSI_WAIT_100_MSEC	100UL	/* 100 milliseconds */
#define SCSI_US_PER_MSEC	1000	/* microseconds per millisecond */
#define SCSI_MAX_RETRY		10	/* retry count */

#define ADV_ASYNC_RDMA_FAILURE		0x01	/* Fatal RDMA failure. */
#define ADV_ASYNC_SCSI_BUS_RESET_DET	0x02	/* Detected SCSI Bus Reset. */
#define ADV_ASYNC_CARRIER_READY_FAILURE	0x03	/* Carrier Ready failure. */
#define ADV_RDMA_IN_CARR_AND_Q_INVALID	0x04	/* RDMAed-in data invalid. */

#define ADV_HOST_SCSI_BUS_RESET	0x80	/* Host Initiated SCSI Bus Reset. */

/* Read byte from a register. */
#define AdvReadByteRegister(iop_base, reg_off) \
	(ADV_MEM_READB((iop_base) + (reg_off)))

/* Write byte to a register. */
#define AdvWriteByteRegister(iop_base, reg_off, byte) \
	(ADV_MEM_WRITEB((iop_base) + (reg_off), (byte)))

/* Read word (2 bytes) from a register. */
#define AdvReadWordRegister(iop_base, reg_off) \
	(ADV_MEM_READW((iop_base) + (reg_off)))

/* Write word (2 bytes) to a register. */
#define AdvWriteWordRegister(iop_base, reg_off, word) \
	(ADV_MEM_WRITEW((iop_base) + (reg_off), (word)))

/* Write dword (4 bytes) to a register. */
#define AdvWriteDWordRegister(iop_base, reg_off, dword) \
	(ADV_MEM_WRITEDW((iop_base) + (reg_off), (dword)))

/* Read byte from LRAM.
 */
#define AdvReadByteLram(iop_base, addr, byte) \
do { \
	ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \
	(byte) = ADV_MEM_READB((iop_base) + IOPB_RAM_DATA); \
} while (0)

/* Write byte to LRAM. */
#define AdvWriteByteLram(iop_base, addr, byte) \
	(ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
	 ADV_MEM_WRITEB((iop_base) + IOPB_RAM_DATA, (byte)))

/* Read word (2 bytes) from LRAM. */
#define AdvReadWordLram(iop_base, addr, word) \
do { \
	ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \
	(word) = (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA)); \
} while (0)

/* Write word (2 bytes) to LRAM. */
#define AdvWriteWordLram(iop_base, addr, word) \
	(ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
	 ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word)))

/* Write little-endian double word (4 bytes) to LRAM */
/* Because of unspecified C language ordering don't use auto-increment. */
#define AdvWriteDWordLramNoSwap(iop_base, addr, dword) \
	((ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
	  ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \
			 cpu_to_le16((ushort) ((dword) & 0xFFFF)))), \
	 (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr) + 2), \
	  ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \
			 cpu_to_le16((ushort) ((dword >> 16) & 0xFFFF)))))

/* Read word (2 bytes) from LRAM assuming that the address is already set. */
#define AdvReadWordAutoIncLram(iop_base) \
	(ADV_MEM_READW((iop_base) + IOPW_RAM_DATA))

/* Write word (2 bytes) to LRAM assuming that the address is already set. */
#define AdvWriteWordAutoIncLram(iop_base, word) \
	(ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word)))

/*
 * Define macro to check for Condor signature.
 *
 * Evaluate to ADV_TRUE if a Condor chip is found at the specified port
 * address 'iop_base'. Otherwise evaluate to ADV_FALSE.
 */
#define AdvFindSignature(iop_base) \
	(((AdvReadByteRegister((iop_base), IOPB_CHIP_ID_1) == \
	   ADV_CHIP_ID_BYTE) && \
	  (AdvReadWordRegister((iop_base), IOPW_CHIP_ID_0) == \
	   ADV_CHIP_ID_WORD)) ? ADV_TRUE : ADV_FALSE)

/*
 * Define macro to Return the version number of the chip at 'iop_base'.
 *
 * The second parameter 'bus_type' is currently unused.
 */
#define AdvGetChipVersion(iop_base, bus_type) \
	AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV)

/*
 * Abort an SRB in the chip's RISC Memory. The 'srb_ptr' argument must
 * match the ASC_SCSI_REQ_Q 'srb_ptr' field.
 *
 * If the request has not yet been sent to the device it will simply be
 * aborted from RISC memory. If the request is disconnected it will be
 * aborted on reselection by sending an Abort Message to the target ID.
 *
 * Return value:
 *      ADV_TRUE(1) - Queue was successfully aborted.
 *      ADV_FALSE(0) - Queue was not found on the active queue list.
 */
#define AdvAbortQueue(asc_dvc, scsiq) \
	AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \
		       (ADV_DCNT) (scsiq))

/*
 * Send a Bus Device Reset Message to the specified target ID.
 *
 * All outstanding commands will be purged if sending the
 * Bus Device Reset Message is successful.
 *
 * Return Value:
 *      ADV_TRUE(1) - All requests on the target are purged.
 *      ADV_FALSE(0) - Couldn't issue Bus Device Reset Message; Requests
 *                     are not purged.
 */
#define AdvResetDevice(asc_dvc, target_id) \
	AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \
		       (ADV_DCNT) (target_id))

/*
 * SCSI Wide Type definition.
 */
#define ADV_SCSI_BIT_ID_TYPE	ushort

/*
 * AdvInitScsiTarget() 'cntl_flag' options.
 */
#define ADV_SCAN_LUN		0x01
#define ADV_CAPINFO_NOLUN	0x02

/*
 * Convert target id to target id bit mask.
 */
#define ADV_TID_TO_TIDMASK(tid)	(0x01 << ((tid) & ADV_MAX_TID))

/*
 * ASC_SCSI_REQ_Q 'done_status' and 'host_status' return values.
 */
#define QD_NO_STATUS		0x00	/* Request not completed yet. */
#define QD_NO_ERROR		0x01
#define QD_ABORTED_BY_HOST	0x02
#define QD_WITH_ERROR		0x04

#define QHSTA_NO_ERROR			0x00
#define QHSTA_M_SEL_TIMEOUT		0x11
#define QHSTA_M_DATA_OVER_RUN		0x12
#define QHSTA_M_UNEXPECTED_BUS_FREE	0x13
#define QHSTA_M_QUEUE_ABORTED		0x15
#define QHSTA_M_SXFR_SDMA_ERR		0x16	/* SXFR_STATUS SCSI DMA Error */
#define QHSTA_M_SXFR_SXFR_PERR		0x17	/* SXFR_STATUS SCSI Bus Parity Error */
#define QHSTA_M_RDMA_PERR		0x18	/* RISC PCI DMA parity error */
#define QHSTA_M_SXFR_OFF_UFLW		0x19	/* SXFR_STATUS Offset Underflow */
#define QHSTA_M_SXFR_OFF_OFLW		0x20	/* SXFR_STATUS Offset Overflow */
#define QHSTA_M_SXFR_WD_TMO		0x21	/* SXFR_STATUS Watchdog Timeout */
#define QHSTA_M_SXFR_DESELECTED		0x22	/* SXFR_STATUS Deselected */
/* Note: QHSTA_M_SXFR_XFR_OFLW is identical to QHSTA_M_DATA_OVER_RUN. */
#define QHSTA_M_SXFR_XFR_OFLW		0x12	/* SXFR_STATUS Transfer Overflow */
#define QHSTA_M_SXFR_XFR_PH_ERR		0x24	/* SXFR_STATUS Transfer Phase Error */
#define QHSTA_M_SXFR_UNKNOWN_ERROR	0x25	/* SXFR_STATUS Unknown Error */
#define QHSTA_M_SCSI_BUS_RESET		0x30	/* Request aborted from SBR */
#define QHSTA_M_SCSI_BUS_RESET_UNSOL	0x31	/* Request aborted from unsol. SBR */
#define QHSTA_M_BUS_DEVICE_RESET	0x32	/* Request aborted from BDR */
#define QHSTA_M_DIRECTION_ERR		0x35	/* Data Phase mismatch */
#define QHSTA_M_DIRECTION_ERR_HUNG	0x36	/* Data Phase mismatch and bus hang */
#define QHSTA_M_WTM_TIMEOUT		0x41
#define QHSTA_M_BAD_CMPL_STATUS_IN	0x42
#define QHSTA_M_NO_AUTO_REQ_SENSE	0x43
#define QHSTA_M_AUTO_REQ_SENSE_FAIL	0x44
#define QHSTA_M_INVALID_DEVICE		0x45	/* Bad target ID */
#define QHSTA_M_FROZEN_TIDQ		0x46	/* TID Queue frozen. */
#define QHSTA_M_SGBACKUP_ERROR		0x47	/* Scatter-Gather backup error */

/* Return the address that is aligned at the next doubleword >= to 'addr'.
*/ #define ADV_8BALIGN(addr) (((ulong) (addr) + 0x7) & ~0x7) #define ADV_16BALIGN(addr) (((ulong) (addr) + 0xF) & ~0xF) #define ADV_32BALIGN(addr) (((ulong) (addr) + 0x1F) & ~0x1F) /* * Total contiguous memory needed for driver SG blocks. * * ADV_MAX_SG_LIST must be defined by a driver. It is the maximum * number of scatter-gather elements the driver supports in a * single request. */ #define ADV_SG_LIST_MAX_BYTE_SIZE \ (sizeof(ADV_SG_BLOCK) * \ ((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK)) /* struct asc_board flags */ #define ASC_IS_WIDE_BOARD 0x04 /* AdvanSys Wide Board */ #define ASC_NARROW_BOARD(boardp) (((boardp)->flags & ASC_IS_WIDE_BOARD) == 0) #define NO_ISA_DMA 0xff /* No ISA DMA Channel Used */ #define ASC_INFO_SIZE 128 /* advansys_info() line size */ #ifdef CONFIG_PROC_FS /* /proc/scsi/advansys/[0...] related definitions */ #define ASC_PRTBUF_SIZE 2048 #define ASC_PRTLINE_SIZE 160 #define ASC_PRT_NEXT() \ if (cp) { \ totlen += len; \ leftlen -= len; \ if (leftlen == 0) { \ return totlen; \ } \ cp += len; \ } #endif /* CONFIG_PROC_FS */ /* Asc Library return codes */ #define ASC_TRUE 1 #define ASC_FALSE 0 #define ASC_NOERROR 1 #define ASC_BUSY 0 #define ASC_ERROR (-1) /* struct scsi_cmnd function return codes */ #define STATUS_BYTE(byte) (byte) #define MSG_BYTE(byte) ((byte) << 8) #define HOST_BYTE(byte) ((byte) << 16) #define DRIVER_BYTE(byte) ((byte) << 24) #define ASC_STATS(shost, counter) ASC_STATS_ADD(shost, counter, 1) #ifndef ADVANSYS_STATS #define ASC_STATS_ADD(shost, counter, count) #else /* ADVANSYS_STATS */ #define ASC_STATS_ADD(shost, counter, count) \ (((struct asc_board *) shost_priv(shost))->asc_stats.counter += (count)) #endif /* ADVANSYS_STATS */ /* If the result wraps when calculating tenths, return 0. */ #define ASC_TENTHS(num, den) \ (((10 * ((num)/(den))) > (((num) * 10)/(den))) ? \ 0 : ((((num) * 10)/(den)) - (10 * ((num)/(den))))) /* * Display a message to the console. 
*/ #define ASC_PRINT(s) \ { \ printk("advansys: "); \ printk(s); \ } #define ASC_PRINT1(s, a1) \ { \ printk("advansys: "); \ printk((s), (a1)); \ } #define ASC_PRINT2(s, a1, a2) \ { \ printk("advansys: "); \ printk((s), (a1), (a2)); \ } #define ASC_PRINT3(s, a1, a2, a3) \ { \ printk("advansys: "); \ printk((s), (a1), (a2), (a3)); \ } #define ASC_PRINT4(s, a1, a2, a3, a4) \ { \ printk("advansys: "); \ printk((s), (a1), (a2), (a3), (a4)); \ } #ifndef ADVANSYS_DEBUG #define ASC_DBG(lvl, s...) #define ASC_DBG_PRT_SCSI_HOST(lvl, s) #define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) #define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) #define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) #define ADV_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) #define ASC_DBG_PRT_HEX(lvl, name, start, length) #define ASC_DBG_PRT_CDB(lvl, cdb, len) #define ASC_DBG_PRT_SENSE(lvl, sense, len) #define ASC_DBG_PRT_INQUIRY(lvl, inq, len) #else /* ADVANSYS_DEBUG */ /* * Debugging Message Levels: * 0: Errors Only * 1: High-Level Tracing * 2-N: Verbose Tracing */ #define ASC_DBG(lvl, format, arg...) 
{ \
	if (asc_dbglvl >= (lvl)) \
		printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \
			__func__ , ## arg); \
}

#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
{ \
	if (asc_dbglvl >= (lvl)) { \
		asc_prt_scsi_host(s); \
	} \
}

#define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) \
{ \
	if (asc_dbglvl >= (lvl)) { \
		asc_prt_asc_scsi_q(scsiqp); \
	} \
}

#define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) \
{ \
	if (asc_dbglvl >= (lvl)) { \
		asc_prt_asc_qdone_info(qdone); \
	} \
}

#define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) \
{ \
	if (asc_dbglvl >= (lvl)) { \
		asc_prt_adv_scsi_req_q(scsiqp); \
	} \
}

#define ASC_DBG_PRT_HEX(lvl, name, start, length) \
{ \
	if (asc_dbglvl >= (lvl)) { \
		asc_prt_hex((name), (start), (length)); \
	} \
}

#define ASC_DBG_PRT_CDB(lvl, cdb, len) \
	ASC_DBG_PRT_HEX((lvl), "CDB", (uchar *) (cdb), (len));

#define ASC_DBG_PRT_SENSE(lvl, sense, len) \
	ASC_DBG_PRT_HEX((lvl), "SENSE", (uchar *) (sense), (len));

#define ASC_DBG_PRT_INQUIRY(lvl, inq, len) \
	ASC_DBG_PRT_HEX((lvl), "INQUIRY", (uchar *) (inq), (len));
#endif /* ADVANSYS_DEBUG */

#ifdef ADVANSYS_STATS

/* Per board statistics structure */
struct asc_stats {
	/* Driver Entrypoint Statistics */
	ADV_DCNT queuecommand;	/* # calls to advansys_queuecommand() */
	ADV_DCNT reset;		/* # calls to advansys_eh_bus_reset() */
	ADV_DCNT biosparam;	/* # calls to advansys_biosparam() */
	ADV_DCNT interrupt;	/* # advansys_interrupt() calls */
	ADV_DCNT callback;	/* # calls to asc/adv_isr_callback() */
	ADV_DCNT done;		/* # calls to request's scsi_done function */
	ADV_DCNT build_error;	/* # asc/adv_build_req() ASC_ERROR returns. */
	ADV_DCNT adv_build_noreq;	/* # adv_build_req() adv_req_t alloc. fail. */
	ADV_DCNT adv_build_nosg;	/* # adv_build_req() adv_sgblk_t alloc. fail. */
	/* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */
	ADV_DCNT exe_noerror;	/* # ASC_NOERROR returns. */
	ADV_DCNT exe_busy;	/* # ASC_BUSY returns. */
	ADV_DCNT exe_error;	/* # ASC_ERROR returns. */
	ADV_DCNT exe_unknown;	/* # unknown returns. */
	/* Data Transfer Statistics */
	ADV_DCNT xfer_cnt;	/* # I/O requests received */
	ADV_DCNT xfer_elem;	/* # scatter-gather elements */
	ADV_DCNT xfer_sect;	/* # 512-byte blocks */
};
#endif /* ADVANSYS_STATS */

/*
 * Structure allocated for each board.
 *
 * This structure is allocated by scsi_host_alloc() at the end
 * of the 'Scsi_Host' structure starting at the 'hostdata'
 * field. It is guaranteed to be allocated from DMA-able memory.
 */
struct asc_board {
	struct device *dev;
	uint flags;		/* Board flags */
	unsigned int irq;
	union {
		ASC_DVC_VAR asc_dvc_var;	/* Narrow board */
		ADV_DVC_VAR adv_dvc_var;	/* Wide board */
	} dvc_var;
	union {
		ASC_DVC_CFG asc_dvc_cfg;	/* Narrow board */
		ADV_DVC_CFG adv_dvc_cfg;	/* Wide board */
	} dvc_cfg;
	ushort asc_n_io_port;	/* Number I/O ports. */
	ADV_SCSI_BIT_ID_TYPE init_tidmask;	/* Target init./valid mask */
	ushort reqcnt[ADV_MAX_TID + 1];	/* Starvation request count */
	ADV_SCSI_BIT_ID_TYPE queue_full;	/* Queue full mask */
	ushort queue_full_cnt[ADV_MAX_TID + 1];	/* Queue full count */
	union {
		ASCEEP_CONFIG asc_eep;	/* Narrow EEPROM config. */
		ADVEEP_3550_CONFIG adv_3550_eep;	/* 3550 EEPROM config. */
		ADVEEP_38C0800_CONFIG adv_38C0800_eep;	/* 38C0800 EEPROM config. */
		ADVEEP_38C1600_CONFIG adv_38C1600_eep;	/* 38C1600 EEPROM config. */
	} eep_config;
	ulong last_reset;	/* Saved last reset time */
	/* /proc/scsi/advansys/[0...] */
	char *prtbuf;		/* /proc print buffer */
#ifdef ADVANSYS_STATS
	struct asc_stats asc_stats;	/* Board statistics */
#endif				/* ADVANSYS_STATS */
	/*
	 * The following fields are used only for Narrow Boards.
	 */
	uchar sdtr_data[ASC_MAX_TID + 1];	/* SDTR information */
	/*
	 * The following fields are used only for Wide Boards.
	 */
	void __iomem *ioremap_addr;	/* I/O Memory remap address. */
	ushort ioport;		/* I/O Port address. */
	adv_req_t *adv_reqp;	/* Request structures. */
	adv_sgblk_t *adv_sgblkp;	/* Scatter-gather structures. */
	ushort bios_signature;	/* BIOS Signature. */
	ushort bios_version;	/* BIOS Version. */
	ushort bios_codeseg;	/* BIOS Code Segment. */
	ushort bios_codelen;	/* BIOS Code Segment Length. */
};

/* Map a narrow/wide device-variable pointer back to its enclosing board. */
#define asc_dvc_to_board(asc_dvc) container_of(asc_dvc, struct asc_board, \
							dvc_var.asc_dvc_var)
#define adv_dvc_to_board(adv_dvc) container_of(adv_dvc, struct asc_board, \
							dvc_var.adv_dvc_var)
#define adv_dvc_to_pdev(adv_dvc) to_pci_dev(adv_dvc_to_board(adv_dvc)->dev)

#ifdef ADVANSYS_DEBUG
static int asc_dbglvl = 3;

/*
 * asc_prt_asc_dvc_var()
 *
 * Dump every field of a narrow-board ASC_DVC_VAR to the console for
 * debugging.  Debug-build only (ADVANSYS_DEBUG).
 */
static void asc_prt_asc_dvc_var(ASC_DVC_VAR *h)
{
	printk("ASC_DVC_VAR at addr 0x%lx\n", (ulong)h);

	printk(" iop_base 0x%x, err_code 0x%x, dvc_cntl 0x%x, bug_fix_cntl "
	       "%d,\n", h->iop_base, h->err_code, h->dvc_cntl, h->bug_fix_cntl);

	printk(" bus_type %d, init_sdtr 0x%x,\n", h->bus_type,
	       (unsigned)h->init_sdtr);

	printk(" sdtr_done 0x%x, use_tagged_qng 0x%x, unit_not_ready 0x%x, "
	       "chip_no 0x%x,\n", (unsigned)h->sdtr_done,
	       (unsigned)h->use_tagged_qng, (unsigned)h->unit_not_ready,
	       (unsigned)h->chip_no);

	printk(" queue_full_or_busy 0x%x, start_motor 0x%x, scsi_reset_wait "
	       "%u,\n", (unsigned)h->queue_full_or_busy,
	       (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait);

	printk(" is_in_int %u, max_total_qng %u, cur_total_qng %u, "
	       "in_critical_cnt %u,\n", (unsigned)h->is_in_int,
	       (unsigned)h->max_total_qng, (unsigned)h->cur_total_qng,
	       (unsigned)h->in_critical_cnt);

	printk(" last_q_shortage %u, init_state 0x%x, no_scam 0x%x, "
	       "pci_fix_asyn_xfer 0x%x,\n", (unsigned)h->last_q_shortage,
	       (unsigned)h->init_state, (unsigned)h->no_scam,
	       (unsigned)h->pci_fix_asyn_xfer);

	printk(" cfg 0x%lx\n", (ulong)h->cfg);
}

/*
 * asc_prt_asc_dvc_cfg()
 *
 * Dump a narrow-board ASC_DVC_CFG to the console for debugging.
 */
static void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h)
{
	printk("ASC_DVC_CFG at addr 0x%lx\n", (ulong)h);

	printk(" can_tagged_qng 0x%x, cmd_qng_enabled 0x%x,\n",
	       h->can_tagged_qng, h->cmd_qng_enabled);
	printk(" disc_enable 0x%x, sdtr_enable 0x%x,\n",
	       h->disc_enable, h->sdtr_enable);

	printk(" chip_scsi_id %d, isa_dma_speed %d, isa_dma_channel %d, "
	       "chip_version %d,\n", h->chip_scsi_id, h->isa_dma_speed,
h->isa_dma_channel, h->chip_version); printk(" mcode_date 0x%x, mcode_version %d\n", h->mcode_date, h->mcode_version); } /* * asc_prt_adv_dvc_var() * * Display an ADV_DVC_VAR structure. */ static void asc_prt_adv_dvc_var(ADV_DVC_VAR *h) { printk(" ADV_DVC_VAR at addr 0x%lx\n", (ulong)h); printk(" iop_base 0x%lx, err_code 0x%x, ultra_able 0x%x\n", (ulong)h->iop_base, h->err_code, (unsigned)h->ultra_able); printk(" sdtr_able 0x%x, wdtr_able 0x%x\n", (unsigned)h->sdtr_able, (unsigned)h->wdtr_able); printk(" start_motor 0x%x, scsi_reset_wait 0x%x\n", (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%lxn\n", (unsigned)h->max_host_qng, (unsigned)h->max_dvc_qng, (ulong)h->carr_freelist); printk(" icq_sp 0x%lx, irq_sp 0x%lx\n", (ulong)h->icq_sp, (ulong)h->irq_sp); printk(" no_scam 0x%x, tagqng_able 0x%x\n", (unsigned)h->no_scam, (unsigned)h->tagqng_able); printk(" chip_scsi_id 0x%x, cfg 0x%lx\n", (unsigned)h->chip_scsi_id, (ulong)h->cfg); } /* * asc_prt_adv_dvc_cfg() * * Display an ADV_DVC_CFG structure. 
*/ static void asc_prt_adv_dvc_cfg(ADV_DVC_CFG *h) { printk(" ADV_DVC_CFG at addr 0x%lx\n", (ulong)h); printk(" disc_enable 0x%x, termination 0x%x\n", h->disc_enable, h->termination); printk(" chip_version 0x%x, mcode_date 0x%x\n", h->chip_version, h->mcode_date); printk(" mcode_version 0x%x, control_flag 0x%x\n", h->mcode_version, h->control_flag); } /* * asc_prt_scsi_host() */ static void asc_prt_scsi_host(struct Scsi_Host *s) { struct asc_board *boardp = shost_priv(s); printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); printk(" host_busy %u, host_no %d, last_reset %d,\n", s->host_busy, s->host_no, (unsigned)s->last_reset); printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", (ulong)s->base, (ulong)s->io_port, boardp->irq); printk(" dma_channel %d, this_id %d, can_queue %d,\n", s->dma_channel, s->this_id, s->can_queue); printk(" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d\n", s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma); if (ASC_NARROW_BOARD(boardp)) { asc_prt_asc_dvc_var(&boardp->dvc_var.asc_dvc_var); asc_prt_asc_dvc_cfg(&boardp->dvc_cfg.asc_dvc_cfg); } else { asc_prt_adv_dvc_var(&boardp->dvc_var.adv_dvc_var); asc_prt_adv_dvc_cfg(&boardp->dvc_cfg.adv_dvc_cfg); } } /* * asc_prt_hex() * * Print hexadecimal output in 4 byte groupings 32 bytes * or 8 double-words per line. */ static void asc_prt_hex(char *f, uchar *s, int l) { int i; int j; int k; int m; printk("%s: (%d bytes)\n", f, l); for (i = 0; i < l; i += 32) { /* Display a maximum of 8 double-words per line. 
*/ if ((k = (l - i) / 4) >= 8) { k = 8; m = 0; } else { m = (l - i) % 4; } for (j = 0; j < k; j++) { printk(" %2.2X%2.2X%2.2X%2.2X", (unsigned)s[i + (j * 4)], (unsigned)s[i + (j * 4) + 1], (unsigned)s[i + (j * 4) + 2], (unsigned)s[i + (j * 4) + 3]); } switch (m) { case 0: default: break; case 1: printk(" %2.2X", (unsigned)s[i + (j * 4)]); break; case 2: printk(" %2.2X%2.2X", (unsigned)s[i + (j * 4)], (unsigned)s[i + (j * 4) + 1]); break; case 3: printk(" %2.2X%2.2X%2.2X", (unsigned)s[i + (j * 4) + 1], (unsigned)s[i + (j * 4) + 2], (unsigned)s[i + (j * 4) + 3]); break; } printk("\n"); } } /* * asc_prt_asc_scsi_q() */ static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q) { ASC_SG_HEAD *sgp; int i; printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q); printk (" target_ix 0x%x, target_lun %u, srb_ptr 0x%lx, tag_code 0x%x,\n", q->q2.target_ix, q->q1.target_lun, (ulong)q->q2.srb_ptr, q->q2.tag_code); printk (" data_addr 0x%lx, data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", (ulong)le32_to_cpu(q->q1.data_addr), (ulong)le32_to_cpu(q->q1.data_cnt), (ulong)le32_to_cpu(q->q1.sense_addr), q->q1.sense_len); printk(" cdbptr 0x%lx, cdb_len %u, sg_head 0x%lx, sg_queue_cnt %u\n", (ulong)q->cdbptr, q->q2.cdb_len, (ulong)q->sg_head, q->q1.sg_queue_cnt); if (q->sg_head) { sgp = q->sg_head; printk("ASC_SG_HEAD at addr 0x%lx\n", (ulong)sgp); printk(" entry_cnt %u, queue_cnt %u\n", sgp->entry_cnt, sgp->queue_cnt); for (i = 0; i < sgp->entry_cnt; i++) { printk(" [%u]: addr 0x%lx, bytes %lu\n", i, (ulong)le32_to_cpu(sgp->sg_list[i].addr), (ulong)le32_to_cpu(sgp->sg_list[i].bytes)); } } } /* * asc_prt_asc_qdone_info() */ static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q) { printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q); printk(" srb_ptr 0x%lx, target_ix %u, cdb_len %u, tag_code %u,\n", (ulong)q->d2.srb_ptr, q->d2.target_ix, q->d2.cdb_len, q->d2.tag_code); printk (" done_stat 0x%x, host_stat 0x%x, scsi_stat 0x%x, scsi_msg 0x%x\n", q->d3.done_stat, q->d3.host_stat, q->d3.scsi_stat, 
q->d3.scsi_msg); } /* * asc_prt_adv_sgblock() * * Display an ADV_SG_BLOCK structure. */ static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b) { int i; printk(" ASC_SG_BLOCK at addr 0x%lx (sgblockno %d)\n", (ulong)b, sgblockno); printk(" sg_cnt %u, sg_ptr 0x%lx\n", b->sg_cnt, (ulong)le32_to_cpu(b->sg_ptr)); BUG_ON(b->sg_cnt > NO_OF_SG_PER_BLOCK); if (b->sg_ptr != 0) BUG_ON(b->sg_cnt != NO_OF_SG_PER_BLOCK); for (i = 0; i < b->sg_cnt; i++) { printk(" [%u]: sg_addr 0x%lx, sg_count 0x%lx\n", i, (ulong)b->sg_list[i].sg_addr, (ulong)b->sg_list[i].sg_count); } } /* * asc_prt_adv_scsi_req_q() * * Display an ADV_SCSI_REQ_Q structure. */ static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q) { int sg_blk_cnt; struct asc_sg_block *sg_ptr; printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q); printk(" target_id %u, target_lun %u, srb_ptr 0x%lx, a_flag 0x%x\n", q->target_id, q->target_lun, (ulong)q->srb_ptr, q->a_flag); printk(" cntl 0x%x, data_addr 0x%lx, vdata_addr 0x%lx\n", q->cntl, (ulong)le32_to_cpu(q->data_addr), (ulong)q->vdata_addr); printk(" data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", (ulong)le32_to_cpu(q->data_cnt), (ulong)le32_to_cpu(q->sense_addr), q->sense_len); printk (" cdb_len %u, done_status 0x%x, host_status 0x%x, scsi_status 0x%x\n", q->cdb_len, q->done_status, q->host_status, q->scsi_status); printk(" sg_working_ix 0x%x, target_cmd %u\n", q->sg_working_ix, q->target_cmd); printk(" scsiq_rptr 0x%lx, sg_real_addr 0x%lx, sg_list_ptr 0x%lx\n", (ulong)le32_to_cpu(q->scsiq_rptr), (ulong)le32_to_cpu(q->sg_real_addr), (ulong)q->sg_list_ptr); /* Display the request's ADV_SG_BLOCK structures. */ if (q->sg_list_ptr != NULL) { sg_blk_cnt = 0; while (1) { /* * 'sg_ptr' is a physical address. Convert it to a virtual * address by indexing 'sg_blk_cnt' into the virtual address * array 'sg_list_ptr'. * * XXX - Assumes all SG physical blocks are virtually contiguous. 
*/ sg_ptr = &(((ADV_SG_BLOCK *)(q->sg_list_ptr))[sg_blk_cnt]); asc_prt_adv_sgblock(sg_blk_cnt, sg_ptr); if (sg_ptr->sg_ptr == 0) { break; } sg_blk_cnt++; } } } #endif /* ADVANSYS_DEBUG */ /* * The advansys chip/microcode contains a 32-bit identifier for each command * known as the 'srb'. I don't know what it stands for. The driver used * to encode the scsi_cmnd pointer by calling virt_to_bus and retrieve it * with bus_to_virt. Now the driver keeps a per-host map of integers to * pointers. It auto-expands when full, unless it can't allocate memory. * Note that an srb of 0 is treated specially by the chip/firmware, hence * the return of i+1 in this routine, and the corresponding subtraction in * the inverse routine. */ #define BAD_SRB 0 static u32 advansys_ptr_to_srb(struct asc_dvc_var *asc_dvc, void *ptr) { int i; void **new_ptr; for (i = 0; i < asc_dvc->ptr_map_count; i++) { if (!asc_dvc->ptr_map[i]) goto out; } if (asc_dvc->ptr_map_count == 0) asc_dvc->ptr_map_count = 1; else asc_dvc->ptr_map_count *= 2; new_ptr = krealloc(asc_dvc->ptr_map, asc_dvc->ptr_map_count * sizeof(void *), GFP_ATOMIC); if (!new_ptr) return BAD_SRB; asc_dvc->ptr_map = new_ptr; out: ASC_DBG(3, "Putting ptr %p into array offset %d\n", ptr, i); asc_dvc->ptr_map[i] = ptr; return i + 1; } static void * advansys_srb_to_ptr(struct asc_dvc_var *asc_dvc, u32 srb) { void *ptr; srb--; if (srb >= asc_dvc->ptr_map_count) { printk("advansys: bad SRB %u, max %u\n", srb, asc_dvc->ptr_map_count); return NULL; } ptr = asc_dvc->ptr_map[srb]; asc_dvc->ptr_map[srb] = NULL; ASC_DBG(3, "Returning ptr %p from array offset %d\n", ptr, srb); return ptr; } /* * advansys_info() * * Return suitable for printing on the console with the argument * adapter's configuration information. * * Note: The information line should not exceed ASC_INFO_SIZE bytes, * otherwise the static 'info' array will be overrun. 
*/ static const char *advansys_info(struct Scsi_Host *shost) { static char info[ASC_INFO_SIZE]; struct asc_board *boardp = shost_priv(shost); ASC_DVC_VAR *asc_dvc_varp; ADV_DVC_VAR *adv_dvc_varp; char *busname; char *widename = NULL; if (ASC_NARROW_BOARD(boardp)) { asc_dvc_varp = &boardp->dvc_var.asc_dvc_var; ASC_DBG(1, "begin\n"); if (asc_dvc_varp->bus_type & ASC_IS_ISA) { if ((asc_dvc_varp->bus_type & ASC_IS_ISAPNP) == ASC_IS_ISAPNP) { busname = "ISA PnP"; } else { busname = "ISA"; } sprintf(info, "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X, DMA 0x%X", ASC_VERSION, busname, (ulong)shost->io_port, (ulong)shost->io_port + ASC_IOADR_GAP - 1, boardp->irq, shost->dma_channel); } else { if (asc_dvc_varp->bus_type & ASC_IS_VL) { busname = "VL"; } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) { busname = "EISA"; } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) { if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { busname = "PCI Ultra"; } else { busname = "PCI"; } } else { busname = "?"; shost_printk(KERN_ERR, shost, "unknown bus " "type %d\n", asc_dvc_varp->bus_type); } sprintf(info, "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X", ASC_VERSION, busname, (ulong)shost->io_port, (ulong)shost->io_port + ASC_IOADR_GAP - 1, boardp->irq); } } else { /* * Wide Adapter Information * * Memory-mapped I/O is used instead of I/O space to access * the adapter, but display the I/O Port range. The Memory * I/O address is displayed through the driver /proc file. 
*/ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var; if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { widename = "Ultra-Wide"; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { widename = "Ultra2-Wide"; } else { widename = "Ultra3-Wide"; } sprintf(info, "AdvanSys SCSI %s: PCI %s: PCIMEM 0x%lX-0x%lX, IRQ 0x%X", ASC_VERSION, widename, (ulong)adv_dvc_varp->iop_base, (ulong)adv_dvc_varp->iop_base + boardp->asc_n_io_port - 1, boardp->irq); } BUG_ON(strlen(info) >= ASC_INFO_SIZE); ASC_DBG(1, "end\n"); return info; } #ifdef CONFIG_PROC_FS /* * asc_prt_line() * * If 'cp' is NULL print to the console, otherwise print to a buffer. * * Return 0 if printing to the console, otherwise return the number of * bytes written to the buffer. * * Note: If any single line is greater than ASC_PRTLINE_SIZE bytes the stack * will be corrupted. 's[]' is defined to be ASC_PRTLINE_SIZE bytes. */ static int asc_prt_line(char *buf, int buflen, char *fmt, ...) { va_list args; int ret; char s[ASC_PRTLINE_SIZE]; va_start(args, fmt); ret = vsprintf(s, fmt, args); BUG_ON(ret >= ASC_PRTLINE_SIZE); if (buf == NULL) { (void)printk(s); ret = 0; } else { ret = min(buflen, ret); memcpy(buf, s, ret); } va_end(args); return ret; } /* * asc_prt_board_devices() * * Print driver information for devices attached to the board. * * Note: no single line should be greater than ASC_PRTLINE_SIZE, * cf. asc_prt_line(). * * Return the number of characters copied into 'cp'. No more than * 'cplen' characters will be copied to 'cp'. 
 */
static int asc_prt_board_devices(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	int chip_scsi_id;
	int i;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nDevice Information for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	/* The adapter's own SCSI ID lives in different structures for
	 * narrow vs. wide boards. */
	if (ASC_NARROW_BOARD(boardp)) {
		chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
	} else {
		chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
	}

	len = asc_prt_line(cp, leftlen, "Target IDs Detected:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) {
			len = asc_prt_line(cp, leftlen, " %X,", i);
			ASC_PRT_NEXT();
		}
	}
	len = asc_prt_line(cp, leftlen, " (%X=Host Adapter)\n", chip_scsi_id);
	ASC_PRT_NEXT();

	return totlen;
}

/*
 * Display Wide Board BIOS Information.
 */
static int asc_prt_adv_bios(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	ushort major, minor, letter;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen, "\nROM BIOS Version: ");
	ASC_PRT_NEXT();

	/*
	 * If the BIOS saved a valid signature, then fill in
	 * the BIOS code segment base address.
	 */
	if (boardp->bios_signature != 0x55AA) {
		len = asc_prt_line(cp, leftlen, "Disabled or Pre-3.1\n");
		ASC_PRT_NEXT();
		len = asc_prt_line(cp, leftlen,
				   "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n");
		ASC_PRT_NEXT();
		len = asc_prt_line(cp, leftlen,
				   "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n");
		ASC_PRT_NEXT();
	} else {
		/* bios_version encodes major.minor + letter revision. */
		major = (boardp->bios_version >> 12) & 0xF;
		minor = (boardp->bios_version >> 8) & 0xF;
		letter = (boardp->bios_version & 0xFF);

		len = asc_prt_line(cp, leftlen, "%d.%d%c\n",
				   major, minor,
				   letter >= 26 ? '?' : letter + 'A');
		ASC_PRT_NEXT();

		/*
		 * Current available ROM BIOS release is 3.1I for UW
		 * and 3.2I for U2W. This code doesn't differentiate
		 * UW and U2W boards.
		 */
		if (major < 3 || (major <= 3 && minor < 1) ||
		    (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) {
			len = asc_prt_line(cp, leftlen,
					   "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n");
			ASC_PRT_NEXT();
			len = asc_prt_line(cp, leftlen,
					   "ftp://ftp.connectcom.net/pub\n");
			ASC_PRT_NEXT();
		}
	}

	return totlen;
}

/*
 * Add serial number to information bar if signature AAh
 * is found in at bit 15-9 (7 bits) of word 1.
 *
 * Serial Number consists of 12 alpha-numeric digits.
 *
 *       1 - Product type (A,B,C,D..)  Word0: 15-13 (3 bits)
 *       2 - MFG Location (A,B,C,D..)  Word0: 12-10 (3 bits)
 *     3-4 - Product ID (0-99)         Word0: 9-0 (10 bits)
 *       5 - Product revision (A-J)    Word0: "         "
 *
 *           Signature                 Word1: 15-9 (7 bits)
 *       6 - Year (0-9)                Word1: 8-6 (3 bits) & Word2: 15 (1 bit)
 *     7-8 - Week of the year (1-52)   Word1: 5-0 (6 bits)
 *
 *    9-12 - Serial Number (A001-Z999) Word2: 14-0 (15 bits)
 *
 * Note 1: Only production cards will have a serial number.
 *
 * Note 2: Signature is most significant 7 bits (0xFE).
 *
 * Returns ASC_TRUE if serial number found, otherwise returns ASC_FALSE.
 */
static int asc_get_eeprom_string(ushort *serialnum, uchar *cp)
{
	ushort w, num;

	if ((serialnum[1] & 0xFE00) != ((ushort)0xAA << 8)) {
		return ASC_FALSE;
	} else {
		/*
		 * First word - 6 digits.
		 */
		w = serialnum[0];

		/* Product type - 1st digit. */
		if ((*cp = 'A' + ((w & 0xE000) >> 13)) == 'H') {
			/* Product type is P=Prototype */
			*cp += 0x8;
		}
		cp++;

		/* Manufacturing location - 2nd digit. */
		*cp++ = 'A' + ((w & 0x1C00) >> 10);

		/* Product ID - 3rd, 4th digits. */
		num = w & 0x3FF;
		*cp++ = '0' + (num / 100);
		num %= 100;
		*cp++ = '0' + (num / 10);

		/* Product revision - 5th digit. */
		*cp++ = 'A' + (num % 10);

		/*
		 * Second word
		 */
		w = serialnum[1];

		/*
		 * Year - 6th digit.
		 *
		 * If bit 15 of third word is set, then the
		 * last digit of the year is greater than 7.
		 */
		if (serialnum[2] & 0x8000) {
			*cp++ = '8' + ((w & 0x1C0) >> 6);
		} else {
			*cp++ = '0' + ((w & 0x1C0) >> 6);
		}

		/* Week of year - 7th, 8th digits. */
		num = w & 0x003F;
		*cp++ = '0' + num / 10;
		num %= 10;
		*cp++ = '0' + num;

		/*
		 * Third word
		 */
		w = serialnum[2] & 0x7FFF;

		/* Serial number - 9th digit. */
		*cp++ = 'A' + (w / 1000);

		/* 10th, 11th, 12th digits. */
		num = w % 1000;
		*cp++ = '0' + num / 100;
		num %= 100;
		*cp++ = '0' + num / 10;
		num %= 10;
		*cp++ = '0' + num;

		*cp = '\0';	/* Null Terminate the string. */

		return ASC_TRUE;
	}
}

/*
 * asc_prt_asc_board_eeprom()
 *
 * Print board EEPROM configuration.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_asc_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	ASC_DVC_VAR *asc_dvc_varp;
	int leftlen;
	int totlen;
	int len;
	ASCEEP_CONFIG *ep;
	int i;
#ifdef CONFIG_ISA
	int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
#endif /* CONFIG_ISA */
	uchar serialstr[13];

	asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
	ep = &boardp->eep_config.asc_eep;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	if (asc_get_eeprom_string((ushort *)&ep->adapter_info[0], serialstr)
	    == ASC_TRUE) {
		len = asc_prt_line(cp, leftlen, " Serial Number: %s\n",
				   serialstr);
		ASC_PRT_NEXT();
	} else {
		/* 0xBB marks a default configuration for EEPROM-less boards. */
		if (ep->adapter_info[5] == 0xBB) {
			len = asc_prt_line(cp, leftlen,
					   " Default Settings Used for EEPROM-less Adapter.\n");
			ASC_PRT_NEXT();
		} else {
			len = asc_prt_line(cp, leftlen,
					   " Serial Number Signature Not Present.\n");
			ASC_PRT_NEXT();
		}
	}

	len = asc_prt_line(cp, leftlen,
			   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
			   ASC_EEP_GET_CHIP_ID(ep), ep->max_total_qng,
			   ep->max_tag_qng);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " cntl 0x%x, no_scam 0x%x\n", ep->cntl,
			   ep->no_scam);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Target ID:           ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %d", i);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Per-target Y/N flags, one column per target ID. */
	len = asc_prt_line(cp, leftlen, " Disconnects:         ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Command Queuing:     ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Start Motor:         ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

#ifdef CONFIG_ISA
	if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
		len = asc_prt_line(cp, leftlen,
				   " Host ISA DMA speed:   %d MB/S\n",
				   isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]);
		ASC_PRT_NEXT();
	}
#endif /* CONFIG_ISA */

	return totlen;
}

/*
 * asc_prt_adv_board_eeprom()
 *
 * Print board EEPROM configuration.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	ADV_DVC_VAR *adv_dvc_varp;
	int leftlen;
	int totlen;
	int len;
	int i;
	char *termstr;
	uchar serialstr[13];
	/* Exactly one of the three EEPROM layout pointers below is set,
	 * selected by the chip type. */
	ADVEEP_3550_CONFIG *ep_3550 = NULL;
	ADVEEP_38C0800_CONFIG *ep_38C0800 = NULL;
	ADVEEP_38C1600_CONFIG *ep_38C1600 = NULL;
	ushort word;
	ushort *wordp;
	ushort sdtr_speed = 0;

	adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		ep_3550 = &boardp->eep_config.adv_3550_eep;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		ep_38C0800 = &boardp->eep_config.adv_38C0800_eep;
	} else {
		ep_38C1600 = &boardp->eep_config.adv_38C1600_eep;
	}

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nEEPROM Settings for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		wordp = &ep_3550->serial_number_word1;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		wordp = &ep_38C0800->serial_number_word1;
	} else {
		wordp = &ep_38C1600->serial_number_word1;
	}

	if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE) {
		len = asc_prt_line(cp, leftlen, " Serial Number: %s\n",
				   serialstr);
		ASC_PRT_NEXT();
	} else {
		len = asc_prt_line(cp, leftlen,
				   " Serial Number Signature Not Present.\n");
		ASC_PRT_NEXT();
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen,
				   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
				   ep_3550->adapter_scsi_id,
				   ep_3550->max_host_qng,
				   ep_3550->max_dvc_qng);
		ASC_PRT_NEXT();
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		len = asc_prt_line(cp, leftlen,
				   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
				   ep_38C0800->adapter_scsi_id,
				   ep_38C0800->max_host_qng,
				   ep_38C0800->max_dvc_qng);
		ASC_PRT_NEXT();
	} else {
		len = asc_prt_line(cp, leftlen,
				   " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
				   ep_38C1600->adapter_scsi_id,
				   ep_38C1600->max_host_qng,
				   ep_38C1600->max_dvc_qng);
		ASC_PRT_NEXT();
	}

	/* Decode the LVD/SE termination setting into a human string. */
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->termination;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->termination_lvd;
	} else {
		word = ep_38C1600->termination_lvd;
	}
	switch (word) {
	case 1:
		termstr = "Low Off/High Off";
		break;
	case 2:
		termstr = "Low Off/High On";
		break;
	case 3:
		termstr = "Low On/High On";
		break;
	default:
	case 0:
		termstr = "Automatic";
		break;
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen,
				   " termination: %u (%s), bios_ctrl: 0x%x\n",
				   ep_3550->termination, termstr,
				   ep_3550->bios_ctrl);
		ASC_PRT_NEXT();
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		len = asc_prt_line(cp, leftlen,
				   " termination: %u (%s), bios_ctrl: 0x%x\n",
				   ep_38C0800->termination_lvd, termstr,
				   ep_38C0800->bios_ctrl);
		ASC_PRT_NEXT();
	} else {
		len = asc_prt_line(cp, leftlen,
				   " termination: %u (%s), bios_ctrl: 0x%x\n",
				   ep_38C1600->termination_lvd, termstr,
				   ep_38C1600->bios_ctrl);
		ASC_PRT_NEXT();
	}

	len = asc_prt_line(cp, leftlen, " Target ID:           ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %X", i);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Per-target Y/N columns follow; 'word' is reloaded per feature
	 * from whichever EEPROM layout applies. */
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->disc_enable;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->disc_enable;
	} else {
		word = ep_38C1600->disc_enable;
	}
	len = asc_prt_line(cp, leftlen, " Disconnects:         ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->tagqng_able;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->tagqng_able;
	} else {
		word = ep_38C1600->tagqng_able;
	}
	len = asc_prt_line(cp, leftlen, " Command Queuing:     ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->start_motor;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->start_motor;
	} else {
		word = ep_38C1600->start_motor;
	}
	len = asc_prt_line(cp, leftlen, " Start Motor:         ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* SDTR and Ultra columns exist only in the 3550 EEPROM layout. */
	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
		ASC_PRT_NEXT();
		for (i = 0; i <= ADV_MAX_TID; i++) {
			len = asc_prt_line(cp, leftlen, " %c",
					   (ep_3550->sdtr_able &
					    ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
			ASC_PRT_NEXT();
		}
		len = asc_prt_line(cp, leftlen, "\n");
		ASC_PRT_NEXT();
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		len = asc_prt_line(cp, leftlen, " Ultra Transfer:      ");
		ASC_PRT_NEXT();
		for (i = 0; i <= ADV_MAX_TID; i++) {
			len = asc_prt_line(cp, leftlen, " %c",
					   (ep_3550->ultra_able &
					    ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
			ASC_PRT_NEXT();
		}
		len = asc_prt_line(cp, leftlen, "\n");
		ASC_PRT_NEXT();
	}

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
		word = ep_3550->wdtr_able;
	} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
		word = ep_38C0800->wdtr_able;
	} else {
		word = ep_38C1600->wdtr_able;
	}
	len = asc_prt_line(cp, leftlen, " Wide Transfer:       ");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		len = asc_prt_line(cp, leftlen, " %c",
				   (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 ||
	    adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) {
		len = asc_prt_line(cp, leftlen,
				   " Synchronous Transfer Speed (Mhz):\n  ");
		ASC_PRT_NEXT();
		/* Four targets' speeds are packed per sdtr_speedN word,
		 * one nibble each; shift a nibble out per target. */
		for (i = 0; i <= ADV_MAX_TID; i++) {
			char *speed_str;

			if (i == 0) {
				sdtr_speed = adv_dvc_varp->sdtr_speed1;
			} else if (i == 4) {
				sdtr_speed = adv_dvc_varp->sdtr_speed2;
			} else if (i == 8) {
				sdtr_speed = adv_dvc_varp->sdtr_speed3;
			} else if (i == 12) {
				sdtr_speed = adv_dvc_varp->sdtr_speed4;
			}
			/* ADV_MAX_TID (0xF) doubles as the nibble mask here. */
			switch (sdtr_speed & ADV_MAX_TID) {
			case 0:
				speed_str = "Off";
				break;
			case 1:
				speed_str = "  5";
				break;
			case 2:
				speed_str = " 10";
				break;
			case 3:
				speed_str = " 20";
				break;
			case 4:
				speed_str = " 40";
				break;
			case 5:
				speed_str = " 80";
				break;
			default:
				speed_str = "Unk";
				break;
			}
			len = asc_prt_line(cp, leftlen, "%X:%s ", i, speed_str);
			ASC_PRT_NEXT();
			if (i == 7) {
				len = asc_prt_line(cp, leftlen, "\n  ");
				ASC_PRT_NEXT();
			}
			sdtr_speed >>= 4;
		}
		len = asc_prt_line(cp, leftlen, "\n");
		ASC_PRT_NEXT();
	}

	return totlen;
}

/*
 * asc_prt_driver_conf()
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
*/ static int asc_prt_driver_conf(struct Scsi_Host *shost, char *cp, int cplen) { struct asc_board *boardp = shost_priv(shost); int leftlen; int totlen; int len; int chip_scsi_id; leftlen = cplen; totlen = len = 0; len = asc_prt_line(cp, leftlen, "\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n", shost->host_no); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " host_busy %u, last_reset %u, max_id %u, max_lun %u, max_channel %u\n", shost->host_busy, shost->last_reset, shost->max_id, shost->max_lun, shost->max_channel); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n", shost->unique_id, shost->can_queue, shost->this_id, shost->sg_tablesize, shost->cmd_per_lun); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " unchecked_isa_dma %d, use_clustering %d\n", shost->unchecked_isa_dma, shost->use_clustering); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " flags 0x%x, last_reset 0x%x, jiffies 0x%x, asc_n_io_port 0x%x\n", boardp->flags, boardp->last_reset, jiffies, boardp->asc_n_io_port); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " io_port 0x%x\n", shost->io_port); ASC_PRT_NEXT(); if (ASC_NARROW_BOARD(boardp)) { chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id; } else { chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id; } return totlen; } /* * asc_prt_asc_board_info() * * Print dynamic board configuration information. * * Note: no single line should be greater than ASC_PRTLINE_SIZE, * cf. asc_prt_line(). * * Return the number of characters copied into 'cp'. No more than * 'cplen' characters will be copied to 'cp'. 
 */
static int asc_prt_asc_board_info(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int chip_scsi_id;
	int leftlen;
	int totlen;
	int len;
	ASC_DVC_VAR *v;
	ASC_DVC_CFG *c;
	int i;
	int renegotiate = 0;

	v = &boardp->dvc_var.asc_dvc_var;
	c = &boardp->dvc_cfg.asc_dvc_cfg;
	chip_scsi_id = c->chip_scsi_id;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " chip_version %u, mcode_date 0x%x, "
			   "mcode_version 0x%x, err_code %u\n",
			   c->chip_version, c->mcode_date, c->mcode_version,
			   v->err_code);
	ASC_PRT_NEXT();

	/* Current number of commands waiting for the host. */
	len = asc_prt_line(cp, leftlen,
			   " Total Command Pending: %d\n", v->cur_total_qng);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Command Queuing:");
	ASC_PRT_NEXT();
	/* The per-target loops below skip the adapter's own ID and any
	 * target that was not detected at init (init_tidmask). */
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Current number of commands waiting for a device. */
	len = asc_prt_line(cp, leftlen, " Command Queue Pending:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%u", i, v->cur_dvc_qng[i]);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Current limit on number of commands that can be sent to a device. */
	len = asc_prt_line(cp, leftlen, " Command Queue Limit:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%u", i, v->max_dvc_qng[i]);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Indicate whether the device has returned queue full status. */
	len = asc_prt_line(cp, leftlen, " Command Queue Full:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		if (boardp->queue_full & ADV_TID_TO_TIDMASK(i)) {
			len = asc_prt_line(cp, leftlen, " %X:Y-%d",
					   i, boardp->queue_full_cnt[i]);
		} else {
			len = asc_prt_line(cp, leftlen, " %X:N", i);
		}
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Synchronous Transfer:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ASC_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Detail the negotiated (or pending) SDTR parameters per target. */
	for (i = 0; i <= ASC_MAX_TID; i++) {
		uchar syn_period_ix;

		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) ||
		    ((v->init_sdtr & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}

		len = asc_prt_line(cp, leftlen, "  %X:", i);
		ASC_PRT_NEXT();

		if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) {
			len = asc_prt_line(cp, leftlen, " Asynchronous");
			ASC_PRT_NEXT();
		} else {
			/* High nibble of sdtr_data indexes the period table;
			 * 250/period gives the transfer rate in MHz. */
			syn_period_ix =
			    (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index -
							   1);

			len = asc_prt_line(cp, leftlen,
					   " Transfer Period Factor: %d (%d.%d Mhz),",
					   v->sdtr_period_tbl[syn_period_ix],
					   250 /
					   v->sdtr_period_tbl[syn_period_ix],
					   ASC_TENTHS(250,
						      v->
						      sdtr_period_tbl
						      [syn_period_ix]));
			ASC_PRT_NEXT();

			len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d",
					   boardp->
					   sdtr_data[i] & ASC_SYN_MAX_OFFSET);
			ASC_PRT_NEXT();
		}

		if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
			len = asc_prt_line(cp, leftlen, "*\n");
			renegotiate = 1;
		} else {
			len = asc_prt_line(cp, leftlen, "\n");
		}
		ASC_PRT_NEXT();
	}

	if (renegotiate) {
		len = asc_prt_line(cp, leftlen,
				   " * = Re-negotiation pending before next command.\n");
		ASC_PRT_NEXT();
	}

	return totlen;
}

/*
 * asc_prt_adv_board_info()
 *
 * Print dynamic board configuration information.
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
 */
static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen)
{
	struct asc_board *boardp = shost_priv(shost);
	int leftlen;
	int totlen;
	int len;
	int i;
	ADV_DVC_VAR *v;
	ADV_DVC_CFG *c;
	AdvPortAddr iop_base;
	ushort chip_scsi_id;
	ushort lramword;
	uchar lrambyte;
	ushort tagqng_able;
	ushort sdtr_able, wdtr_able;
	ushort wdtr_done, sdtr_done;
	ushort period = 0;
	int renegotiate = 0;	/* set when any TID still has WDTR/SDTR pending */

	v = &boardp->dvc_var.adv_dvc_var;
	c = &boardp->dvc_cfg.adv_dvc_cfg;
	iop_base = v->iop_base;
	chip_scsi_id = v->chip_scsi_id;

	leftlen = cplen;
	totlen = len = 0;

	len = asc_prt_line(cp, leftlen,
			   "\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
			   shost->host_no);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " iop_base 0x%lx, cable_detect: %X, err_code %u\n",
			   v->iop_base,
			   AdvReadWordRegister(iop_base,
					       IOPW_SCSI_CFG1) & CABLE_DETECT,
			   v->err_code);
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen,
			   " chip_version %u, mcode_date 0x%x, "
			   "mcode_version 0x%x\n",
			   c->chip_version, c->mcode_date, c->mcode_version);
	ASC_PRT_NEXT();

	/* Per-TID tag queuing capability, read live from microcode LRAM. */
	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	len = asc_prt_line(cp, leftlen, " Queuing Enabled:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (tagqng_able & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Queue Limit:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + i,
				lrambyte);
		len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	len = asc_prt_line(cp, leftlen, " Command Pending:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_QUEUED_CMD + i,
				lrambyte);
		len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	len = asc_prt_line(cp, leftlen, " Wide Enabled:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (wdtr_able & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Negotiated bus width: bit 15 of the handshake table word set
	 * means 16-bit (wide) transfers; '*' marks WDTR still pending. */
	AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done);
	len = asc_prt_line(cp, leftlen, " Transfer Bit Width:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		AdvReadWordLram(iop_base,
				ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
				lramword);
		len = asc_prt_line(cp, leftlen, " %X:%d",
				   i, (lramword & 0x8000) ? 16 : 8);
		ASC_PRT_NEXT();
		if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) &&
		    (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
			len = asc_prt_line(cp, leftlen, "*");
			ASC_PRT_NEXT();
			renegotiate = 1;
		}
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	len = asc_prt_line(cp, leftlen, " Synchronous Enabled:");
	ASC_PRT_NEXT();
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:%c",
				   i,
				   (sdtr_able & ADV_TID_TO_TIDMASK(i)) ?
				   'Y' : 'N');
		ASC_PRT_NEXT();
	}
	len = asc_prt_line(cp, leftlen, "\n");
	ASC_PRT_NEXT();

	/* Decode the negotiated synchronous period/offset per TID from the
	 * handshake table (bit 15 masked off above the loop body). */
	AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		AdvReadWordLram(iop_base,
				ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
				lramword);
		lramword &= ~0x8000;
		if ((chip_scsi_id == i) ||
		    ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0) ||
		    ((sdtr_able & ADV_TID_TO_TIDMASK(i)) == 0)) {
			continue;
		}
		len = asc_prt_line(cp, leftlen, " %X:", i);
		ASC_PRT_NEXT();
		if ((lramword & 0x1F) == 0) {	/* Check for REQ/ACK Offset 0. */
			len = asc_prt_line(cp, leftlen, " Asynchronous");
			ASC_PRT_NEXT();
		} else {
			len = asc_prt_line(cp, leftlen,
					   " Transfer Period Factor: ");
			ASC_PRT_NEXT();
			if ((lramword & 0x1F00) == 0x1100) {	/* 80 Mhz */
				len = asc_prt_line(cp, leftlen,
						   "9 (80.0 Mhz),");
				ASC_PRT_NEXT();
			} else if ((lramword & 0x1F00) == 0x1000) {	/* 40 Mhz */
				len = asc_prt_line(cp, leftlen,
						   "10 (40.0 Mhz),");
				ASC_PRT_NEXT();
			} else {	/* 20 Mhz or below. */
				period = (((lramword >> 8) * 25) + 50) / 4;
				if (period == 0) {	/* Should never happen. */
					/*
					 * NOTE(review): the "%d" below has no
					 * matching argument — long-standing
					 * latent bug on this (unreachable)
					 * path; confirm before changing.
					 */
					len = asc_prt_line(cp, leftlen,
							   "%d (? Mhz), ");
					ASC_PRT_NEXT();
				} else {
					len = asc_prt_line(cp, leftlen,
							   "%d (%d.%d Mhz),",
							   period,
							   250 / period,
							   ASC_TENTHS(250,
								      period));
					ASC_PRT_NEXT();
				}
			}
			len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d",
					   lramword & 0x1F);
			ASC_PRT_NEXT();
		}
		if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
			len = asc_prt_line(cp, leftlen, "*\n");
			renegotiate = 1;
		} else {
			len = asc_prt_line(cp, leftlen, "\n");
		}
		ASC_PRT_NEXT();
	}

	if (renegotiate) {
		len = asc_prt_line(cp, leftlen,
				   " * = Re-negotiation pending before next command.\n");
		ASC_PRT_NEXT();
	}

	return totlen;
}

/*
 * asc_proc_copy()
 *
 * Copy proc information to a read buffer taking into account the current
 * read offset in the file and the remaining space in the read buffer.
 */
static int
asc_proc_copy(off_t advoffset, off_t offset, char *curbuf, int leftlen,
	      char *cp, int cplen)
{
	int cnt = 0;

	ASC_DBG(2, "offset %d, advoffset %d, cplen %d\n",
		(unsigned)offset, (unsigned)advoffset, cplen);
	if (offset <= advoffset) {
		/* Read offset below current offset, copy everything. */
		cnt = min(cplen, leftlen);
		ASC_DBG(2, "curbuf 0x%lx, cp 0x%lx, cnt %d\n",
			(ulong)curbuf, (ulong)cp, cnt);
		memcpy(curbuf, cp, cnt);
	} else if (offset < advoffset + cplen) {
		/* Read offset within current range, partial copy. */
		cnt = (advoffset + cplen) - offset;
		cp = (cp + cplen) - cnt;
		cnt = min(cnt, leftlen);
		ASC_DBG(2, "curbuf 0x%lx, cp 0x%lx, cnt %d\n",
			(ulong)curbuf, (ulong)cp, cnt);
		memcpy(curbuf, cp, cnt);
	}
	return cnt;
}

#ifdef ADVANSYS_STATS
/*
 * asc_prt_board_stats()
 *
 * Note: no single line should be greater than ASC_PRTLINE_SIZE,
 * cf. asc_prt_line().
 *
 * Return the number of characters copied into 'cp'. No more than
 * 'cplen' characters will be copied to 'cp'.
*/ static int asc_prt_board_stats(struct Scsi_Host *shost, char *cp, int cplen) { struct asc_board *boardp = shost_priv(shost); struct asc_stats *s = &boardp->asc_stats; int leftlen = cplen; int len, totlen = 0; len = asc_prt_line(cp, leftlen, "\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n", shost->host_no); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " queuecommand %lu, reset %lu, biosparam %lu, interrupt %lu\n", s->queuecommand, s->reset, s->biosparam, s->interrupt); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " callback %lu, done %lu, build_error %lu, build_noreq %lu, build_nosg %lu\n", s->callback, s->done, s->build_error, s->adv_build_noreq, s->adv_build_nosg); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, " exe_noerror %lu, exe_busy %lu, exe_error %lu, exe_unknown %lu\n", s->exe_noerror, s->exe_busy, s->exe_error, s->exe_unknown); ASC_PRT_NEXT(); /* * Display data transfer statistics. */ if (s->xfer_cnt > 0) { len = asc_prt_line(cp, leftlen, " xfer_cnt %lu, xfer_elem %lu, ", s->xfer_cnt, s->xfer_elem); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, "xfer_bytes %lu.%01lu kb\n", s->xfer_sect / 2, ASC_TENTHS(s->xfer_sect, 2)); ASC_PRT_NEXT(); /* Scatter gather transfer statistics */ len = asc_prt_line(cp, leftlen, " avg_num_elem %lu.%01lu, ", s->xfer_elem / s->xfer_cnt, ASC_TENTHS(s->xfer_elem, s->xfer_cnt)); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, "avg_elem_size %lu.%01lu kb, ", (s->xfer_sect / 2) / s->xfer_elem, ASC_TENTHS((s->xfer_sect / 2), s->xfer_elem)); ASC_PRT_NEXT(); len = asc_prt_line(cp, leftlen, "avg_xfer_size %lu.%01lu kb\n", (s->xfer_sect / 2) / s->xfer_cnt, ASC_TENTHS((s->xfer_sect / 2), s->xfer_cnt)); ASC_PRT_NEXT(); } return totlen; } #endif /* ADVANSYS_STATS */ /* * advansys_proc_info() - /proc/scsi/advansys/{0,1,2,3,...} * * *buffer: I/O buffer * **start: if inout == FALSE pointer into buffer where user read should start * offset: current offset into a /proc/scsi/advansys/[0...] 
 file
 * length: length of buffer
 * hostno: Scsi_Host host_no
 * inout: TRUE - user is writing; FALSE - user is reading
 *
 * Return the number of bytes read from or written to a
 * /proc/scsi/advansys/[0...] file.
 *
 * Note: This function uses the per board buffer 'prtbuf' which is
 * allocated when the board is initialized in advansys_detect(). The
 * buffer is ASC_PRTBUF_SIZE bytes. The function asc_proc_copy() is
 * used to write to the buffer. The way asc_proc_copy() is written
 * if 'prtbuf' is too small it will not be overwritten. Instead the
 * user just won't get all the available statistics.
 */
static int
advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
		   off_t offset, int length, int inout)
{
	struct asc_board *boardp = shost_priv(shost);
	char *cp;		/* current section's source text */
	int cplen;		/* length of that section */
	int cnt;		/* bytes actually copied for this section */
	int totcnt;		/* running total returned to the caller */
	int leftlen;		/* space remaining in the user buffer */
	char *curbuf;		/* write cursor into the user buffer */
	off_t advoffset;	/* file offset at the start of each section */

	ASC_DBG(1, "begin\n");

	/*
	 * User write not supported.
	 */
	if (inout == TRUE)
		return -ENOSYS;

	/*
	 * User read of /proc/scsi/advansys/[0...] file.
	 */

	/* Copy read data starting at the beginning of the buffer. */
	*start = buffer;
	curbuf = buffer;
	advoffset = 0;
	totcnt = 0;
	leftlen = length;

	/*
	 * Get board configuration information.
	 *
	 * advansys_info() returns the board string from its own static buffer.
	 *
	 * NOTE(review): strcat() below appends into that static buffer —
	 * presumably advansys_info() rebuilds it on every call so this does
	 * not accumulate, but it writes past the reported string; verify the
	 * static buffer has room for the extra '\n'.
	 */
	cp = (char *)advansys_info(shost);
	strcat(cp, "\n");
	cplen = strlen(cp);
	/* Copy board information. */
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;

	/*
	 * Display Wide Board BIOS Information.
	 */
	if (!ASC_NARROW_BOARD(boardp)) {
		cp = boardp->prtbuf;
		cplen = asc_prt_adv_bios(shost, cp, ASC_PRTBUF_SIZE);
		BUG_ON(cplen >= ASC_PRTBUF_SIZE);
		cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp,
				    cplen);
		totcnt += cnt;
		leftlen -= cnt;
		if (leftlen == 0) {
			ASC_DBG(1, "totcnt %d\n", totcnt);
			return totcnt;
		}
		advoffset += cplen;
		curbuf += cnt;
	}

	/*
	 * Display driver information for each device attached to the board.
	 */
	cp = boardp->prtbuf;
	cplen = asc_prt_board_devices(shost, cp, ASC_PRTBUF_SIZE);
	BUG_ON(cplen >= ASC_PRTBUF_SIZE);
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;

	/*
	 * Display EEPROM configuration for the board.
	 */
	cp = boardp->prtbuf;
	if (ASC_NARROW_BOARD(boardp)) {
		cplen = asc_prt_asc_board_eeprom(shost, cp, ASC_PRTBUF_SIZE);
	} else {
		cplen = asc_prt_adv_board_eeprom(shost, cp, ASC_PRTBUF_SIZE);
	}
	BUG_ON(cplen >= ASC_PRTBUF_SIZE);
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;

	/*
	 * Display driver configuration and information for the board.
	 */
	cp = boardp->prtbuf;
	cplen = asc_prt_driver_conf(shost, cp, ASC_PRTBUF_SIZE);
	BUG_ON(cplen >= ASC_PRTBUF_SIZE);
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;

#ifdef ADVANSYS_STATS
	/*
	 * Display driver statistics for the board.
	 */
	cp = boardp->prtbuf;
	cplen = asc_prt_board_stats(shost, cp, ASC_PRTBUF_SIZE);
	BUG_ON(cplen >= ASC_PRTBUF_SIZE);
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;
#endif /* ADVANSYS_STATS */

	/*
	 * Display Asc Library dynamic configuration information
	 * for the board.
	 */
	cp = boardp->prtbuf;
	if (ASC_NARROW_BOARD(boardp)) {
		cplen = asc_prt_asc_board_info(shost, cp, ASC_PRTBUF_SIZE);
	} else {
		cplen = asc_prt_adv_board_info(shost, cp, ASC_PRTBUF_SIZE);
	}
	BUG_ON(cplen >= ASC_PRTBUF_SIZE);
	cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
	totcnt += cnt;
	leftlen -= cnt;
	if (leftlen == 0) {
		ASC_DBG(1, "totcnt %d\n", totcnt);
		return totcnt;
	}
	advoffset += cplen;
	curbuf += cnt;

	ASC_DBG(1, "totcnt %d\n", totcnt);

	return totcnt;
}
#endif /* CONFIG_PROC_FS */

/* Unmap DMA, bump the 'done' statistic and complete the command. */
static void asc_scsi_done(struct scsi_cmnd *scp)
{
	scsi_dma_unmap(scp);
	ASC_STATS(scp->device->host, done);
	scp->scsi_done(scp);
}

/* Select chip register bank 0, 1 or 2 via the control register. */
static void AscSetBank(PortAddr iop_base, uchar bank)
{
	uchar val;

	val = AscGetChipControl(iop_base) &
	    (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG | CC_SCSI_RESET |
	       CC_CHIP_RESET));
	if (bank == 1) {
		val |= CC_BANK_ONE;
	} else if (bank == 2) {
		val |= CC_DIAG | CC_BANK_ONE;
	} else {
		val &= ~CC_BANK_ONE;
	}
	AscSetChipControl(iop_base, val);
}

/* Write an instruction code to the chip's IH register (lives in bank 1). */
static void AscSetChipIH(PortAddr iop_base, ushort ins_code)
{
	AscSetBank(iop_base, 1);
	AscWriteChipIH(iop_base, ins_code);
	AscSetBank(iop_base, 0);
}

/* Release the chip from halt; returns 1 on success, 0 if still halted. */
static int AscStartChip(PortAddr iop_base)
{
	AscSetChipControl(iop_base, 0);
	if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
		return (0);
	}
	return (1);
}

/* Halt the chip; returns 1 once the status register reports halted. */
static int AscStopChip(PortAddr iop_base)
{
	uchar cc_val;

	cc_val = AscGetChipControl(iop_base) &
	    (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG));
	AscSetChipControl(iop_base, (uchar)(cc_val | CC_HALT));
	AscSetChipIH(iop_base, INS_HALT);
	AscSetChipIH(iop_base, INS_RFLAG_WTM);
	if
 ((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

/* True only when both the status and control registers report halt. */
static int AscIsChipHalted(PortAddr iop_base)
{
	if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
		if ((AscGetChipControl(iop_base) & CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * Reset the chip and the SCSI bus, waiting (up to ~1s) for any reset
 * already in progress to clear first. Returns nonzero if the chip ends
 * up halted as expected.
 */
static int AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc)
{
	PortAddr iop_base;
	int i = 10;

	iop_base = asc_dvc->iop_base;
	while ((AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE)
	       && (i-- > 0)) {
		mdelay(100);
	}
	AscStopChip(iop_base);
	AscSetChipControl(iop_base, CC_CHIP_RESET | CC_SCSI_RESET | CC_HALT);
	udelay(60);
	AscSetChipIH(iop_base, INS_RFLAG_WTM);
	AscSetChipIH(iop_base, INS_HALT);
	AscSetChipControl(iop_base, CC_CHIP_RESET | CC_HALT);
	AscSetChipControl(iop_base, CC_HALT);
	mdelay(200);
	AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
	AscSetChipStatus(iop_base, 0);
	return (AscIsChipHalted(iop_base));
}

/* Probe for the ASC-1000 chip signature; returns 1 if found. */
static int AscFindSignature(PortAddr iop_base)
{
	ushort sig_word;

	ASC_DBG(1, "AscGetChipSignatureByte(0x%x) 0x%x\n",
		iop_base, AscGetChipSignatureByte(iop_base));
	if (AscGetChipSignatureByte(iop_base) == (uchar)ASC_1000_ID1B) {
		ASC_DBG(1, "AscGetChipSignatureWord(0x%x) 0x%x\n",
			iop_base, AscGetChipSignatureWord(iop_base));
		sig_word = AscGetChipSignatureWord(iop_base);
		if ((sig_word == (ushort)ASC_1000_ID0W) ||
		    (sig_word == (ushort)ASC_1000_ID0W_FIX)) {
			return (1);
		}
	}
	return (0);
}

/* Enable host interrupts in the chip configuration LSW. */
static void AscEnableInterrupt(PortAddr iop_base)
{
	ushort cfg;

	cfg = AscGetChipCfgLsw(iop_base);
	AscSetChipCfgLsw(iop_base, cfg | ASC_CFG0_HOST_INT_ON);
}

/* Disable host interrupts in the chip configuration LSW. */
static void AscDisableInterrupt(PortAddr iop_base)
{
	ushort cfg;

	cfg = AscGetChipCfgLsw(iop_base);
	AscSetChipCfgLsw(iop_base, cfg & (~ASC_CFG0_HOST_INT_ON));
}

/*
 * Read one byte from chip local RAM. LRAM is word addressed, so an odd
 * address reads the containing word and extracts the high byte.
 */
static uchar AscReadLramByte(PortAddr iop_base, ushort addr)
{
	unsigned char byte_data;
	unsigned short word_data;

	if (isodd_word(addr)) {
		AscSetChipLramAddr(iop_base, addr - 1);
		word_data = AscGetChipLramData(iop_base);
		byte_data = (word_data >> 8) & 0xFF;
	} else {
		AscSetChipLramAddr(iop_base, addr);
		word_data = AscGetChipLramData(iop_base);
		byte_data = word_data & 0xFF;
	}
	return byte_data;
}

/* Read one word from chip local RAM at 'addr'. */
static ushort AscReadLramWord(PortAddr iop_base, ushort addr)
{
	ushort word_data;

	AscSetChipLramAddr(iop_base, addr);
	word_data = AscGetChipLramData(iop_base);
	return (word_data);
}

#if CC_VERY_LONG_SG_LIST
/* Read a 32-bit value from LRAM as two consecutive auto-incremented words. */
static ASC_DCNT AscReadLramDWord(PortAddr iop_base, ushort addr)
{
	ushort val_low, val_high;
	ASC_DCNT dword_data;

	AscSetChipLramAddr(iop_base, addr);
	val_low = AscGetChipLramData(iop_base);
	val_high = AscGetChipLramData(iop_base);
	dword_data = ((ASC_DCNT) val_high << 16) | (ASC_DCNT) val_low;
	return (dword_data);
}
#endif /* CC_VERY_LONG_SG_LIST */

/* Fill 'words' LRAM words starting at 's_addr' with 'set_wval'. */
static void
AscMemWordSetLram(PortAddr iop_base, ushort s_addr, ushort set_wval, int words)
{
	int i;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < words; i++) {
		AscSetChipLramData(iop_base, set_wval);
	}
}

/* Write one word to chip local RAM at 'addr'. */
static void AscWriteLramWord(PortAddr iop_base, ushort addr, ushort word_val)
{
	AscSetChipLramAddr(iop_base, addr);
	AscSetChipLramData(iop_base, word_val);
}

/*
 * Write one byte into word-addressed LRAM via read-modify-write of the
 * containing word (odd address = high byte, even address = low byte).
 */
static void AscWriteLramByte(PortAddr iop_base, ushort addr, uchar byte_val)
{
	ushort word_data;

	if (isodd_word(addr)) {
		addr--;
		word_data = AscReadLramWord(iop_base, addr);
		word_data &= 0x00FF;
		word_data |= (((ushort)byte_val << 8) & 0xFF00);
	} else {
		word_data = AscReadLramWord(iop_base, addr);
		word_data &= 0xFF00;
		word_data |= ((ushort)byte_val & 0x00FF);
	}
	AscWriteLramWord(iop_base, addr, word_data);
}

/*
 * Copy 2 bytes to LRAM.
 *
 * The source data is assumed to be in little-endian order in memory
 * and is maintained in little-endian order when written to LRAM.
 */
static void
AscMemWordCopyPtrToLram(PortAddr iop_base, ushort s_addr,
			const uchar *s_buffer, int words)
{
	int i;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 2 * words; i += 2) {
		/*
		 * On a little-endian system the second argument below
		 * produces a little-endian ushort which is written to
		 * LRAM in little-endian order.
 On a big-endian system
		 * the second argument produces a big-endian ushort which
		 * is "transparently" byte-swapped by outpw() and written
		 * in little-endian order to LRAM.
		 */
		outpw(iop_base + IOP_RAM_DATA,
		      ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]);
	}
}

/*
 * Copy 4 bytes to LRAM.
 *
 * The source data is assumed to be in little-endian order in memory
 * and is maintained in little-endian order when written to LRAM.
 */
static void
AscMemDWordCopyPtrToLram(PortAddr iop_base, ushort s_addr,
			 uchar *s_buffer, int dwords)
{
	int i;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 4 * dwords; i += 4) {
		outpw(iop_base + IOP_RAM_DATA,
		      ((ushort)s_buffer[i + 1] << 8) | s_buffer[i]);	/* LSW */
		outpw(iop_base + IOP_RAM_DATA,
		      ((ushort)s_buffer[i + 3] << 8) | s_buffer[i + 2]);	/* MSW */
	}
}

/*
 * Copy 2 bytes from LRAM.
 *
 * The source data is assumed to be in little-endian order in LRAM
 * and is maintained in little-endian order when written to memory.
 */
static void
AscMemWordCopyPtrFromLram(PortAddr iop_base, ushort s_addr,
			  uchar *d_buffer, int words)
{
	int i;
	ushort word;

	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 2 * words; i += 2) {
		word = inpw(iop_base + IOP_RAM_DATA);
		d_buffer[i] = word & 0xff;
		d_buffer[i + 1] = (word >> 8) & 0xff;
	}
}

/* Sum 'words' LRAM words starting at 's_addr' (used for checksums). */
static ASC_DCNT AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words)
{
	ASC_DCNT sum;
	int i;

	sum = 0L;
	for (i = 0; i < words; i++, s_addr += 2) {
		sum += AscReadLramWord(iop_base, s_addr);
	}
	return (sum);
}

/*
 * Initialize the chip's LRAM queue area: zero the queue region, then link
 * the queue blocks into a doubly linked free list (FWD/BWD/QNO bytes per
 * block), terminated with ASC_QLINK_END, plus trailing bookkeeping blocks.
 */
static ushort AscInitLram(ASC_DVC_VAR *asc_dvc)
{
	uchar i;
	ushort s_addr;
	PortAddr iop_base;
	ushort warn_code;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0,
			  (ushort)(((int)(asc_dvc->max_total_qng + 2 + 1) *
				    64) >> 1));
	i = ASC_MIN_ACTIVE_QNO;
	s_addr = ASC_QADR_BEG + ASC_QBLK_SIZE;
	/* First queue block links forward to i+1 and backward to the tail. */
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD),
			 (uchar)(i + 1));
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD),
			 (uchar)(asc_dvc->max_total_qng));
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO),
			 (uchar)i);
	i++;
	s_addr += ASC_QBLK_SIZE;
	for (; i < asc_dvc->max_total_qng; i++, s_addr += ASC_QBLK_SIZE) {
		AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD),
				 (uchar)(i + 1));
		AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD),
				 (uchar)(i - 1));
		AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO),
				 (uchar)i);
	}
	/* Last real queue block: terminate the forward chain. */
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_FWD),
			 (uchar)ASC_QLINK_END);
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_BWD),
			 (uchar)(asc_dvc->max_total_qng - 1));
	AscWriteLramByte(iop_base, (ushort)(s_addr + ASC_SCSIQ_B_QNO),
			 (uchar)asc_dvc->max_total_qng);
	i++;
	s_addr += ASC_QBLK_SIZE;
	/* Trailing blocks are self-linked (FWD == BWD == QNO == i). */
	for (; i <= (uchar)(asc_dvc->max_total_qng + 3);
	     i++, s_addr += ASC_QBLK_SIZE) {
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + (ushort)ASC_SCSIQ_B_FWD),
				 i);
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + (ushort)ASC_SCSIQ_B_BWD),
				 i);
		AscWriteLramByte(iop_base,
				 (ushort)(s_addr + (ushort)ASC_SCSIQ_B_QNO),
				 i);
	}
	return warn_code;
}

/*
 * Load 'mcode_size' bytes of microcode into LRAM at 's_addr' and return
 * the word sum of what was written so the caller can verify it against
 * the expected checksum. Also stores the code-section checksum and size
 * in the microcode's well-known LRAM variables.
 */
static ASC_DCNT
AscLoadMicroCode(PortAddr iop_base, ushort s_addr,
		 const uchar *mcode_buf, ushort mcode_size)
{
	ASC_DCNT chksum;
	ushort mcode_word_size;
	ushort mcode_chksum;

	/* Write the microcode buffer starting at LRAM address 0.
	 */
	mcode_word_size = (ushort)(mcode_size >> 1);
	AscMemWordSetLram(iop_base, s_addr, 0, mcode_word_size);
	AscMemWordCopyPtrToLram(iop_base, s_addr, mcode_buf, mcode_word_size);

	chksum = AscMemSumLramWord(iop_base, s_addr, mcode_word_size);
	ASC_DBG(1, "chksum 0x%lx\n", (ulong)chksum);
	/* Checksum only the code section (from ASC_CODE_SEC_BEG onward). */
	mcode_chksum = (ushort)AscMemSumLramWord(iop_base,
						 (ushort)ASC_CODE_SEC_BEG,
						 (ushort)((mcode_size -
							   s_addr -
							   (ushort)
							   ASC_CODE_SEC_BEG) /
							  2));
	ASC_DBG(1, "mcode_chksum 0x%lx\n", (ulong)mcode_chksum);
	AscWriteLramWord(iop_base, ASCV_MCODE_CHKSUM_W, mcode_chksum);
	AscWriteLramWord(iop_base, ASCV_MCODE_SIZE_W, mcode_size);
	return chksum;
}

/*
 * Initialize the microcode's queue-list variables in LRAM: free/done queue
 * heads and tails, error/halt codes, busy flags, and a zeroed 32-word
 * scratch area at ASC_QADR_BEG.
 */
static void AscInitQLinkVar(ASC_DVC_VAR *asc_dvc)
{
	PortAddr iop_base;
	int i;
	ushort lram_addr;

	iop_base = asc_dvc->iop_base;
	AscPutRiscVarFreeQHead(iop_base, 1);
	AscPutRiscVarDoneQTail(iop_base, asc_dvc->max_total_qng);
	AscPutVarFreeQHead(iop_base, 1);
	AscPutVarDoneQTail(iop_base, asc_dvc->max_total_qng);
	AscWriteLramByte(iop_base, ASCV_BUSY_QHEAD_B,
			 (uchar)((int)asc_dvc->max_total_qng + 1));
	AscWriteLramByte(iop_base, ASCV_DISC1_QHEAD_B,
			 (uchar)((int)asc_dvc->max_total_qng + 2));
	AscWriteLramByte(iop_base, (ushort)ASCV_TOTAL_READY_Q_B,
			 asc_dvc->max_total_qng);
	AscWriteLramWord(iop_base, ASCV_ASCDVC_ERR_CODE_W, 0);
	AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
	AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0);
	AscWriteLramByte(iop_base, ASCV_SCSIBUSY_B, 0);
	AscWriteLramByte(iop_base, ASCV_WTM_FLAG_B, 0);
	AscPutQDoneInProgress(iop_base, 0);
	lram_addr = ASC_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2) {
		AscWriteLramWord(iop_base, lram_addr, 0);
	}
}

/*
 * Program the freshly loaded microcode's variables (per-TID SDTR settings,
 * disconnect enable, host SCSI ID, DMA overrun buffer address/size), then
 * point the microcode PC at its start address and start the chip.
 *
 * Returns 0 or a warning code; on DMA-mapping failure overrun_dma is left
 * zero, which AscInitAsc1000Driver() uses to detect the error.
 */
static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc)
{
	int i;
	ushort warn_code;
	PortAddr iop_base;
	ASC_PADDR phy_addr;
	ASC_DCNT phy_size;
	struct asc_board *board = asc_dvc_to_board(asc_dvc);

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	for (i = 0; i <= ASC_MAX_TID; i++) {
		AscPutMCodeInitSDTRAtID(iop_base, i,
					asc_dvc->cfg->sdtr_period_offset[i]);
	}

	AscInitQLinkVar(asc_dvc);
	AscWriteLramByte(iop_base, ASCV_DISC_ENABLE_B,
			 asc_dvc->cfg->disc_enable);
	AscWriteLramByte(iop_base, ASCV_HOSTSCSI_ID_B,
			 ASC_TID_TO_TARGET_ID(asc_dvc->cfg->chip_scsi_id));

	/* Ensure overrun buffer is aligned on an 8 byte boundary. */
	BUG_ON((unsigned long)asc_dvc->overrun_buf & 7);
	asc_dvc->overrun_dma = dma_map_single(board->dev,
					      asc_dvc->overrun_buf,
					      ASC_OVERRUN_BSIZE,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(board->dev, asc_dvc->overrun_dma)) {
		/*
		 * NOTE(review): warn_code is ushort, so -ENOMEM is
		 * truncated to a large positive value here. Callers
		 * actually detect this failure via overrun_dma == 0
		 * (see the err_dma_map label), so the truncation is
		 * benign but misleading — worth cleaning up.
		 */
		warn_code = -ENOMEM;
		goto err_dma_map;
	}
	phy_addr = cpu_to_le32(asc_dvc->overrun_dma);
	AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_PADDR_D,
				 (uchar *)&phy_addr, 1);
	phy_size = cpu_to_le32(ASC_OVERRUN_BSIZE);
	AscMemDWordCopyPtrToLram(iop_base, ASCV_OVERRUN_BSIZE_D,
				 (uchar *)&phy_size, 1);

	asc_dvc->cfg->mcode_date =
	    AscReadLramWord(iop_base, (ushort)ASCV_MC_DATE_W);
	asc_dvc->cfg->mcode_version =
	    AscReadLramWord(iop_base, (ushort)ASCV_MC_VER_W);

	AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
	if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
		asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
		warn_code = UW_ERR;
		goto err_mcode_start;
	}
	if (AscStartChip(iop_base) != 1) {
		asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
		warn_code = UW_ERR;
		goto err_mcode_start;
	}

	return warn_code;

err_mcode_start:
	dma_unmap_single(board->dev, asc_dvc->overrun_dma,
			 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
err_dma_map:
	asc_dvc->overrun_dma = 0;
	return warn_code;
}

/*
 * Reset (optionally), load and verify the ASC-1000 microcode from the
 * firmware image "advansys/mcode.bin", then start the chip.
 *
 * Returns 0 or a warning code. NOTE(review): the request_firmware error
 * paths return negative errno values through a ushort return type, where
 * they are truncated to large positive values — callers relying on
 * err_code/overrun_dma still see the failure, but the return value itself
 * is misleading; confirm before changing the signature.
 */
static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc)
{
	const struct firmware *fw;
	const char fwname[] = "advansys/mcode.bin";
	int err;
	unsigned long chksum;
	ushort warn_code;
	PortAddr iop_base;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	if ((asc_dvc->dvc_cntl & ASC_CNTL_RESET_SCSI) &&
	    !(asc_dvc->init_state & ASC_INIT_RESET_SCSI_DONE)) {
		AscResetChipAndScsiBus(asc_dvc);
		mdelay(asc_dvc->scsi_reset_wait * 1000);	/* XXX: msleep? */
	}

	asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC;
	if (asc_dvc->err_code != 0)
		return UW_ERR;
	if (!AscFindSignature(asc_dvc->iop_base)) {
		asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
		return warn_code;
	}
	AscDisableInterrupt(iop_base);
	warn_code |= AscInitLram(asc_dvc);
	if (asc_dvc->err_code != 0)
		return UW_ERR;

	err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev);
	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
		return err;
	}
	if (fw->size < 4) {
		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
		return -EINVAL;
	}
	/* First 4 firmware bytes are the little-endian expected checksum. */
	chksum = (fw->data[3] << 24) | (fw->data[2] << 16) |
		 (fw->data[1] << 8) | fw->data[0];
	ASC_DBG(1, "_asc_mcode_chksum 0x%lx\n", (ulong)chksum);
	if (AscLoadMicroCode(iop_base, 0, &fw->data[4],
			     fw->size - 4) != chksum) {
		asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
		release_firmware(fw);
		return warn_code;
	}
	release_firmware(fw);
	warn_code |= AscInitMicroCodeVar(asc_dvc);
	if (!asc_dvc->overrun_dma)
		return warn_code;
	asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC;
	AscEnableInterrupt(iop_base);
	return warn_code;
}

/*
 * Load the Microcode
 *
 * Write the microcode image to RISC memory starting at address 0.
 *
 * The microcode is stored compressed in the following format:
 *
 *  254 word (508 byte) table indexed by byte code followed
 *  by the following byte codes:
 *
 *    1-Byte Code:
 *      00: Emit word 0 in table.
 *      01: Emit word 1 in table.
 *      .
 *      FD: Emit word 253 in table.
 *
 *    Multi-Byte Code:
 *      FE WW WW: (3 byte code) Word to emit is the next word WW WW.
 *      FF BB WW WW: (4 byte code) Emit BB count times next word WW WW.
 *
 * Returns 0 or an error if the checksum doesn't match
 */
static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf,
			    int size, int memsize, int chksum)
{
	int i, j, end, len = 0;
	ADV_DCNT sum;

	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);

	/* Decompress: the image starts with a 254-word (253*2-byte offset)
	 * lookup table; everything after it is the byte-code stream. */
	for (i = 253 * 2; i < size; i++) {
		if (buf[i] == 0xff) {
			/* FF BB WW WW: emit word WW WW, BB times. */
			unsigned short word = (buf[i + 3] << 8) | buf[i + 2];
			for (j = 0; j < buf[i + 1]; j++) {
				AdvWriteWordAutoIncLram(iop_base, word);
				len += 2;
			}
			i += 3;
		} else if (buf[i] == 0xfe) {
			/* FE WW WW: emit the literal word WW WW once. */
			unsigned short word = (buf[i + 2] << 8) | buf[i + 1];
			AdvWriteWordAutoIncLram(iop_base, word);
			i += 2;
			len += 2;
		} else {
			/* 00..FD: emit the table entry indexed by the code. */
			unsigned int off = buf[i] * 2;
			unsigned short word = (buf[off + 1] << 8) | buf[off];
			AdvWriteWordAutoIncLram(iop_base, word);
			len += 2;
		}
	}

	end = len;

	/* Zero-fill the remainder of RISC memory. */
	while (len < memsize) {
		AdvWriteWordAutoIncLram(iop_base, 0);
		len += 2;
	}

	/* Verify the microcode checksum. */
	sum = 0;
	AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);

	for (len = 0; len < end; len += 2) {
		sum += AdvReadWordAutoIncLram(iop_base);
	}

	if (sum != chksum)
		return ASC_IERR_MCODE_CHKSUM;

	return 0;
}

/*
 * Carve the board's carrier buffer into 16-byte-aligned ADV_CARR_T
 * structures and link them all onto the driver's carrier freelist,
 * recording each carrier's bus and virtual addresses.
 */
static void AdvBuildCarrierFreelist(struct adv_dvc_var *asc_dvc)
{
	ADV_CARR_T *carrp;
	ADV_SDCNT buf_size;
	ADV_PADDR carr_paddr;

	carrp = (ADV_CARR_T *) ADV_16BALIGN(asc_dvc->carrier_buf);
	asc_dvc->carr_freelist = NULL;
	/* Aligning forward may cost one carrier's worth of space. */
	if (carrp == asc_dvc->carrier_buf) {
		buf_size = ADV_CARRIER_BUFSIZE;
	} else {
		buf_size = ADV_CARRIER_BUFSIZE - sizeof(ADV_CARR_T);
	}

	do {
		/* Get physical address of the carrier 'carrp'. */
		carr_paddr = cpu_to_le32(virt_to_bus(carrp));

		buf_size -= sizeof(ADV_CARR_T);

		carrp->carr_pa = carr_paddr;
		carrp->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(carrp));

		/*
		 * Insert the carrier at the beginning of the freelist.
		 */
		carrp->next_vpa =
		    cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist));
		asc_dvc->carr_freelist = carrp;

		carrp++;
	} while (buf_size > 0);
}

/*
 * Send an idle command to the chip and wait for completion.
 *
 * Command completion is polled for once per microsecond.
 *
 * The function can be called from anywhere including an interrupt handler.
 * But the function is not re-entrant, so it uses the DvcEnter/LeaveCritical()
 * functions to prevent reentrancy.
 *
 * Return Values:
 *   ADV_TRUE - command completed successfully
 *   ADV_FALSE - command failed
 *   ADV_ERROR - command timed out
 */
static int
AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc,
	       ushort idle_cmd, ADV_DCNT idle_cmd_parameter)
{
	int result;
	ADV_DCNT i, j;
	AdvPortAddr iop_base;

	iop_base = asc_dvc->iop_base;

	/*
	 * Clear the idle command status which is set by the microcode
	 * to a non-zero value to indicate when the command is completed.
	 * The non-zero result is one of the IDLE_CMD_STATUS_* values
	 */
	AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS, (ushort)0);

	/*
	 * Write the idle command value after the idle command parameter
	 * has been written to avoid a race condition. If the order is not
	 * followed, the microcode may process the idle command before the
	 * parameters have been written to LRAM.
	 */
	AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IDLE_CMD_PARAMETER,
				cpu_to_le32(idle_cmd_parameter));
	AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD, idle_cmd);

	/*
	 * Tickle the RISC to tell it to process the idle command.
	 */
	AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_B);
	if (asc_dvc->chip_type == ADV_CHIP_ASC3550) {
		/*
		 * Clear the tickle value. In the ASC-3550 the RISC flag
		 * command 'clr_tickle_b' does not work unless the host
		 * value is cleared.
		 */
		AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_NOP);
	}

	/* Wait for up to 100 millisecond for the idle command to timeout. */
	for (i = 0; i < SCSI_WAIT_100_MSEC; i++) {
		/* Poll once each microsecond for command completion. */
		for (j = 0; j < SCSI_US_PER_MSEC; j++) {
			AdvReadWordLram(iop_base, ASC_MC_IDLE_CMD_STATUS,
					result);
			if (result != 0)
				return result;
			udelay(1);
		}
	}

	BUG();		/* The idle command should never timeout. */
	return ADV_ERROR;
}

/*
 * Reset SCSI Bus and purge all outstanding requests.
* * Return Value: * ADV_TRUE(1) - All requests are purged and SCSI Bus is reset. * ADV_FALSE(0) - Microcode command failed. * ADV_ERROR(-1) - Microcode command timed-out. Microcode or IC * may be hung which requires driver recovery. */ static int AdvResetSB(ADV_DVC_VAR *asc_dvc) { int status; /* * Send the SCSI Bus Reset idle start idle command which asserts * the SCSI Bus Reset signal. */ status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_START, 0L); if (status != ADV_TRUE) { return status; } /* * Delay for the specified SCSI Bus Reset hold time. * * The hold time delay is done on the host because the RISC has no * microsecond accurate timer. */ udelay(ASC_SCSI_RESET_HOLD_TIME_US); /* * Send the SCSI Bus Reset end idle command which de-asserts * the SCSI Bus Reset signal and purges any pending requests. */ status = AdvSendIdleCmd(asc_dvc, (ushort)IDLE_CMD_SCSI_RESET_END, 0L); if (status != ADV_TRUE) { return status; } mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? */ return status; } /* * Initialize the ASC-3550. * * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Needed after initialization for error recovery. */ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc) { const struct firmware *fw; const char fwname[] = "advansys/3550.bin"; AdvPortAddr iop_base; ushort warn_code; int begin_addr; int end_addr; ushort code_sum; int word; int i; int err; unsigned long chksum; ushort scsi_cfg1; uchar tid; ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */ ushort wdtr_able = 0, sdtr_able, tagqng_able; uchar max_cmd[ADV_MAX_TID + 1]; /* If there is already an error, don't continue. */ if (asc_dvc->err_code != 0) return ADV_ERROR; /* * The caller must set 'chip_type' to ADV_CHIP_ASC3550. 
*/ if (asc_dvc->chip_type != ADV_CHIP_ASC3550) { asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; return ADV_ERROR; } warn_code = 0; iop_base = asc_dvc->iop_base; /* * Save the RISC memory BIOS region before writing the microcode. * The BIOS may already be loaded and using its RISC LRAM region * so its region must be saved and restored. * * Note: This code makes the assumption, which is currently true, * that a chip reset does not clear RISC LRAM. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Save current per TID negotiated values. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { ushort bios_version, major, minor; bios_version = bios_mem[(ASC_MC_BIOS_VERSION - ASC_MC_BIOSMEM) / 2]; major = (bios_version >> 12) & 0xF; minor = (bios_version >> 8) & 0xF; if (major < 3 || (major == 3 && minor == 1)) { /* BIOS 3.1 and earlier location of 'wdtr_able' variable. */ AdvReadWordLram(iop_base, 0x120, wdtr_able); } else { AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); } } AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fwname, err); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return err; } if (fw->size < 4) { printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", fw->size, fwname); release_firmware(fw); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return -EINVAL; } chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | (fw->data[1] << 8) | fw->data[0]; asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], fw->size - 4, ADV_3550_MEMSIZE, chksum); release_firmware(fw); if (asc_dvc->err_code) return ADV_ERROR; /* * Restore the RISC memory BIOS region. 
*/ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Calculate and write the microcode code checksum to the microcode * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). */ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); code_sum = 0; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); for (word = begin_addr; word < end_addr; word += 2) { code_sum += AdvReadWordAutoIncLram(iop_base); } AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); /* * Read and save microcode version and date. */ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, asc_dvc->cfg->mcode_date); AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, asc_dvc->cfg->mcode_version); /* * Set the chip type to indicate the ASC3550. */ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC3550); /* * If the PCI Configuration Command Register "Parity Error Response * Control" Bit was clear (0), then set the microcode variable * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode * to ignore DMA parity errors. */ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_IGNORE_PERR; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * For ASC-3550, setting the START_CTL_EMFU [3:2] bits sets a FIFO * threshold of 128 bytes. This register is only accessible to the host. */ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, START_CTL_EMFU | READ_CMD_MRM); /* * Microcode operating variables for WDTR, SDTR, and command tag * queuing will be set in slave_configure() based on what a * device reports it is capable of in Inquiry byte 7. * * If SCSI Bus Resets have been disabled, then directly set * SDTR and WDTR from the EEPROM configuration. 
This will allow * the BIOS and warm boot to work without a SCSI bus hang on * the Inquiry caused by host and target mismatched DTR values. * Without the SCSI Bus Reset, before an Inquiry a device can't * be assumed to be in Asynchronous, Narrow mode. */ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, asc_dvc->wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, asc_dvc->sdtr_able); } /* * Set microcode operating variables for SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 based on the ULTRA EEPROM per TID * bitmask. These values determine the maximum SDTR speed negotiated * with a device. * * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them * without determining here whether the device supports SDTR. * * 4-bit speed SDTR speed name * =========== =============== * 0000b (0x0) SDTR disabled * 0001b (0x1) 5 Mhz * 0010b (0x2) 10 Mhz * 0011b (0x3) 20 Mhz (Ultra) * 0100b (0x4) 40 Mhz (LVD/Ultra2) * 0101b (0x5) 80 Mhz (LVD2/Ultra3) * 0110b (0x6) Undefined * . * 1111b (0xF) Undefined */ word = 0; for (tid = 0; tid <= ADV_MAX_TID; tid++) { if (ADV_TID_TO_TIDMASK(tid) & asc_dvc->ultra_able) { /* Set Ultra speed for TID 'tid'. */ word |= (0x3 << (4 * (tid % 4))); } else { /* Set Fast speed for TID 'tid'. */ word |= (0x2 << (4 * (tid % 4))); } if (tid == 3) { /* Check if done with sdtr_speed1. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, word); word = 0; } else if (tid == 7) { /* Check if done with sdtr_speed2. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, word); word = 0; } else if (tid == 11) { /* Check if done with sdtr_speed3. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, word); word = 0; } else if (tid == 15) { /* Check if done with sdtr_speed4. */ AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, word); /* End of loop. */ } } /* * Set microcode operating variable for the disconnect per TID bitmask. 
*/ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, asc_dvc->cfg->disc_enable); /* * Set SCSI_CFG0 Microcode Default Value. * * The microcode will set the SCSI_CFG0 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | asc_dvc->chip_scsi_id); /* * Determine SCSI_CFG1 Microcode Default Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ /* Read current SCSI_CFG1 Register value. */ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); /* * If all three connectors are in use, return an error. */ if ((scsi_cfg1 & CABLE_ILLEGAL_A) == 0 || (scsi_cfg1 & CABLE_ILLEGAL_B) == 0) { asc_dvc->err_code |= ASC_IERR_ILLEGAL_CONNECTION; return ADV_ERROR; } /* * If the internal narrow cable is reversed all of the SCSI_CTRL * register signals will be set. Check for and return an error if * this condition is found. */ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) { asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE; return ADV_ERROR; } /* * If this is a differential board and a single-ended device * is attached to one of the connectors, return an error. */ if ((scsi_cfg1 & DIFF_MODE) && (scsi_cfg1 & DIFF_SENSE) == 0) { asc_dvc->err_code |= ASC_IERR_SINGLE_END_DEVICE; return ADV_ERROR; } /* * If automatic termination control is enabled, then set the * termination value based on a table listed in a_condor.h. * * If manual termination was specified with an EEPROM setting * then 'termination' was set-up in AdvInitFrom3550EEPROM() and * is ready to be 'ored' into SCSI_CFG1. */ if (asc_dvc->cfg->termination == 0) { /* * The software always controls termination by setting TERM_CTL_SEL. * If TERM_CTL_SEL were set to 0, the hardware would set termination. 
*/ asc_dvc->cfg->termination |= TERM_CTL_SEL; switch (scsi_cfg1 & CABLE_DETECT) { /* TERM_CTL_H: on, TERM_CTL_L: on */ case 0x3: case 0x7: case 0xB: case 0xD: case 0xE: case 0xF: asc_dvc->cfg->termination |= (TERM_CTL_H | TERM_CTL_L); break; /* TERM_CTL_H: on, TERM_CTL_L: off */ case 0x1: case 0x5: case 0x9: case 0xA: case 0xC: asc_dvc->cfg->termination |= TERM_CTL_H; break; /* TERM_CTL_H: off, TERM_CTL_L: off */ case 0x2: case 0x6: break; } } /* * Clear any set TERM_CTL_H and TERM_CTL_L bits. */ scsi_cfg1 &= ~TERM_CTL; /* * Invert the TERM_CTL_H and TERM_CTL_L bits and then * set 'scsi_cfg1'. The TERM_POL bit does not need to be * referenced, because the hardware internally inverts * the Termination High and Low bits if TERM_POL is set. */ scsi_cfg1 |= (TERM_CTL_SEL | (~asc_dvc->cfg->termination & TERM_CTL)); /* * Set SCSI_CFG1 Microcode Default Value * * Set filter value and possibly modified termination control * bits in the Microcode SCSI_CFG1 Register Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, FLTR_DISABLE | scsi_cfg1); /* * Set MEM_CFG Microcode Default Value * * The microcode will set the MEM_CFG register using this value * after it is started below. * * MEM_CFG may be accessed as a word or byte, but only bits 0-7 * are defined. * * ASC-3550 has 8KB internal memory. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, BIOS_EN | RAM_SZ_8KB); /* * Set SEL_MASK Microcode Default Value * * The microcode will set the SEL_MASK register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); AdvBuildCarrierFreelist(asc_dvc); /* * Set-up the Host->RISC Initiator Command Queue (ICQ). 
*/ if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa)); /* * The first command issued will be placed in the stopper carrier. */ asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC ICQ physical address start value. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); /* * Set-up the RISC->Host Initiator Response Queue (IRQ). */ if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa)); /* * The first command completed by the RISC will be placed in * the stopper. * * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is * completed the RISC will set the ASC_RQ_STOPPER bit. */ asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC IRQ physical address start value. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); asc_dvc->carr_pending_cnt = 0; AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR)); AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); AdvWriteWordRegister(iop_base, IOPW_PC, word); /* finally, finally, gentlemen, start your engine */ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); /* * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus * Resets should be performed. The RISC has to be running * to issue a SCSI Bus Reset. */ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { /* * If the BIOS Signature is present in memory, restore the * BIOS Handshake Configuration Table and do not perform * a SCSI Bus Reset. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { /* * Restore per TID negotiated values. 
*/ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } } else { if (AdvResetSB(asc_dvc) != ADV_TRUE) { warn_code = ASC_WARN_BUSRESET_ERROR; } } } return warn_code; } /* * Initialize the ASC-38C0800. * * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Needed after initialization for error recovery. */ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc) { const struct firmware *fw; const char fwname[] = "advansys/38C0800.bin"; AdvPortAddr iop_base; ushort warn_code; int begin_addr; int end_addr; ushort code_sum; int word; int i; int err; unsigned long chksum; ushort scsi_cfg1; uchar byte; uchar tid; ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. */ ushort wdtr_able, sdtr_able, tagqng_able; uchar max_cmd[ADV_MAX_TID + 1]; /* If there is already an error, don't continue. */ if (asc_dvc->err_code != 0) return ADV_ERROR; /* * The caller must set 'chip_type' to ADV_CHIP_ASC38C0800. */ if (asc_dvc->chip_type != ADV_CHIP_ASC38C0800) { asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; return ADV_ERROR; } warn_code = 0; iop_base = asc_dvc->iop_base; /* * Save the RISC memory BIOS region before writing the microcode. * The BIOS may already be loaded and using its RISC LRAM region * so its region must be saved and restored. * * Note: This code makes the assumption, which is currently true, * that a chip reset does not clear RISC LRAM. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Save current per TID negotiated values. 
*/ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } /* * RAM BIST (RAM Built-In Self Test) * * Address : I/O base + offset 0x38h register (byte). * Function: Bit 7-6(RW) : RAM mode * Normal Mode : 0x00 * Pre-test Mode : 0x40 * RAM Test Mode : 0x80 * Bit 5 : unused * Bit 4(RO) : Done bit * Bit 3-0(RO) : Status * Host Error : 0x08 * Int_RAM Error : 0x04 * RISC Error : 0x02 * SCSI Error : 0x01 * No Error : 0x00 * * Note: RAM BIST code should be put right here, before loading the * microcode and after saving the RISC memory BIOS region. */ /* * LRAM Pre-test * * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds. * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset * to NORMAL_MODE, return an error too. */ for (i = 0; i < 2; i++) { AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE); mdelay(10); /* Wait for 10ms before reading back. */ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); if ((byte & RAM_TEST_DONE) == 0 || (byte & 0x0F) != PRE_TEST_VALUE) { asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; return ADV_ERROR; } AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); mdelay(10); /* Wait for 10ms before reading back. */ if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST) != NORMAL_VALUE) { asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; return ADV_ERROR; } } /* * LRAM Test - It takes about 1.5 ms to run through the test. * * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds. * If Done bit not set or Status not 0, save register byte, set the * err_code, and return an error. */ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE); mdelay(10); /* Wait for 10ms before checking status. 
*/ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) { /* Get here if Done bit not set or Status not 0. */ asc_dvc->bist_err_code = byte; /* for BIOS display message */ asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST; return ADV_ERROR; } /* We need to reset back to normal mode after LRAM test passes. */ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fwname, err); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return err; } if (fw->size < 4) { printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", fw->size, fwname); release_firmware(fw); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return -EINVAL; } chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | (fw->data[1] << 8) | fw->data[0]; asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], fw->size - 4, ADV_38C0800_MEMSIZE, chksum); release_firmware(fw); if (asc_dvc->err_code) return ADV_ERROR; /* * Restore the RISC memory BIOS region. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Calculate and write the microcode code checksum to the microcode * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). */ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); code_sum = 0; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); for (word = begin_addr; word < end_addr; word += 2) { code_sum += AdvReadWordAutoIncLram(iop_base); } AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); /* * Read microcode version and date. */ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, asc_dvc->cfg->mcode_date); AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, asc_dvc->cfg->mcode_version); /* * Set the chip type to indicate the ASC38C0800. 
*/ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C0800); /* * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register. * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current * cable detection and then we are able to read C_DET[3:0]. * * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1 * Microcode Default Value' section below. */ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1, scsi_cfg1 | DIS_TERM_DRV); /* * If the PCI Configuration Command Register "Parity Error Response * Control" Bit was clear (0), then set the microcode variable * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode * to ignore DMA parity errors. */ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_IGNORE_PERR; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * For ASC-38C0800, set FIFO_THRESH_80B [6:4] bits and START_CTL_TH [3:2] * bits for the default FIFO threshold. * * Note: ASC-38C0800 FIFO threshold has been changed to 256 bytes. * * For DMA Errata #4 set the BC_THRESH_ENB bit. */ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, BC_THRESH_ENB | FIFO_THRESH_80B | START_CTL_TH | READ_CMD_MRM); /* * Microcode operating variables for WDTR, SDTR, and command tag * queuing will be set in slave_configure() based on what a * device reports it is capable of in Inquiry byte 7. * * If SCSI Bus Resets have been disabled, then directly set * SDTR and WDTR from the EEPROM configuration. This will allow * the BIOS and warm boot to work without a SCSI bus hang on * the Inquiry caused by host and target mismatched DTR values. * Without the SCSI Bus Reset, before an Inquiry a device can't * be assumed to be in Asynchronous, Narrow mode. 
*/ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, asc_dvc->wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, asc_dvc->sdtr_able); } /* * Set microcode operating variables for DISC and SDTR_SPEED1, * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM * configuration values. * * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them * without determining here whether the device supports SDTR. */ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, asc_dvc->cfg->disc_enable); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4); /* * Set SCSI_CFG0 Microcode Default Value. * * The microcode will set the SCSI_CFG0 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | asc_dvc->chip_scsi_id); /* * Determine SCSI_CFG1 Microcode Default Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ /* Read current SCSI_CFG1 Register value. */ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); /* * If the internal narrow cable is reversed all of the SCSI_CTRL * register signals will be set. Check for and return an error if * this condition is found. */ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) { asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE; return ADV_ERROR; } /* * All kind of combinations of devices attached to one of four * connectors are acceptable except HVD device attached. For example, * LVD device can be attached to SE connector while SE device attached * to LVD connector. 
If LVD device attached to SE connector, it only * runs up to Ultra speed. * * If an HVD device is attached to one of LVD connectors, return an * error. However, there is no way to detect HVD device attached to * SE connectors. */ if (scsi_cfg1 & HVD) { asc_dvc->err_code = ASC_IERR_HVD_DEVICE; return ADV_ERROR; } /* * If either SE or LVD automatic termination control is enabled, then * set the termination value based on a table listed in a_condor.h. * * If manual termination was specified with an EEPROM setting then * 'termination' was set-up in AdvInitFrom38C0800EEPROM() and is ready * to be 'ored' into SCSI_CFG1. */ if ((asc_dvc->cfg->termination & TERM_SE) == 0) { /* SE automatic termination control is enabled. */ switch (scsi_cfg1 & C_DET_SE) { /* TERM_SE_HI: on, TERM_SE_LO: on */ case 0x1: case 0x2: case 0x3: asc_dvc->cfg->termination |= TERM_SE; break; /* TERM_SE_HI: on, TERM_SE_LO: off */ case 0x0: asc_dvc->cfg->termination |= TERM_SE_HI; break; } } if ((asc_dvc->cfg->termination & TERM_LVD) == 0) { /* LVD automatic termination control is enabled. */ switch (scsi_cfg1 & C_DET_LVD) { /* TERM_LVD_HI: on, TERM_LVD_LO: on */ case 0x4: case 0x8: case 0xC: asc_dvc->cfg->termination |= TERM_LVD; break; /* TERM_LVD_HI: off, TERM_LVD_LO: off */ case 0x0: break; } } /* * Clear any set TERM_SE and TERM_LVD bits. */ scsi_cfg1 &= (~TERM_SE & ~TERM_LVD); /* * Invert the TERM_SE and TERM_LVD bits and then set 'scsi_cfg1'. */ scsi_cfg1 |= (~asc_dvc->cfg->termination & 0xF0); /* * Clear BIG_ENDIAN, DIS_TERM_DRV, Terminator Polarity and HVD/LVD/SE * bits and set possibly modified termination control bits in the * Microcode SCSI_CFG1 Register Value. */ scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL & ~HVD_LVD_SE); /* * Set SCSI_CFG1 Microcode Default Value * * Set possibly modified termination control and reset DIS_TERM_DRV * bits in the Microcode SCSI_CFG1 Register Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. 
*/ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1); /* * Set MEM_CFG Microcode Default Value * * The microcode will set the MEM_CFG register using this value * after it is started below. * * MEM_CFG may be accessed as a word or byte, but only bits 0-7 * are defined. * * ASC-38C0800 has 16KB internal memory. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, BIOS_EN | RAM_SZ_16KB); /* * Set SEL_MASK Microcode Default Value * * The microcode will set the SEL_MASK register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); AdvBuildCarrierFreelist(asc_dvc); /* * Set-up the Host->RISC Initiator Command Queue (ICQ). */ if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa)); /* * The first command issued will be placed in the stopper carrier. */ asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC ICQ physical address start value. * carr_pa is LE, must be native before write */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); /* * Set-up the RISC->Host Initiator Response Queue (IRQ). */ if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa)); /* * The first command completed by the RISC will be placed in * the stopper. * * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is * completed the RISC will set the ASC_RQ_STOPPER bit. */ asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC IRQ physical address start value. 
* * carr_pa is LE, must be native before write * */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); asc_dvc->carr_pending_cnt = 0; AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR)); AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); AdvWriteWordRegister(iop_base, IOPW_PC, word); /* finally, finally, gentlemen, start your engine */ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); /* * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus * Resets should be performed. The RISC has to be running * to issue a SCSI Bus Reset. */ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { /* * If the BIOS Signature is present in memory, restore the * BIOS Handshake Configuration Table and do not perform * a SCSI Bus Reset. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { /* * Restore per TID negotiated values. */ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ADV_MAX_TID; tid++) { AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } } else { if (AdvResetSB(asc_dvc) != ADV_TRUE) { warn_code = ASC_WARN_BUSRESET_ERROR; } } } return warn_code; } /* * Initialize the ASC-38C1600. * * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR. * * For a non-fatal error return a warning code. If there are no warnings * then 0 is returned. * * Needed after initialization for error recovery. */ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc) { const struct firmware *fw; const char fwname[] = "advansys/38C1600.bin"; AdvPortAddr iop_base; ushort warn_code; int begin_addr; int end_addr; ushort code_sum; long word; int i; int err; unsigned long chksum; ushort scsi_cfg1; uchar byte; uchar tid; ushort bios_mem[ASC_MC_BIOSLEN / 2]; /* BIOS RISC Memory 0x40-0x8F. 
*/ ushort wdtr_able, sdtr_able, ppr_able, tagqng_able; uchar max_cmd[ASC_MAX_TID + 1]; /* If there is already an error, don't continue. */ if (asc_dvc->err_code != 0) { return ADV_ERROR; } /* * The caller must set 'chip_type' to ADV_CHIP_ASC38C1600. */ if (asc_dvc->chip_type != ADV_CHIP_ASC38C1600) { asc_dvc->err_code = ASC_IERR_BAD_CHIPTYPE; return ADV_ERROR; } warn_code = 0; iop_base = asc_dvc->iop_base; /* * Save the RISC memory BIOS region before writing the microcode. * The BIOS may already be loaded and using its RISC LRAM region * so its region must be saved and restored. * * Note: This code makes the assumption, which is currently true, * that a chip reset does not clear RISC LRAM. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvReadWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Save current per TID negotiated values. */ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ASC_MAX_TID; tid++) { AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } /* * RAM BIST (Built-In Self Test) * * Address : I/O base + offset 0x38h register (byte). * Function: Bit 7-6(RW) : RAM mode * Normal Mode : 0x00 * Pre-test Mode : 0x40 * RAM Test Mode : 0x80 * Bit 5 : unused * Bit 4(RO) : Done bit * Bit 3-0(RO) : Status * Host Error : 0x08 * Int_RAM Error : 0x04 * RISC Error : 0x02 * SCSI Error : 0x01 * No Error : 0x00 * * Note: RAM BIST code should be put right here, before loading the * microcode and after saving the RISC memory BIOS region. */ /* * LRAM Pre-test * * Write PRE_TEST_MODE (0x40) to register and wait for 10 milliseconds. * If Done bit not set or low nibble not PRE_TEST_VALUE (0x05), return * an error. Reset to NORMAL_MODE (0x00) and do again. If cannot reset * to NORMAL_MODE, return an error too. 
*/ for (i = 0; i < 2; i++) { AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, PRE_TEST_MODE); mdelay(10); /* Wait for 10ms before reading back. */ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); if ((byte & RAM_TEST_DONE) == 0 || (byte & 0x0F) != PRE_TEST_VALUE) { asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; return ADV_ERROR; } AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); mdelay(10); /* Wait for 10ms before reading back. */ if (AdvReadByteRegister(iop_base, IOPB_RAM_BIST) != NORMAL_VALUE) { asc_dvc->err_code = ASC_IERR_BIST_PRE_TEST; return ADV_ERROR; } } /* * LRAM Test - It takes about 1.5 ms to run through the test. * * Write RAM_TEST_MODE (0x80) to register and wait for 10 milliseconds. * If Done bit not set or Status not 0, save register byte, set the * err_code, and return an error. */ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, RAM_TEST_MODE); mdelay(10); /* Wait for 10ms before checking status. */ byte = AdvReadByteRegister(iop_base, IOPB_RAM_BIST); if ((byte & RAM_TEST_DONE) == 0 || (byte & RAM_TEST_STATUS) != 0) { /* Get here if Done bit not set or Status not 0. */ asc_dvc->bist_err_code = byte; /* for BIOS display message */ asc_dvc->err_code = ASC_IERR_BIST_RAM_TEST; return ADV_ERROR; } /* We need to reset back to normal mode after LRAM test passes. 
*/ AdvWriteByteRegister(iop_base, IOPB_RAM_BIST, NORMAL_MODE); err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fwname, err); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return err; } if (fw->size < 4) { printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", fw->size, fwname); release_firmware(fw); asc_dvc->err_code = ASC_IERR_MCODE_CHKSUM; return -EINVAL; } chksum = (fw->data[3] << 24) | (fw->data[2] << 16) | (fw->data[1] << 8) | fw->data[0]; asc_dvc->err_code = AdvLoadMicrocode(iop_base, &fw->data[4], fw->size - 4, ADV_38C1600_MEMSIZE, chksum); release_firmware(fw); if (asc_dvc->err_code) return ADV_ERROR; /* * Restore the RISC memory BIOS region. */ for (i = 0; i < ASC_MC_BIOSLEN / 2; i++) { AdvWriteWordLram(iop_base, ASC_MC_BIOSMEM + (2 * i), bios_mem[i]); } /* * Calculate and write the microcode code checksum to the microcode * code checksum location ASC_MC_CODE_CHK_SUM (0x2C). */ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr); AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr); code_sum = 0; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, begin_addr); for (word = begin_addr; word < end_addr; word += 2) { code_sum += AdvReadWordAutoIncLram(iop_base); } AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum); /* * Read microcode version and date. */ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, asc_dvc->cfg->mcode_date); AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, asc_dvc->cfg->mcode_version); /* * Set the chip type to indicate the ASC38C1600. */ AdvWriteWordLram(iop_base, ASC_MC_CHIP_TYPE, ADV_CHIP_ASC38C1600); /* * Write 1 to bit 14 'DIS_TERM_DRV' in the SCSI_CFG1 register. * When DIS_TERM_DRV set to 1, C_DET[3:0] will reflect current * cable detection and then we are able to read C_DET[3:0]. * * Note: We will reset DIS_TERM_DRV to 0 in the 'Set SCSI_CFG1 * Microcode Default Value' section below. 
*/ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1, scsi_cfg1 | DIS_TERM_DRV); /* * If the PCI Configuration Command Register "Parity Error Response * Control" Bit was clear (0), then set the microcode variable * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode * to ignore DMA parity errors. */ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_IGNORE_PERR; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * If the BIOS control flag AIPP (Asynchronous Information * Phase Protection) disable bit is not set, then set the firmware * 'control_flag' CONTROL_FLAG_ENABLE_AIPP bit to enable * AIPP checking and encoding. */ if ((asc_dvc->bios_ctrl & BIOS_CTRL_AIPP_DIS) == 0) { AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); word |= CONTROL_FLAG_ENABLE_AIPP; AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word); } /* * For ASC-38C1600 use DMA_CFG0 default values: FIFO_THRESH_80B [6:4], * and START_CTL_TH [3:2]. */ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0, FIFO_THRESH_80B | START_CTL_TH | READ_CMD_MRM); /* * Microcode operating variables for WDTR, SDTR, and command tag * queuing will be set in slave_configure() based on what a * device reports it is capable of in Inquiry byte 7. * * If SCSI Bus Resets have been disabled, then directly set * SDTR and WDTR from the EEPROM configuration. This will allow * the BIOS and warm boot to work without a SCSI bus hang on * the Inquiry caused by host and target mismatched DTR values. * Without the SCSI Bus Reset, before an Inquiry a device can't * be assumed to be in Asynchronous, Narrow mode. 
*/ if ((asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) == 0) { AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, asc_dvc->wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, asc_dvc->sdtr_able); } /* * Set microcode operating variables for DISC and SDTR_SPEED1, * SDTR_SPEED2, SDTR_SPEED3, and SDTR_SPEED4 based on the EEPROM * configuration values. * * The SDTR per TID bitmask overrides the SDTR_SPEED1, SDTR_SPEED2, * SDTR_SPEED3, and SDTR_SPEED4 values so it is safe to set them * without determining here whether the device supports SDTR. */ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, asc_dvc->cfg->disc_enable); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED1, asc_dvc->sdtr_speed1); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED2, asc_dvc->sdtr_speed2); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED3, asc_dvc->sdtr_speed3); AdvWriteWordLram(iop_base, ASC_MC_SDTR_SPEED4, asc_dvc->sdtr_speed4); /* * Set SCSI_CFG0 Microcode Default Value. * * The microcode will set the SCSI_CFG0 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0, PARITY_EN | QUEUE_128 | SEL_TMO_LONG | OUR_ID_EN | asc_dvc->chip_scsi_id); /* * Calculate SCSI_CFG1 Microcode Default Value. * * The microcode will set the SCSI_CFG1 register using this value * after it is started below. * * Each ASC-38C1600 function has only two cable detect bits. * The bus mode override bits are in IOPB_SOFT_OVER_WR. */ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1); /* * If the cable is reversed all of the SCSI_CTRL register signals * will be set. Check for and return an error if this condition is * found. */ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07) { asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE; return ADV_ERROR; } /* * Each ASC-38C1600 function has two connectors. Only an HVD device * can not be connected to either connector. An LVD device or SE device * may be connected to either connecor. 
If an SE device is connected, * then at most Ultra speed (20 Mhz) can be used on both connectors. * * If an HVD device is attached, return an error. */ if (scsi_cfg1 & HVD) { asc_dvc->err_code |= ASC_IERR_HVD_DEVICE; return ADV_ERROR; } /* * Each function in the ASC-38C1600 uses only the SE cable detect and * termination because there are two connectors for each function. Each * function may use either LVD or SE mode. Corresponding the SE automatic * termination control EEPROM bits are used for each function. Each * function has its own EEPROM. If SE automatic control is enabled for * the function, then set the termination value based on a table listed * in a_condor.h. * * If manual termination is specified in the EEPROM for the function, * then 'termination' was set-up in AscInitFrom38C1600EEPROM() and is * ready to be 'ored' into SCSI_CFG1. */ if ((asc_dvc->cfg->termination & TERM_SE) == 0) { struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc); /* SE automatic termination control is enabled. */ switch (scsi_cfg1 & C_DET_SE) { /* TERM_SE_HI: on, TERM_SE_LO: on */ case 0x1: case 0x2: case 0x3: asc_dvc->cfg->termination |= TERM_SE; break; case 0x0: if (PCI_FUNC(pdev->devfn) == 0) { /* Function 0 - TERM_SE_HI: off, TERM_SE_LO: off */ } else { /* Function 1 - TERM_SE_HI: on, TERM_SE_LO: off */ asc_dvc->cfg->termination |= TERM_SE_HI; } break; } } /* * Clear any set TERM_SE bits. */ scsi_cfg1 &= ~TERM_SE; /* * Invert the TERM_SE bits and then set 'scsi_cfg1'. */ scsi_cfg1 |= (~asc_dvc->cfg->termination & TERM_SE); /* * Clear Big Endian and Terminator Polarity bits and set possibly * modified termination control bits in the Microcode SCSI_CFG1 * Register Value. * * Big Endian bit is not used even on big endian machines. */ scsi_cfg1 &= (~BIG_ENDIAN & ~DIS_TERM_DRV & ~TERM_POL); /* * Set SCSI_CFG1 Microcode Default Value * * Set possibly modified termination control bits in the Microcode * SCSI_CFG1 Register Value. 
* * The microcode will set the SCSI_CFG1 register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1, scsi_cfg1); /* * Set MEM_CFG Microcode Default Value * * The microcode will set the MEM_CFG register using this value * after it is started below. * * MEM_CFG may be accessed as a word or byte, but only bits 0-7 * are defined. * * ASC-38C1600 has 32KB internal memory. * * XXX - Since ASC38C1600 Rev.3 has a Local RAM failure issue, we come * out a special 16K Adv Library and Microcode version. After the issue * resolved, we should turn back to the 32K support. Both a_condor.h and * mcode.sas files also need to be updated. * * AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, * BIOS_EN | RAM_SZ_32KB); */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_MEM_CFG, BIOS_EN | RAM_SZ_16KB); /* * Set SEL_MASK Microcode Default Value * * The microcode will set the SEL_MASK register using this value * after it is started below. */ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK, ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id)); AdvBuildCarrierFreelist(asc_dvc); /* * Set-up the Host->RISC Initiator Command Queue (ICQ). */ if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa)); /* * The first command issued will be placed in the stopper carrier. */ asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC ICQ physical address start value. Initialize the * COMMA register to the same value otherwise the RISC will * prematurely detect a command is available. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_ICQ, asc_dvc->icq_sp->carr_pa); AdvWriteDWordRegister(iop_base, IOPDW_COMMA, le32_to_cpu(asc_dvc->icq_sp->carr_pa)); /* * Set-up the RISC->Host Initiator Response Queue (IRQ). 
*/ if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } asc_dvc->carr_freelist = (ADV_CARR_T *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa)); /* * The first command completed by the RISC will be placed in * the stopper. * * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is * completed the RISC will set the ASC_RQ_STOPPER bit. */ asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC IRQ physical address start value. */ AdvWriteDWordLramNoSwap(iop_base, ASC_MC_IRQ, asc_dvc->irq_sp->carr_pa); asc_dvc->carr_pending_cnt = 0; AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES, (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR)); AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word); AdvWriteWordRegister(iop_base, IOPW_PC, word); /* finally, finally, gentlemen, start your engine */ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN); /* * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus * Resets should be performed. The RISC has to be running * to issue a SCSI Bus Reset. */ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS) { /* * If the BIOS Signature is present in memory, restore the * per TID microcode operating variables. */ if (bios_mem[(ASC_MC_BIOS_SIGNATURE - ASC_MC_BIOSMEM) / 2] == 0x55AA) { /* * Restore per TID negotiated values. */ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able); AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); for (tid = 0; tid <= ASC_MAX_TID; tid++) { AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid, max_cmd[tid]); } } else { if (AdvResetSB(asc_dvc) != ADV_TRUE) { warn_code = ASC_WARN_BUSRESET_ERROR; } } } return warn_code; } /* * Reset chip and SCSI Bus. * * Return Value: * ADV_TRUE(1) - Chip re-initialization and SCSI Bus Reset successful. 
 * ADV_FALSE(0) - Chip re-initialization and SCSI Bus Reset failure.
 */
static int AdvResetChipAndSB(ADV_DVC_VAR *asc_dvc)
{
	int status;
	ushort wdtr_able, sdtr_able, tagqng_able;
	ushort ppr_able = 0;
	uchar tid, max_cmd[ADV_MAX_TID + 1];
	AdvPortAddr iop_base;
	ushort bios_sig;

	iop_base = asc_dvc->iop_base;

	/*
	 * Save current per TID negotiated values so they survive the
	 * chip re-initialization below.
	 */
	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		/* PPR negotiation state only exists on the ASC38C1600. */
		AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
	}
	AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
				max_cmd[tid]);
	}

	/*
	 * Force the AdvInitAsc3550/38C0800Driver() function to
	 * perform a SCSI Bus Reset by clearing the BIOS signature word.
	 * The initialization functions assume a SCSI Bus Reset is not
	 * needed if the BIOS signature word is present.
	 */
	AdvReadWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig);
	AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, 0);

	/*
	 * Stop chip and reset it.
	 */
	AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_STOP);
	AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_RESET);
	mdelay(100);		/* give the chip time to settle after reset */
	AdvWriteWordRegister(iop_base, IOPW_CTRL_REG,
			     ADV_CTRL_REG_CMD_WR_IO_REG);

	/*
	 * Reset Adv Library error code, if any, and try
	 * re-initializing the chip.
	 */
	asc_dvc->err_code = 0;
	if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		status = AdvInitAsc38C1600Driver(asc_dvc);
	} else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
		status = AdvInitAsc38C0800Driver(asc_dvc);
	} else {
		status = AdvInitAsc3550Driver(asc_dvc);
	}

	/* Translate initialization return value to status value. */
	if (status == 0) {
		status = ADV_TRUE;
	} else {
		status = ADV_FALSE;
	}

	/*
	 * Restore the BIOS signature word.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_BIOS_SIGNATURE, bios_sig);

	/*
	 * Restore per TID negotiated values.
	 */
	AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able);
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
	if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, ppr_able);
	}
	AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able);
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
				 max_cmd[tid]);
	}

	return status;
}

/*
 * adv_async_callback() - Adv Library asynchronous event callback function.
 *
 * 'code' is the microcode ASC_MC_INTRB_CODE byte value describing the
 * asynchronous event.  Unknown codes are logged and otherwise ignored.
 */
static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
{
	switch (code) {
	case ADV_ASYNC_SCSI_BUS_RESET_DET:
		/*
		 * The firmware detected a SCSI Bus reset.
		 */
		ASC_DBG(0, "ADV_ASYNC_SCSI_BUS_RESET_DET\n");
		break;

	case ADV_ASYNC_RDMA_FAILURE:
		/*
		 * Handle RDMA failure by resetting the SCSI Bus and
		 * possibly the chip if it is unresponsive. Log the error
		 * with a unique code.
		 */
		ASC_DBG(0, "ADV_ASYNC_RDMA_FAILURE\n");
		AdvResetChipAndSB(adv_dvc_varp);
		break;

	case ADV_HOST_SCSI_BUS_RESET:
		/*
		 * Host generated SCSI bus reset occurred.
		 */
		ASC_DBG(0, "ADV_HOST_SCSI_BUS_RESET\n");
		break;

	default:
		ASC_DBG(0, "unknown code 0x%x\n", code);
		break;
	}
}

/*
 * adv_isr_callback() - Second Level Interrupt Handler called by AdvISR().
 *
 * Callback function for the Wide SCSI Adv Library.  Translates the
 * completed ADV_SCSI_REQ_Q ('scsiqp') into a SCSI mid-layer result for
 * the associated struct scsi_cmnd, completes the command, and returns
 * the request's adv_req_t and adv_sgblk_t structures to the board free
 * lists.
 */
static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
{
	struct asc_board *boardp;
	adv_req_t *reqp;
	adv_sgblk_t *sgblkp;
	struct scsi_cmnd *scp;
	struct Scsi_Host *shost;
	ADV_DCNT resid_cnt;

	ASC_DBG(1, "adv_dvc_varp 0x%lx, scsiqp 0x%lx\n",
		(ulong)adv_dvc_varp, (ulong)scsiqp);
	ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);

	/*
	 * Get the adv_req_t structure for the command that has been
	 * completed. The adv_req_t structure actually contains the
	 * completed ADV_SCSI_REQ_Q structure.
	 */
	reqp = (adv_req_t *)ADV_U32_TO_VADDR(scsiqp->srb_ptr);
	ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp);
	if (reqp == NULL) {
		ASC_PRINT("adv_isr_callback: reqp is NULL\n");
		return;
	}

	/*
	 * Get the struct scsi_cmnd structure and Scsi_Host structure for the
	 * command that has been completed.
	 *
	 * Note: The adv_req_t request structure and adv_sgblk_t structure,
	 * if any, are dropped, because a board structure pointer can not be
	 * determined.
	 */
	scp = reqp->cmndp;
	ASC_DBG(1, "scp 0x%p\n", scp);
	if (scp == NULL) {
		ASC_PRINT
		    ("adv_isr_callback: scp is NULL; adv_req_t dropped.\n");
		return;
	}
	ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);

	shost = scp->device->host;
	ASC_STATS(shost, callback);
	ASC_DBG(1, "shost 0x%p\n", shost);

	boardp = shost_priv(shost);
	BUG_ON(adv_dvc_varp != &boardp->dvc_var.adv_dvc_var);

	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiqp->done_status) {
	case QD_NO_ERROR:
		ASC_DBG(2, "QD_NO_ERROR\n");
		scp->result = 0;

		/*
		 * Check for an underrun condition.
		 *
		 * If there was no error and an underrun condition, then
		 * return the number of underrun bytes.
		 */
		resid_cnt = le32_to_cpu(scsiqp->data_cnt);
		if (scsi_bufflen(scp) != 0 && resid_cnt != 0 &&
		    resid_cnt <= scsi_bufflen(scp)) {
			ASC_DBG(1, "underrun condition %lu bytes\n",
				(ulong)resid_cnt);
			scsi_set_resid(scp, resid_cnt);
		}
		break;

	case QD_WITH_ERROR:
		ASC_DBG(2, "QD_WITH_ERROR\n");
		switch (scsiqp->host_status) {
		case QHSTA_NO_ERROR:
			if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) {
				ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
				ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
						  SCSI_SENSE_BUFFERSIZE);
				/*
				 * Note: The 'status_byte()' macro used by
				 * target drivers defined in scsi.h shifts the
				 * status byte returned by host drivers right
				 * by 1 bit. This is why target drivers also
				 * use right shifted status byte definitions.
				 * For instance target drivers use
				 * CHECK_CONDITION, defined to 0x1, instead of
				 * the SCSI defined check condition value of
				 * 0x2. Host drivers are supposed to return
				 * the status byte as it is defined by SCSI.
				 */
				scp->result = DRIVER_BYTE(DRIVER_SENSE) |
				    STATUS_BYTE(scsiqp->scsi_status);
			} else {
				scp->result = STATUS_BYTE(scsiqp->scsi_status);
			}
			break;

		default:
			/* Some other QHSTA error occurred. */
			ASC_DBG(1, "host_status 0x%x\n", scsiqp->host_status);
			scp->result = HOST_BYTE(DID_BAD_TARGET);
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
		scp->result =
		    HOST_BYTE(DID_ABORT) | STATUS_BYTE(scsiqp->scsi_status);
		break;

	default:
		ASC_DBG(1, "done_status 0x%x\n", scsiqp->done_status);
		scp->result =
		    HOST_BYTE(DID_ERROR) | STATUS_BYTE(scsiqp->scsi_status);
		break;
	}

	/*
	 * If the 'init_tidmask' bit isn't already set for the target and the
	 * current request finished normally, then set the bit for the target
	 * to indicate that a device is present.
	 */
	if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 &&
	    scsiqp->done_status == QD_NO_ERROR &&
	    scsiqp->host_status == QHSTA_NO_ERROR) {
		boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id);
	}

	asc_scsi_done(scp);

	/*
	 * Free all 'adv_sgblk_t' structures allocated for the request.
	 */
	while ((sgblkp = reqp->sgblkp) != NULL) {
		/* Remove 'sgblkp' from the request list. */
		reqp->sgblkp = sgblkp->next_sgblkp;

		/* Add 'sgblkp' to the board free list. */
		sgblkp->next_sgblkp = boardp->adv_sgblkp;
		boardp->adv_sgblkp = sgblkp;
	}

	/*
	 * Free the adv_req_t structure used with the command by adding
	 * it back to the board free list.
	 */
	reqp->next_reqp = boardp->adv_reqp;
	boardp->adv_reqp = reqp;

	ASC_DBG(1, "done\n");
}

/*
 * Adv Library Interrupt Service Routine
 *
 * This function is called by a driver's interrupt service routine.
 * The function disables and re-enables interrupts.
 *
 * When a microcode idle command is completed, the ADV_DVC_VAR
 * 'idle_cmd_done' field is set to ADV_TRUE.
 *
 * Note: AdvISR() can be called when interrupts are disabled or even
 * when there is no hardware interrupt condition present.
It will * always check for completed idle commands and microcode requests. * This is an important feature that shouldn't be changed because it * allows commands to be completed from polling mode loops. * * Return: * ADV_TRUE(1) - interrupt was pending * ADV_FALSE(0) - no interrupt was pending */ static int AdvISR(ADV_DVC_VAR *asc_dvc) { AdvPortAddr iop_base; uchar int_stat; ushort target_bit; ADV_CARR_T *free_carrp; ADV_VADDR irq_next_vpa; ADV_SCSI_REQ_Q *scsiq; iop_base = asc_dvc->iop_base; /* Reading the register clears the interrupt. */ int_stat = AdvReadByteRegister(iop_base, IOPB_INTR_STATUS_REG); if ((int_stat & (ADV_INTR_STATUS_INTRA | ADV_INTR_STATUS_INTRB | ADV_INTR_STATUS_INTRC)) == 0) { return ADV_FALSE; } /* * Notify the driver of an asynchronous microcode condition by * calling the adv_async_callback function. The function * is passed the microcode ASC_MC_INTRB_CODE byte value. */ if (int_stat & ADV_INTR_STATUS_INTRB) { uchar intrb_code; AdvReadByteLram(iop_base, ASC_MC_INTRB_CODE, intrb_code); if (asc_dvc->chip_type == ADV_CHIP_ASC3550 || asc_dvc->chip_type == ADV_CHIP_ASC38C0800) { if (intrb_code == ADV_ASYNC_CARRIER_READY_FAILURE && asc_dvc->carr_pending_cnt != 0) { AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_A); if (asc_dvc->chip_type == ADV_CHIP_ASC3550) { AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_NOP); } } } adv_async_callback(asc_dvc, intrb_code); } /* * Check if the IRQ stopper carrier contains a completed request. */ while (((irq_next_vpa = le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ASC_RQ_DONE) != 0) { /* * Get a pointer to the newly completed ADV_SCSI_REQ_Q structure. * The RISC will have set 'areq_vpa' to a virtual address. * * The firmware will have copied the ASC_SCSI_REQ_Q.scsiq_ptr * field to the carrier ADV_CARR_T.areq_vpa field. The conversion * below complements the conversion of ASC_SCSI_REQ_Q.scsiq_ptr' * in AdvExeScsiQueue(). 
*/ scsiq = (ADV_SCSI_REQ_Q *) ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->areq_vpa)); /* * Request finished with good status and the queue was not * DMAed to host memory by the firmware. Set all status fields * to indicate good status. */ if ((irq_next_vpa & ASC_RQ_GOOD) != 0) { scsiq->done_status = QD_NO_ERROR; scsiq->host_status = scsiq->scsi_status = 0; scsiq->data_cnt = 0L; } /* * Advance the stopper pointer to the next carrier * ignoring the lower four bits. Free the previous * stopper carrier. */ free_carrp = asc_dvc->irq_sp; asc_dvc->irq_sp = (ADV_CARR_T *) ADV_U32_TO_VADDR(ASC_GET_CARRP(irq_next_vpa)); free_carrp->next_vpa = cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist)); asc_dvc->carr_freelist = free_carrp; asc_dvc->carr_pending_cnt--; target_bit = ADV_TID_TO_TIDMASK(scsiq->target_id); /* * Clear request microcode control flag. */ scsiq->cntl = 0; /* * Notify the driver of the completed request by passing * the ADV_SCSI_REQ_Q pointer to its callback function. */ scsiq->a_flag |= ADV_SCSIQ_DONE; adv_isr_callback(asc_dvc, scsiq); /* * Note: After the driver callback function is called, 'scsiq' * can no longer be referenced. * * Fall through and continue processing other completed * requests... 
*/ } return ADV_TRUE; } static int AscSetLibErrorCode(ASC_DVC_VAR *asc_dvc, ushort err_code) { if (asc_dvc->err_code == 0) { asc_dvc->err_code = err_code; AscWriteLramWord(asc_dvc->iop_base, ASCV_ASCDVC_ERR_CODE_W, err_code); } return err_code; } static void AscAckInterrupt(PortAddr iop_base) { uchar host_flag; uchar risc_flag; ushort loop; loop = 0; do { risc_flag = AscReadLramByte(iop_base, ASCV_RISC_FLAG_B); if (loop++ > 0x7FFF) { break; } } while ((risc_flag & ASC_RISC_FLAG_GEN_INT) != 0); host_flag = AscReadLramByte(iop_base, ASCV_HOST_FLAG_B) & (~ASC_HOST_FLAG_ACK_INT); AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, (uchar)(host_flag | ASC_HOST_FLAG_ACK_INT)); AscSetChipStatus(iop_base, CIW_INT_ACK); loop = 0; while (AscGetChipStatus(iop_base) & CSW_INT_PENDING) { AscSetChipStatus(iop_base, CIW_INT_ACK); if (loop++ > 3) { break; } } AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag); } static uchar AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time) { const uchar *period_table; int max_index; int min_index; int i; period_table = asc_dvc->sdtr_period_tbl; max_index = (int)asc_dvc->max_sdtr_index; min_index = (int)asc_dvc->min_sdtr_index; if ((syn_time <= period_table[max_index])) { for (i = min_index; i < (max_index - 1); i++) { if (syn_time <= period_table[i]) { return (uchar)i; } } return (uchar)max_index; } else { return (uchar)(max_index + 1); } } static uchar AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset) { EXT_MSG sdtr_buf; uchar sdtr_period_index; PortAddr iop_base; iop_base = asc_dvc->iop_base; sdtr_buf.msg_type = EXTENDED_MESSAGE; sdtr_buf.msg_len = MS_SDTR_LEN; sdtr_buf.msg_req = EXTENDED_SDTR; sdtr_buf.xfer_period = sdtr_period; sdtr_offset &= ASC_SYN_MAX_OFFSET; sdtr_buf.req_ack_offset = sdtr_offset; sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period); if (sdtr_period_index <= asc_dvc->max_sdtr_index) { AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&sdtr_buf, sizeof(EXT_MSG) >> 1); 
return ((sdtr_period_index << 4) | sdtr_offset); } else { sdtr_buf.req_ack_offset = 0; AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&sdtr_buf, sizeof(EXT_MSG) >> 1); return 0; } } static uchar AscCalSDTRData(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar syn_offset) { uchar byte; uchar sdtr_period_ix; sdtr_period_ix = AscGetSynPeriodIndex(asc_dvc, sdtr_period); if (sdtr_period_ix > asc_dvc->max_sdtr_index) return 0xFF; byte = (sdtr_period_ix << 4) | (syn_offset & ASC_SYN_MAX_OFFSET); return byte; } static int AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data) { ASC_SCSI_BIT_ID_TYPE org_id; int i; int sta = TRUE; AscSetBank(iop_base, 1); org_id = AscReadChipDvcID(iop_base); for (i = 0; i <= ASC_MAX_TID; i++) { if (org_id == (0x01 << i)) break; } org_id = (ASC_SCSI_BIT_ID_TYPE) i; AscWriteChipDvcID(iop_base, id); if (AscReadChipDvcID(iop_base) == (0x01 << id)) { AscSetBank(iop_base, 0); AscSetChipSyn(iop_base, sdtr_data); if (AscGetChipSyn(iop_base) != sdtr_data) { sta = FALSE; } } else { sta = FALSE; } AscSetBank(iop_base, 1); AscWriteChipDvcID(iop_base, org_id); AscSetBank(iop_base, 0); return (sta); } static void AscSetChipSDTR(PortAddr iop_base, uchar sdtr_data, uchar tid_no) { AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data); AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data); } static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) { EXT_MSG ext_msg; EXT_MSG out_msg; ushort halt_q_addr; int sdtr_accept; ushort int_halt_code; ASC_SCSI_BIT_ID_TYPE scsi_busy; ASC_SCSI_BIT_ID_TYPE target_id; PortAddr iop_base; uchar tag_code; uchar q_status; uchar halt_qp; uchar sdtr_data; uchar target_ix; uchar q_cntl, tid_no; uchar cur_dvc_qng; uchar asyn_sdtr; uchar scsi_status; struct asc_board *boardp; BUG_ON(!asc_dvc->drv_ptr); boardp = asc_dvc->drv_ptr; iop_base = asc_dvc->iop_base; int_halt_code = AscReadLramWord(iop_base, ASCV_HALTCODE_W); halt_qp = AscReadLramByte(iop_base, ASCV_CURCDB_B); halt_q_addr = ASC_QNO_TO_QADDR(halt_qp); target_ix = 
AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_TARGET_IX)); q_cntl = AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL)); tid_no = ASC_TIX_TO_TID(target_ix); target_id = (uchar)ASC_TID_TO_TARGET_ID(tid_no); if (asc_dvc->pci_fix_asyn_xfer & target_id) { asyn_sdtr = ASYN_SDTR_DATA_FIX_PCI_REV_AB; } else { asyn_sdtr = 0; } if (int_halt_code == ASC_HALT_DISABLE_ASYN_USE_SYN_FIX) { if (asc_dvc->pci_fix_asyn_xfer & target_id) { AscSetChipSDTR(iop_base, 0, tid_no); boardp->sdtr_data[tid_no] = 0; } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) { if (asc_dvc->pci_fix_asyn_xfer & target_id) { AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); boardp->sdtr_data[tid_no] = asyn_sdtr; } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else if (int_halt_code == ASC_HALT_EXTMSG_IN) { AscMemWordCopyPtrFromLram(iop_base, ASCV_MSGIN_BEG, (uchar *)&ext_msg, sizeof(EXT_MSG) >> 1); if (ext_msg.msg_type == EXTENDED_MESSAGE && ext_msg.msg_req == EXTENDED_SDTR && ext_msg.msg_len == MS_SDTR_LEN) { sdtr_accept = TRUE; if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) { sdtr_accept = FALSE; ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET; } if ((ext_msg.xfer_period < asc_dvc->sdtr_period_tbl[asc_dvc->min_sdtr_index]) || (ext_msg.xfer_period > asc_dvc->sdtr_period_tbl[asc_dvc-> max_sdtr_index])) { sdtr_accept = FALSE; ext_msg.xfer_period = asc_dvc->sdtr_period_tbl[asc_dvc-> min_sdtr_index]; } if (sdtr_accept) { sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period, ext_msg.req_ack_offset); if ((sdtr_data == 0xFF)) { q_cntl |= QC_MSG_OUT; asc_dvc->init_sdtr &= ~target_id; asc_dvc->sdtr_done &= ~target_id; AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); boardp->sdtr_data[tid_no] = asyn_sdtr; } } if (ext_msg.req_ack_offset == 0) { q_cntl &= ~QC_MSG_OUT; asc_dvc->init_sdtr &= ~target_id; asc_dvc->sdtr_done &= ~target_id; AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); } else { 
if (sdtr_accept && (q_cntl & QC_MSG_OUT)) { q_cntl &= ~QC_MSG_OUT; asc_dvc->sdtr_done |= target_id; asc_dvc->init_sdtr |= target_id; asc_dvc->pci_fix_asyn_xfer &= ~target_id; sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period, ext_msg. req_ack_offset); AscSetChipSDTR(iop_base, sdtr_data, tid_no); boardp->sdtr_data[tid_no] = sdtr_data; } else { q_cntl |= QC_MSG_OUT; AscMsgOutSDTR(asc_dvc, ext_msg.xfer_period, ext_msg.req_ack_offset); asc_dvc->pci_fix_asyn_xfer &= ~target_id; sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period, ext_msg. req_ack_offset); AscSetChipSDTR(iop_base, sdtr_data, tid_no); boardp->sdtr_data[tid_no] = sdtr_data; asc_dvc->sdtr_done |= target_id; asc_dvc->init_sdtr |= target_id; } } AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else if (ext_msg.msg_type == EXTENDED_MESSAGE && ext_msg.msg_req == EXTENDED_WDTR && ext_msg.msg_len == MS_WDTR_LEN) { ext_msg.wdtr_width = 0; AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&ext_msg, sizeof(EXT_MSG) >> 1); q_cntl |= QC_MSG_OUT; AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else { ext_msg.msg_type = MESSAGE_REJECT; AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&ext_msg, sizeof(EXT_MSG) >> 1); q_cntl |= QC_MSG_OUT; AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } } else if (int_halt_code == ASC_HALT_CHK_CONDITION) { q_cntl |= QC_REQ_SENSE; if ((asc_dvc->init_sdtr & target_id) != 0) { asc_dvc->sdtr_done &= ~target_id; sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no); q_cntl |= QC_MSG_OUT; AscMsgOutSDTR(asc_dvc, asc_dvc-> sdtr_period_tbl[(sdtr_data >> 4) & (uchar)(asc_dvc-> max_sdtr_index - 1)], (uchar)(sdtr_data & (uchar) ASC_SYN_MAX_OFFSET)); } 
AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); tag_code = AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort) ASC_SCSIQ_B_TAG_CODE)); tag_code &= 0xDC; if ((asc_dvc->pci_fix_asyn_xfer & target_id) && !(asc_dvc->pci_fix_asyn_xfer_always & target_id) ) { tag_code |= (ASC_TAG_FLAG_DISABLE_DISCONNECT | ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX); } AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_TAG_CODE), tag_code); q_status = AscReadLramByte(iop_base, (ushort)(halt_q_addr + (ushort) ASC_SCSIQ_B_STATUS)); q_status |= (QS_READY | QS_BUSY); AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_STATUS), q_status); scsi_busy = AscReadLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B); scsi_busy &= ~target_id; AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else if (int_halt_code == ASC_HALT_SDTR_REJECTED) { AscMemWordCopyPtrFromLram(iop_base, ASCV_MSGOUT_BEG, (uchar *)&out_msg, sizeof(EXT_MSG) >> 1); if ((out_msg.msg_type == EXTENDED_MESSAGE) && (out_msg.msg_len == MS_SDTR_LEN) && (out_msg.msg_req == EXTENDED_SDTR)) { asc_dvc->init_sdtr &= ~target_id; asc_dvc->sdtr_done &= ~target_id; AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); boardp->sdtr_data[tid_no] = asyn_sdtr; } q_cntl &= ~QC_MSG_OUT; AscWriteLramByte(iop_base, (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) { scsi_status = AscReadLramByte(iop_base, (ushort)((ushort)halt_q_addr + (ushort) ASC_SCSIQ_SCSI_STATUS)); cur_dvc_qng = AscReadLramByte(iop_base, (ushort)((ushort)ASC_QADR_BEG + (ushort)target_ix)); if ((cur_dvc_qng > 0) && (asc_dvc->cur_dvc_qng[tid_no] > 0)) { scsi_busy = AscReadLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B); scsi_busy |= target_id; AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); asc_dvc->queue_full_or_busy |= 
target_id; if (scsi_status == SAM_STAT_TASK_SET_FULL) { if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) { cur_dvc_qng -= 1; asc_dvc->max_dvc_qng[tid_no] = cur_dvc_qng; AscWriteLramByte(iop_base, (ushort)((ushort) ASCV_MAX_DVC_QNG_BEG + (ushort) tid_no), cur_dvc_qng); /* * Set the device queue depth to the * number of active requests when the * QUEUE FULL condition was encountered. */ boardp->queue_full |= target_id; boardp->queue_full_cnt[tid_no] = cur_dvc_qng; } } } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } #if CC_VERY_LONG_SG_LIST else if (int_halt_code == ASC_HALT_HOST_COPY_SG_LIST_TO_RISC) { uchar q_no; ushort q_addr; uchar sg_wk_q_no; uchar first_sg_wk_q_no; ASC_SCSI_Q *scsiq; /* Ptr to driver request. */ ASC_SG_HEAD *sg_head; /* Ptr to driver SG request. */ ASC_SG_LIST_Q scsi_sg_q; /* Structure written to queue. */ ushort sg_list_dwords; ushort sg_entry_cnt; uchar next_qp; int i; q_no = AscReadLramByte(iop_base, (ushort)ASCV_REQ_SG_LIST_QP); if (q_no == ASC_QLINK_END) return 0; q_addr = ASC_QNO_TO_QADDR(q_no); /* * Convert the request's SRB pointer to a host ASC_SCSI_REQ * structure pointer using a macro provided by the driver. * The ASC_SCSI_REQ pointer provides a pointer to the * host ASC_SG_HEAD structure. */ /* Read request's SRB pointer. */ scsiq = (ASC_SCSI_Q *) ASC_SRB2SCSIQ(ASC_U32_TO_VADDR(AscReadLramDWord(iop_base, (ushort) (q_addr + ASC_SCSIQ_D_SRBPTR)))); /* * Get request's first and working SG queue. */ sg_wk_q_no = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_SG_WK_QP)); first_sg_wk_q_no = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FIRST_SG_WK_QP)); /* * Reset request's working SG queue back to the * first SG queue. */ AscWriteLramByte(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_SG_WK_QP), first_sg_wk_q_no); sg_head = scsiq->sg_head; /* * Set sg_entry_cnt to the number of SG elements * that will be completed on this interrupt. * * Note: The allocated SG queues contain ASC_MAX_SG_LIST - 1 * SG elements. 
The data_cnt and data_addr fields which * add 1 to the SG element capacity are not used when * restarting SG handling after a halt. */ if (scsiq->remain_sg_entry_cnt > (ASC_MAX_SG_LIST - 1)) { sg_entry_cnt = ASC_MAX_SG_LIST - 1; /* * Keep track of remaining number of SG elements that * will need to be handled on the next interrupt. */ scsiq->remain_sg_entry_cnt -= (ASC_MAX_SG_LIST - 1); } else { sg_entry_cnt = scsiq->remain_sg_entry_cnt; scsiq->remain_sg_entry_cnt = 0; } /* * Copy SG elements into the list of allocated SG queues. * * Last index completed is saved in scsiq->next_sg_index. */ next_qp = first_sg_wk_q_no; q_addr = ASC_QNO_TO_QADDR(next_qp); scsi_sg_q.sg_head_qp = q_no; scsi_sg_q.cntl = QCSG_SG_XFER_LIST; for (i = 0; i < sg_head->queue_cnt; i++) { scsi_sg_q.seq_no = i + 1; if (sg_entry_cnt > ASC_SG_LIST_PER_Q) { sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2); sg_entry_cnt -= ASC_SG_LIST_PER_Q; /* * After very first SG queue RISC FW uses next * SG queue first element then checks sg_list_cnt * against zero and then decrements, so set * sg_list_cnt 1 less than number of SG elements * in each SG queue. */ scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1; scsi_sg_q.sg_cur_list_cnt = ASC_SG_LIST_PER_Q - 1; } else { /* * This is the last SG queue in the list of * allocated SG queues. If there are more * SG elements than will fit in the allocated * queues, then set the QCSG_SG_XFER_MORE flag. 
*/ if (scsiq->remain_sg_entry_cnt != 0) { scsi_sg_q.cntl |= QCSG_SG_XFER_MORE; } else { scsi_sg_q.cntl |= QCSG_SG_XFER_END; } /* equals sg_entry_cnt * 2 */ sg_list_dwords = sg_entry_cnt << 1; scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1; scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1; sg_entry_cnt = 0; } scsi_sg_q.q_no = next_qp; AscMemWordCopyPtrToLram(iop_base, q_addr + ASC_SCSIQ_SGHD_CPY_BEG, (uchar *)&scsi_sg_q, sizeof(ASC_SG_LIST_Q) >> 1); AscMemDWordCopyPtrToLram(iop_base, q_addr + ASC_SGQ_LIST_BEG, (uchar *)&sg_head-> sg_list[scsiq->next_sg_index], sg_list_dwords); scsiq->next_sg_index += ASC_SG_LIST_PER_Q; /* * If the just completed SG queue contained the * last SG element, then no more SG queues need * to be written. */ if (scsi_sg_q.cntl & QCSG_SG_XFER_END) { break; } next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FWD)); q_addr = ASC_QNO_TO_QADDR(next_qp); } /* * Clear the halt condition so the RISC will be restarted * after the return. */ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); return (0); } #endif /* CC_VERY_LONG_SG_LIST */ return (0); } /* * void * DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words) * * Calling/Exit State: * none * * Description: * Input an ASC_QDONE_INFO structure from the chip */ static void DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words) { int i; ushort word; AscSetChipLramAddr(iop_base, s_addr); for (i = 0; i < 2 * words; i += 2) { if (i == 10) { continue; } word = inpw(iop_base + IOP_RAM_DATA); inbuf[i] = word & 0xff; inbuf[i + 1] = (word >> 8) & 0xff; } ASC_DBG_PRT_HEX(2, "DvcGetQinfo", inbuf, 2 * words); } static uchar _AscCopyLramScsiDoneQ(PortAddr iop_base, ushort q_addr, ASC_QDONE_INFO *scsiq, ASC_DCNT max_dma_count) { ushort _val; uchar sg_queue_cnt; DvcGetQinfo(iop_base, q_addr + ASC_SCSIQ_DONE_INFO_BEG, (uchar *)scsiq, (sizeof(ASC_SCSIQ_2) + sizeof(ASC_SCSIQ_3)) / 2); _val = AscReadLramWord(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS)); 
scsiq->q_status = (uchar)_val; scsiq->q_no = (uchar)(_val >> 8); _val = AscReadLramWord(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_CNTL)); scsiq->cntl = (uchar)_val; sg_queue_cnt = (uchar)(_val >> 8); _val = AscReadLramWord(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_SENSE_LEN)); scsiq->sense_len = (uchar)_val; scsiq->extra_bytes = (uchar)(_val >> 8); /* * Read high word of remain bytes from alternate location. */ scsiq->remain_bytes = (((ADV_DCNT)AscReadLramWord(iop_base, (ushort)(q_addr + (ushort) ASC_SCSIQ_W_ALT_DC1))) << 16); /* * Read low word of remain bytes from original location. */ scsiq->remain_bytes += AscReadLramWord(iop_base, (ushort)(q_addr + (ushort) ASC_SCSIQ_DW_REMAIN_XFER_CNT)); scsiq->remain_bytes &= max_dma_count; return sg_queue_cnt; } /* * asc_isr_callback() - Second Level Interrupt Handler called by AscISR(). * * Interrupt callback function for the Narrow SCSI Asc Library. */ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep) { struct asc_board *boardp; struct scsi_cmnd *scp; struct Scsi_Host *shost; ASC_DBG(1, "asc_dvc_varp 0x%p, qdonep 0x%p\n", asc_dvc_varp, qdonep); ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep); scp = advansys_srb_to_ptr(asc_dvc_varp, qdonep->d2.srb_ptr); if (!scp) return; ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len); shost = scp->device->host; ASC_STATS(shost, callback); ASC_DBG(1, "shost 0x%p\n", shost); boardp = shost_priv(shost); BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var); dma_unmap_single(boardp->dev, scp->SCp.dma_handle, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); /* * 'qdonep' contains the command's ending status. */ switch (qdonep->d3.done_stat) { case QD_NO_ERROR: ASC_DBG(2, "QD_NO_ERROR\n"); scp->result = 0; /* * Check for an underrun condition. * * If there was no error and an underrun condition, then * return the number of underrun bytes. 
*/ if (scsi_bufflen(scp) != 0 && qdonep->remain_bytes != 0 && qdonep->remain_bytes <= scsi_bufflen(scp)) { ASC_DBG(1, "underrun condition %u bytes\n", (unsigned)qdonep->remain_bytes); scsi_set_resid(scp, qdonep->remain_bytes); } break; case QD_WITH_ERROR: ASC_DBG(2, "QD_WITH_ERROR\n"); switch (qdonep->d3.host_stat) { case QHSTA_NO_ERROR: if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) { ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n"); ASC_DBG_PRT_SENSE(2, scp->sense_buffer, SCSI_SENSE_BUFFERSIZE); /* * Note: The 'status_byte()' macro used by * target drivers defined in scsi.h shifts the * status byte returned by host drivers right * by 1 bit. This is why target drivers also * use right shifted status byte definitions. * For instance target drivers use * CHECK_CONDITION, defined to 0x1, instead of * the SCSI defined check condition value of * 0x2. Host drivers are supposed to return * the status byte as it is defined by SCSI. */ scp->result = DRIVER_BYTE(DRIVER_SENSE) | STATUS_BYTE(qdonep->d3.scsi_stat); } else { scp->result = STATUS_BYTE(qdonep->d3.scsi_stat); } break; default: /* QHSTA error occurred */ ASC_DBG(1, "host_stat 0x%x\n", qdonep->d3.host_stat); scp->result = HOST_BYTE(DID_BAD_TARGET); break; } break; case QD_ABORTED_BY_HOST: ASC_DBG(1, "QD_ABORTED_BY_HOST\n"); scp->result = HOST_BYTE(DID_ABORT) | MSG_BYTE(qdonep->d3. scsi_msg) | STATUS_BYTE(qdonep->d3.scsi_stat); break; default: ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat); scp->result = HOST_BYTE(DID_ERROR) | MSG_BYTE(qdonep->d3. scsi_msg) | STATUS_BYTE(qdonep->d3.scsi_stat); break; } /* * If the 'init_tidmask' bit isn't already set for the target and the * current request finished normally, then set the bit for the target * to indicate that a device is present. 
*/ if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->device->id)) == 0 && qdonep->d3.done_stat == QD_NO_ERROR && qdonep->d3.host_stat == QHSTA_NO_ERROR) { boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->device->id); } asc_scsi_done(scp); } static int AscIsrQDone(ASC_DVC_VAR *asc_dvc) { uchar next_qp; uchar n_q_used; uchar sg_list_qp; uchar sg_queue_cnt; uchar q_cnt; uchar done_q_tail; uchar tid_no; ASC_SCSI_BIT_ID_TYPE scsi_busy; ASC_SCSI_BIT_ID_TYPE target_id; PortAddr iop_base; ushort q_addr; ushort sg_q_addr; uchar cur_target_qng; ASC_QDONE_INFO scsiq_buf; ASC_QDONE_INFO *scsiq; int false_overrun; iop_base = asc_dvc->iop_base; n_q_used = 1; scsiq = (ASC_QDONE_INFO *)&scsiq_buf; done_q_tail = (uchar)AscGetVarDoneQTail(iop_base); q_addr = ASC_QNO_TO_QADDR(done_q_tail); next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_FWD)); if (next_qp != ASC_QLINK_END) { AscPutVarDoneQTail(iop_base, next_qp); q_addr = ASC_QNO_TO_QADDR(next_qp); sg_queue_cnt = _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, asc_dvc->max_dma_count); AscWriteLramByte(iop_base, (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS), (uchar)(scsiq-> q_status & (uchar)~(QS_READY | QS_ABORTED))); tid_no = ASC_TIX_TO_TID(scsiq->d2.target_ix); target_id = ASC_TIX_TO_TARGET_ID(scsiq->d2.target_ix); if ((scsiq->cntl & QC_SG_HEAD) != 0) { sg_q_addr = q_addr; sg_list_qp = next_qp; for (q_cnt = 0; q_cnt < sg_queue_cnt; q_cnt++) { sg_list_qp = AscReadLramByte(iop_base, (ushort)(sg_q_addr + (ushort) ASC_SCSIQ_B_FWD)); sg_q_addr = ASC_QNO_TO_QADDR(sg_list_qp); if (sg_list_qp == ASC_QLINK_END) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SG_Q_LINKS); scsiq->d3.done_stat = QD_WITH_ERROR; scsiq->d3.host_stat = QHSTA_D_QDONE_SG_LIST_CORRUPTED; goto FATAL_ERR_QDONE; } AscWriteLramByte(iop_base, (ushort)(sg_q_addr + (ushort) ASC_SCSIQ_B_STATUS), QS_FREE); } n_q_used = sg_queue_cnt + 1; AscPutVarDoneQTail(iop_base, sg_list_qp); } if (asc_dvc->queue_full_or_busy & target_id) { cur_target_qng = 
AscReadLramByte(iop_base, (ushort)((ushort) ASC_QADR_BEG + (ushort) scsiq->d2. target_ix)); if (cur_target_qng < asc_dvc->max_dvc_qng[tid_no]) { scsi_busy = AscReadLramByte(iop_base, (ushort) ASCV_SCSIBUSY_B); scsi_busy &= ~target_id; AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); asc_dvc->queue_full_or_busy &= ~target_id; } } if (asc_dvc->cur_total_qng >= n_q_used) { asc_dvc->cur_total_qng -= n_q_used; if (asc_dvc->cur_dvc_qng[tid_no] != 0) { asc_dvc->cur_dvc_qng[tid_no]--; } } else { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CUR_QNG); scsiq->d3.done_stat = QD_WITH_ERROR; goto FATAL_ERR_QDONE; } if ((scsiq->d2.srb_ptr == 0UL) || ((scsiq->q_status & QS_ABORTED) != 0)) { return (0x11); } else if (scsiq->q_status == QS_DONE) { false_overrun = FALSE; if (scsiq->extra_bytes != 0) { scsiq->remain_bytes += (ADV_DCNT)scsiq->extra_bytes; } if (scsiq->d3.done_stat == QD_WITH_ERROR) { if (scsiq->d3.host_stat == QHSTA_M_DATA_OVER_RUN) { if ((scsiq-> cntl & (QC_DATA_IN | QC_DATA_OUT)) == 0) { scsiq->d3.done_stat = QD_NO_ERROR; scsiq->d3.host_stat = QHSTA_NO_ERROR; } else if (false_overrun) { scsiq->d3.done_stat = QD_NO_ERROR; scsiq->d3.host_stat = QHSTA_NO_ERROR; } } else if (scsiq->d3.host_stat == QHSTA_M_HUNG_REQ_SCSI_BUS_RESET) { AscStopChip(iop_base); AscSetChipControl(iop_base, (uchar)(CC_SCSI_RESET | CC_HALT)); udelay(60); AscSetChipControl(iop_base, CC_HALT); AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT); AscSetChipStatus(iop_base, 0); AscSetChipControl(iop_base, 0); } } if ((scsiq->cntl & QC_NO_CALLBACK) == 0) { asc_isr_callback(asc_dvc, scsiq); } else { if ((AscReadLramByte(iop_base, (ushort)(q_addr + (ushort) ASC_SCSIQ_CDB_BEG)) == START_STOP)) { asc_dvc->unit_not_ready &= ~target_id; if (scsiq->d3.done_stat != QD_NO_ERROR) { asc_dvc->start_motor &= ~target_id; } } } return (1); } else { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_Q_STATUS); FATAL_ERR_QDONE: if ((scsiq->cntl & QC_NO_CALLBACK) == 0) { asc_isr_callback(asc_dvc, scsiq); } return (0x80); } } 
return (0); } static int AscISR(ASC_DVC_VAR *asc_dvc) { ASC_CS_TYPE chipstat; PortAddr iop_base; ushort saved_ram_addr; uchar ctrl_reg; uchar saved_ctrl_reg; int int_pending; int status; uchar host_flag; iop_base = asc_dvc->iop_base; int_pending = FALSE; if (AscIsIntPending(iop_base) == 0) return int_pending; if ((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) { return ERR; } if (asc_dvc->in_critical_cnt != 0) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL); return ERR; } if (asc_dvc->is_in_int) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY); return ERR; } asc_dvc->is_in_int = TRUE; ctrl_reg = AscGetChipControl(iop_base); saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET | CC_SINGLE_STEP | CC_DIAG | CC_TEST)); chipstat = AscGetChipStatus(iop_base); if (chipstat & CSW_SCSI_RESET_LATCH) { if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) { int i = 10; int_pending = TRUE; asc_dvc->sdtr_done = 0; saved_ctrl_reg &= (uchar)(~CC_HALT); while ((AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE) && (i-- > 0)) { mdelay(100); } AscSetChipControl(iop_base, (CC_CHIP_RESET | CC_HALT)); AscSetChipControl(iop_base, CC_HALT); AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT); AscSetChipStatus(iop_base, 0); chipstat = AscGetChipStatus(iop_base); } } saved_ram_addr = AscGetChipLramAddr(iop_base); host_flag = AscReadLramByte(iop_base, ASCV_HOST_FLAG_B) & (uchar)(~ASC_HOST_FLAG_IN_ISR); AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, (uchar)(host_flag | (uchar)ASC_HOST_FLAG_IN_ISR)); if ((chipstat & CSW_INT_PENDING) || (int_pending)) { AscAckInterrupt(iop_base); int_pending = TRUE; if ((chipstat & CSW_HALTED) && (ctrl_reg & CC_SINGLE_STEP)) { if (AscIsrChipHalted(asc_dvc) == ERR) { goto ISR_REPORT_QDONE_FATAL_ERROR; } else { saved_ctrl_reg &= (uchar)(~CC_HALT); } } else { ISR_REPORT_QDONE_FATAL_ERROR: if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) { while (((status = AscIsrQDone(asc_dvc)) & 0x01) != 0) { } } else { do { if ((status = 
AscIsrQDone(asc_dvc)) == 1) { break; } } while (status == 0x11); } if ((status & 0x80) != 0) int_pending = ERR; } } AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag); AscSetChipLramAddr(iop_base, saved_ram_addr); AscSetChipControl(iop_base, saved_ctrl_reg); asc_dvc->is_in_int = FALSE; return int_pending; } /* * advansys_reset() * * Reset the bus associated with the command 'scp'. * * This function runs its own thread. Interrupts must be blocked but * sleeping is allowed and no locking other than for host structures is * required. Returns SUCCESS or FAILED. */ static int advansys_reset(struct scsi_cmnd *scp) { struct Scsi_Host *shost = scp->device->host; struct asc_board *boardp = shost_priv(shost); unsigned long flags; int status; int ret = SUCCESS; ASC_DBG(1, "0x%p\n", scp); ASC_STATS(shost, reset); scmd_printk(KERN_INFO, scp, "SCSI bus reset started...\n"); if (ASC_NARROW_BOARD(boardp)) { ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var; /* Reset the chip and SCSI bus. */ ASC_DBG(1, "before AscInitAsc1000Driver()\n"); status = AscInitAsc1000Driver(asc_dvc); /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */ if (asc_dvc->err_code || !asc_dvc->overrun_dma) { scmd_printk(KERN_INFO, scp, "SCSI bus reset error: " "0x%x, status: 0x%x\n", asc_dvc->err_code, status); ret = FAILED; } else if (status) { scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: " "0x%x\n", status); } else { scmd_printk(KERN_INFO, scp, "SCSI bus reset " "successful\n"); } ASC_DBG(1, "after AscInitAsc1000Driver()\n"); spin_lock_irqsave(shost->host_lock, flags); } else { /* * If the suggest reset bus flags are set, then reset the bus. * Otherwise only reset the device. */ ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var; /* * Reset the target's SCSI bus. 
*/ ASC_DBG(1, "before AdvResetChipAndSB()\n"); switch (AdvResetChipAndSB(adv_dvc)) { case ASC_TRUE: scmd_printk(KERN_INFO, scp, "SCSI bus reset " "successful\n"); break; case ASC_FALSE: default: scmd_printk(KERN_INFO, scp, "SCSI bus reset error\n"); ret = FAILED; break; } spin_lock_irqsave(shost->host_lock, flags); AdvISR(adv_dvc); } /* Save the time of the most recently completed reset. */ boardp->last_reset = jiffies; spin_unlock_irqrestore(shost->host_lock, flags); ASC_DBG(1, "ret %d\n", ret); return ret; } /* * advansys_biosparam() * * Translate disk drive geometry if the "BIOS greater than 1 GB" * support is enabled for a drive. * * ip (information pointer) is an int array with the following definition: * ip[0]: heads * ip[1]: sectors * ip[2]: cylinders */ static int advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int ip[]) { struct asc_board *boardp = shost_priv(sdev->host); ASC_DBG(1, "begin\n"); ASC_STATS(sdev->host, biosparam); if (ASC_NARROW_BOARD(boardp)) { if ((boardp->dvc_var.asc_dvc_var.dvc_cntl & ASC_CNTL_BIOS_GT_1GB) && capacity > 0x200000) { ip[0] = 255; ip[1] = 63; } else { ip[0] = 64; ip[1] = 32; } } else { if ((boardp->dvc_var.adv_dvc_var.bios_ctrl & BIOS_CTRL_EXTENDED_XLAT) && capacity > 0x200000) { ip[0] = 255; ip[1] = 63; } else { ip[0] = 64; ip[1] = 32; } } ip[2] = (unsigned long)capacity / (ip[0] * ip[1]); ASC_DBG(1, "end\n"); return 0; } /* * First-level interrupt handler. * * 'dev_id' is a pointer to the interrupting adapter's Scsi_Host. 
 */
static irqreturn_t advansys_interrupt(int irq, void *dev_id)
{
	struct Scsi_Host *shost = dev_id;
	struct asc_board *boardp = shost_priv(shost);
	irqreturn_t result = IRQ_NONE;

	ASC_DBG(2, "boardp 0x%p\n", boardp);
	/*
	 * All interrupt processing runs under the host lock; the second
	 * level handlers (AscISR/AdvISR) modify board state and LRAM.
	 */
	spin_lock(shost->host_lock);
	if (ASC_NARROW_BOARD(boardp)) {
		/*
		 * Narrow board: only claim the interrupt if the chip
		 * actually has one pending, so shared IRQ lines work.
		 */
		if (AscIsIntPending(shost->io_port)) {
			result = IRQ_HANDLED;
			ASC_STATS(shost, interrupt);
			ASC_DBG(1, "before AscISR()\n");
			AscISR(&boardp->dvc_var.asc_dvc_var);
		}
	} else {
		/* Wide board: AdvISR() itself reports whether it was ours. */
		ASC_DBG(1, "before AdvISR()\n");
		if (AdvISR(&boardp->dvc_var.adv_dvc_var)) {
			result = IRQ_HANDLED;
			ASC_STATS(shost, interrupt);
		}
	}
	spin_unlock(shost->host_lock);

	ASC_DBG(1, "end\n");
	return result;
}

/*
 * Request that the RISC processor halt by writing a stop request to the
 * ASCV_STOP_CODE_B LRAM byte, then poll (100 ms per try, up to ~2 s)
 * until the chip reports halted.  The original stop code is restored
 * before returning.  Returns 1 if the chip is halted, 0 otherwise.
 */
static int AscHostReqRiscHalt(PortAddr iop_base)
{
	int count = 0;
	int sta = 0;
	uchar saved_stop_code;

	if (AscIsChipHalted(iop_base))
		return (1);
	saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
	AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
			 ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP);
	do {
		if (AscIsChipHalted(iop_base)) {
			sta = 1;
			break;
		}
		mdelay(100);
	} while (count++ < 20);
	AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code);
	return (sta);
}

/*
 * Halt the RISC, program the synchronous transfer register for target
 * 'tid_no' with 'sdtr_data', then restart the chip.  Returns the status
 * of AscSetChipSynRegAtID(), or FALSE if the chip could not be halted.
 */
static int
AscSetRunChipSynRegAtID(PortAddr iop_base, uchar tid_no, uchar sdtr_data)
{
	int sta = FALSE;

	if (AscHostReqRiscHalt(iop_base)) {
		sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
		AscStartChip(iop_base);
	}
	return sta;
}

/*
 * Apply the asynchronous-transfer chip workaround (gated by
 * ASC_BUG_FIX_ASYN_USE_SYN) for a device that is not negotiating SDTR:
 * flag the target in pci_fix_asyn_xfer and program a fixed SDTR value.
 * Processor, scanner, ROM and tape devices are excluded from the
 * per-target fix; HP ROM devices are additionally recorded in
 * pci_fix_asyn_xfer_always (presumably consumed by the halt-code
 * handler — confirm against AscIsrChipHalted()).
 */
static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev)
{
	char type = sdev->type;
	ASC_SCSI_BIT_ID_TYPE tid_bits = 1 << sdev->id;

	if (!(asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ASYN_USE_SYN))
		return;
	if (asc_dvc->init_sdtr & tid_bits)
		return;

	if ((type == TYPE_ROM) && (strncmp(sdev->vendor, "HP ", 3) == 0))
		asc_dvc->pci_fix_asyn_xfer_always |= tid_bits;

	asc_dvc->pci_fix_asyn_xfer |= tid_bits;
	if ((type == TYPE_PROCESSOR) || (type == TYPE_SCANNER) ||
	    (type == TYPE_ROM) || (type == TYPE_TAPE))
		asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;

	if (asc_dvc->pci_fix_asyn_xfer & tid_bits)
		AscSetRunChipSynRegAtID(asc_dvc->iop_base, sdev->id,
					ASYN_SDTR_DATA_FIX_PCI_REV_AB);
}

/*
 * Narrow (Asc) board slave_configure: enable or disable SDTR and tagged
 * queuing for the device according to the EEPROM configuration and the
 * device's reported capabilities, and push any changed queuing settings
 * down to the microcode LRAM variables.  Per-target state is only
 * changed for LUN 0.
 */
static void
advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
{
	ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id;
	ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng;

	if (sdev->lun == 0) {
		ASC_SCSI_BIT_ID_TYPE orig_init_sdtr = asc_dvc->init_sdtr;
		/* Turn SDTR on only if both EEPROM and device allow it. */
		if ((asc_dvc->cfg->sdtr_enable & tid_bit) && sdev->sdtr) {
			asc_dvc->init_sdtr |= tid_bit;
		} else {
			asc_dvc->init_sdtr &= ~tid_bit;
		}

		/* SDTR setting changed: re-evaluate the async workaround. */
		if (orig_init_sdtr != asc_dvc->init_sdtr)
			AscAsyncFix(asc_dvc, sdev);
	}

	if (sdev->tagged_supported) {
		if (asc_dvc->cfg->cmd_qng_enabled & tid_bit) {
			if (sdev->lun == 0) {
				asc_dvc->cfg->can_tagged_qng |= tid_bit;
				asc_dvc->use_tagged_qng |= tid_bit;
			}
			scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
						asc_dvc->max_dvc_qng[sdev->id]);
		}
	} else {
		if (sdev->lun == 0) {
			asc_dvc->cfg->can_tagged_qng &= ~tid_bit;
			asc_dvc->use_tagged_qng &= ~tid_bit;
		}
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	}

	/* Queuing configuration changed: mirror it into microcode LRAM. */
	if ((sdev->lun == 0) &&
	    (orig_use_tagged_qng != asc_dvc->use_tagged_qng)) {
		AscWriteLramByte(asc_dvc->iop_base, ASCV_DISC_ENABLE_B,
				 asc_dvc->cfg->disc_enable);
		AscWriteLramByte(asc_dvc->iop_base, ASCV_USE_TAGGED_QNG_B,
				 asc_dvc->use_tagged_qng);
		AscWriteLramByte(asc_dvc->iop_base, ASCV_CAN_TAGGED_QNG_B,
				 asc_dvc->cfg->can_tagged_qng);

		asc_dvc->max_dvc_qng[sdev->id] =
		    asc_dvc->cfg->max_tag_qng[sdev->id];
		AscWriteLramByte(asc_dvc->iop_base,
				 (ushort)(ASCV_MAX_DVC_QNG_BEG + sdev->id),
				 asc_dvc->max_dvc_qng[sdev->id]);
	}
}

/*
 * Wide Transfers
 *
 * If the EEPROM enabled WDTR for the device and the device supports wide
 * bus (16 bit) transfers, then turn on the device's 'wdtr_able' bit and
 * write the new value to the microcode.
 */
static void advansys_wide_enable_wdtr(AdvPortAddr iop_base,
				      unsigned short tidmask)
{
	unsigned short cfg_word;

	/* AdvReadWordLram() is a macro that stores the LRAM word it reads
	 * into its third argument ('cfg_word'). */
	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
	if ((cfg_word & tidmask) != 0)
		return;		/* WDTR already enabled for this target */

	cfg_word |= tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);

	/*
	 * Clear the microcode SDTR and WDTR negotiation done indicators for
	 * the target to cause it to negotiate with the new setting set above.
	 * WDTR when accepted causes the target to enter asynchronous mode, so
	 * SDTR must be negotiated.
	 */
	AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
	cfg_word &= ~tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
	AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
	cfg_word &= ~tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
}

/*
 * Synchronous Transfers
 *
 * If the EEPROM enabled SDTR for the device and the device
 * supports synchronous transfers, then turn on the device's
 * 'sdtr_able' bit. Write the new value to the microcode.
 */
static void advansys_wide_enable_sdtr(AdvPortAddr iop_base,
				      unsigned short tidmask)
{
	unsigned short cfg_word;

	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
	if ((cfg_word & tidmask) != 0)
		return;		/* SDTR already enabled for this target */

	cfg_word |= tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);

	/*
	 * Clear the microcode "SDTR negotiation" done indicator for the
	 * target to cause it to negotiate with the new setting set above.
	 */
	AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
	cfg_word &= ~tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
}

/*
 * PPR (Parallel Protocol Request) Capable
 *
 * If the device supports DT mode, then it must be PPR capable.
 * The PPR message will be used in place of the SDTR and WDTR
 * messages to negotiate synchronous speed and offset, transfer
 * width, and protocol options.
 */
static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc,
				     AdvPortAddr iop_base,
				     unsigned short tidmask)
{
	/* Refresh the driver's cached ppr_able mask from LRAM, set the
	 * target's bit, and write the mask back to the microcode. */
	AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able);
	adv_dvc->ppr_able |= tidmask;
	AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able);
}

/*
 * Wide (Adv) board slave_configure: enable WDTR, SDTR, PPR and tagged
 * queuing in the microcode for features the EEPROM allows and the
 * device supports (LUN 0 only), then set the mid-layer queue depth.
 */
static void
advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
{
	AdvPortAddr iop_base = adv_dvc->iop_base;
	unsigned short tidmask = 1 << sdev->id;

	if (sdev->lun == 0) {
		/*
		 * Handle WDTR, SDTR, and Tag Queuing. If the feature
		 * is enabled in the EEPROM and the device supports the
		 * feature, then enable it in the microcode.
		 */
		if ((adv_dvc->wdtr_able & tidmask) && sdev->wdtr)
			advansys_wide_enable_wdtr(iop_base, tidmask);
		if ((adv_dvc->sdtr_able & tidmask) && sdev->sdtr)
			advansys_wide_enable_sdtr(iop_base, tidmask);
		/* PPR is only offered by the ASC38C1600 chip. */
		if (adv_dvc->chip_type == ADV_CHIP_ASC38C1600 && sdev->ppr)
			advansys_wide_enable_ppr(adv_dvc, iop_base, tidmask);

		/*
		 * Tag Queuing is disabled for the BIOS which runs in polled
		 * mode and would see no benefit from Tag Queuing. Also by
		 * disabling Tag Queuing in the BIOS devices with Tag Queuing
		 * bugs will at least work with the BIOS.
		 */
		if ((adv_dvc->tagqng_able & tidmask) &&
		    sdev->tagged_supported) {
			unsigned short cfg_word;

			AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
					cfg_word);
			cfg_word |= tidmask;
			AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
					 cfg_word);

			AdvWriteByteLram(iop_base,
					 ASC_MC_NUMBER_OF_MAX_CMD + sdev->id,
					 adv_dvc->max_dvc_qng);
		}
	}

	/* Tell the mid-layer how deep this device's queue may be. */
	if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) {
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
					adv_dvc->max_dvc_qng);
	} else {
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	}
}

/*
 * Set the number of commands to queue per device for the
 * specified host adapter.
*/ static int advansys_slave_configure(struct scsi_device *sdev) { struct asc_board *boardp = shost_priv(sdev->host); if (ASC_NARROW_BOARD(boardp)) advansys_narrow_slave_configure(sdev, &boardp->dvc_var.asc_dvc_var); else advansys_wide_slave_configure(sdev, &boardp->dvc_var.adv_dvc_var); return 0; } static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp) { struct asc_board *board = shost_priv(scp->device->host); scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); dma_cache_sync(board->dev, scp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); return cpu_to_le32(scp->SCp.dma_handle); } static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, struct asc_scsi_q *asc_scsi_q) { struct asc_dvc_var *asc_dvc = &boardp->dvc_var.asc_dvc_var; int use_sg; memset(asc_scsi_q, 0, sizeof(*asc_scsi_q)); /* * Point the ASC_SCSI_Q to the 'struct scsi_cmnd'. */ asc_scsi_q->q2.srb_ptr = advansys_ptr_to_srb(asc_dvc, scp); if (asc_scsi_q->q2.srb_ptr == BAD_SRB) { scp->result = HOST_BYTE(DID_SOFT_ERROR); return ASC_ERROR; } /* * Build the ASC_SCSI_Q request. */ asc_scsi_q->cdbptr = &scp->cmnd[0]; asc_scsi_q->q2.cdb_len = scp->cmd_len; asc_scsi_q->q1.target_id = ASC_TID_TO_TARGET_ID(scp->device->id); asc_scsi_q->q1.target_lun = scp->device->lun; asc_scsi_q->q2.target_ix = ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun); asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp); asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE; /* * If there are any outstanding requests for the current target, * then every 255th request send an ORDERED request. This heuristic * tries to retain the benefit of request sorting while preventing * request starvation. 255 is the max number of tags or pending commands * a device may have outstanding. * * The request count is incremented below for every successfully * started request. 
* */ if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) && (boardp->reqcnt[scp->device->id] % 255) == 0) { asc_scsi_q->q2.tag_code = MSG_ORDERED_TAG; } else { asc_scsi_q->q2.tag_code = MSG_SIMPLE_TAG; } /* Build ASC_SCSI_Q */ use_sg = scsi_dma_map(scp); if (use_sg != 0) { int sgcnt; struct scatterlist *slp; struct asc_sg_head *asc_sg_head; if (use_sg > scp->device->host->sg_tablesize) { scmd_printk(KERN_ERR, scp, "use_sg %d > " "sg_tablesize %d\n", use_sg, scp->device->host->sg_tablesize); scsi_dma_unmap(scp); scp->result = HOST_BYTE(DID_ERROR); return ASC_ERROR; } asc_sg_head = kzalloc(sizeof(asc_scsi_q->sg_head) + use_sg * sizeof(struct asc_sg_list), GFP_ATOMIC); if (!asc_sg_head) { scsi_dma_unmap(scp); scp->result = HOST_BYTE(DID_SOFT_ERROR); return ASC_ERROR; } asc_scsi_q->q1.cntl |= QC_SG_HEAD; asc_scsi_q->sg_head = asc_sg_head; asc_scsi_q->q1.data_cnt = 0; asc_scsi_q->q1.data_addr = 0; /* This is a byte value, otherwise it would need to be swapped. */ asc_sg_head->entry_cnt = asc_scsi_q->q1.sg_queue_cnt = use_sg; ASC_STATS_ADD(scp->device->host, xfer_elem, asc_sg_head->entry_cnt); /* * Convert scatter-gather list into ASC_SG_HEAD list. */ scsi_for_each_sg(scp, slp, use_sg, sgcnt) { asc_sg_head->sg_list[sgcnt].addr = cpu_to_le32(sg_dma_address(slp)); asc_sg_head->sg_list[sgcnt].bytes = cpu_to_le32(sg_dma_len(slp)); ASC_STATS_ADD(scp->device->host, xfer_sect, DIV_ROUND_UP(sg_dma_len(slp), 512)); } } ASC_STATS(scp->device->host, xfer_cnt); ASC_DBG_PRT_ASC_SCSI_Q(2, asc_scsi_q); ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len); return ASC_NOERROR; } /* * Build scatter-gather list for Adv Library (Wide Board). * * Additional ADV_SG_BLOCK structures will need to be allocated * if the total number of scatter-gather elements exceeds * NO_OF_SG_PER_BLOCK (15). The ADV_SG_BLOCK structures are * assumed to be physically contiguous. 
* * Return: * ADV_SUCCESS(1) - SG List successfully created * ADV_ERROR(-1) - SG List creation failed */ static int adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp, int use_sg) { adv_sgblk_t *sgblkp; ADV_SCSI_REQ_Q *scsiqp; struct scatterlist *slp; int sg_elem_cnt; ADV_SG_BLOCK *sg_block, *prev_sg_block; ADV_PADDR sg_block_paddr; int i; scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q); slp = scsi_sglist(scp); sg_elem_cnt = use_sg; prev_sg_block = NULL; reqp->sgblkp = NULL; for (;;) { /* * Allocate a 'adv_sgblk_t' structure from the board free * list. One 'adv_sgblk_t' structure holds NO_OF_SG_PER_BLOCK * (15) scatter-gather elements. */ if ((sgblkp = boardp->adv_sgblkp) == NULL) { ASC_DBG(1, "no free adv_sgblk_t\n"); ASC_STATS(scp->device->host, adv_build_nosg); /* * Allocation failed. Free 'adv_sgblk_t' structures * already allocated for the request. */ while ((sgblkp = reqp->sgblkp) != NULL) { /* Remove 'sgblkp' from the request list. */ reqp->sgblkp = sgblkp->next_sgblkp; /* Add 'sgblkp' to the board free list. */ sgblkp->next_sgblkp = boardp->adv_sgblkp; boardp->adv_sgblkp = sgblkp; } return ASC_BUSY; } /* Complete 'adv_sgblk_t' board allocation. */ boardp->adv_sgblkp = sgblkp->next_sgblkp; sgblkp->next_sgblkp = NULL; /* * Get 8 byte aligned virtual and physical addresses * for the allocated ADV_SG_BLOCK structure. */ sg_block = (ADV_SG_BLOCK *)ADV_8BALIGN(&sgblkp->sg_block); sg_block_paddr = virt_to_bus(sg_block); /* * Check if this is the first 'adv_sgblk_t' for the * request. */ if (reqp->sgblkp == NULL) { /* Request's first scatter-gather block. */ reqp->sgblkp = sgblkp; /* * Set ADV_SCSI_REQ_T ADV_SG_BLOCK virtual and physical * address pointers. */ scsiqp->sg_list_ptr = sg_block; scsiqp->sg_real_addr = cpu_to_le32(sg_block_paddr); } else { /* Request's second or later scatter-gather block. 
	 */
		/* Request's second or later scatter-gather block. */
		sgblkp->next_sgblkp = reqp->sgblkp;
		reqp->sgblkp = sgblkp;

		/*
		 * Point the previous ADV_SG_BLOCK structure to
		 * the newly allocated ADV_SG_BLOCK structure.
		 */
		prev_sg_block->sg_ptr = cpu_to_le32(sg_block_paddr);
	}

	/* Fill this ADV_SG_BLOCK with mapped SG addresses/lengths. */
	for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
		sg_block->sg_list[i].sg_addr =
		    cpu_to_le32(sg_dma_address(slp));
		sg_block->sg_list[i].sg_count =
		    cpu_to_le32(sg_dma_len(slp));
		/* Account transferred sectors (512-byte units) in stats. */
		ASC_STATS_ADD(scp->device->host, xfer_sect,
			      DIV_ROUND_UP(sg_dma_len(slp), 512));

		if (--sg_elem_cnt == 0) {
			/* Last ADV_SG_BLOCK and scatter-gather entry. */
			sg_block->sg_cnt = i + 1;
			sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */
			return ADV_SUCCESS;
		}
		slp++;
	}
	/* Block full; chain continues with the next ADV_SG_BLOCK. */
	sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
	prev_sg_block = sg_block;
	}
}

/*
 * Build a request structure for the Adv Library (Wide Board).
 *
 * If an adv_req_t can not be allocated to issue the request,
 * then return ASC_BUSY. If an error occurs, then return ASC_ERROR.
 *
 * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the
 * microcode for DMA addresses or math operations are byte swapped
 * to little-endian order.
 */
static int
adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
	      ADV_SCSI_REQ_Q **adv_scsiqpp)
{
	adv_req_t *reqp;
	ADV_SCSI_REQ_Q *scsiqp;
	int i;
	int ret;
	int use_sg;

	/*
	 * Allocate an adv_req_t structure from the board to execute
	 * the command.
	 */
	if (boardp->adv_reqp == NULL) {
		ASC_DBG(1, "no free adv_req_t\n");
		ASC_STATS(scp->device->host, adv_build_noreq);
		return ASC_BUSY;
	} else {
		/* Unlink the request from the board's free list. */
		reqp = boardp->adv_reqp;
		boardp->adv_reqp = reqp->next_reqp;
		reqp->next_reqp = NULL;
	}

	/*
	 * Get 32-byte aligned ADV_SCSI_REQ_Q and ADV_SG_BLOCK pointers.
	 */
	scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q);

	/*
	 * Initialize the structure.
	 */
	scsiqp->cntl = scsiqp->scsi_cntl = scsiqp->done_status = 0;

	/*
	 * Set the ADV_SCSI_REQ_Q 'srb_ptr' to point to the adv_req_t structure.
	 */
	scsiqp->srb_ptr = ADV_VADDR_TO_U32(reqp);

	/*
	 * Set the adv_req_t 'cmndp' to point to the struct scsi_cmnd structure.
	 */
	reqp->cmndp = scp;

	/*
	 * Build the ADV_SCSI_REQ_Q request.
	 */

	/* Set CDB length and copy it to the request structure.  */
	scsiqp->cdb_len = scp->cmd_len;
	/* Copy first 12 CDB bytes to cdb[]. */
	for (i = 0; i < scp->cmd_len && i < 12; i++) {
		scsiqp->cdb[i] = scp->cmnd[i];
	}
	/* Copy last 4 CDB bytes, if present, to cdb16[]. */
	for (; i < scp->cmd_len; i++) {
		scsiqp->cdb16[i - 12] = scp->cmnd[i];
	}

	scsiqp->target_id = scp->device->id;
	scsiqp->target_lun = scp->device->lun;

	/*
	 * NOTE(review): virt_to_bus() is a legacy DMA-address translation;
	 * the mid-layer sense buffer is handed to the chip directly here.
	 */
	scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0]));
	scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;

	/* Build ADV_SCSI_REQ_Q */

	use_sg = scsi_dma_map(scp);
	if (use_sg == 0) {
		/* Zero-length transfer */
		reqp->sgblkp = NULL;
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = NULL;

		scsiqp->data_addr = 0;
		scsiqp->sg_list_ptr = NULL;
		scsiqp->sg_real_addr = 0;
	} else {
		if (use_sg > ADV_MAX_SG_LIST) {
			scmd_printk(KERN_ERR, scp, "use_sg %d > "
				    "ADV_MAX_SG_LIST %d\n", use_sg,
				    scp->device->host->sg_tablesize);
			scsi_dma_unmap(scp);
			scp->result = HOST_BYTE(DID_ERROR);

			/*
			 * Free the 'adv_req_t' structure by adding it back
			 * to the board free list.
			 */
			reqp->next_reqp = boardp->adv_reqp;
			boardp->adv_reqp = reqp;

			return ASC_ERROR;
		}

		scsiqp->data_cnt = cpu_to_le32(scsi_bufflen(scp));

		ret = adv_get_sglist(boardp, reqp, scp, use_sg);
		if (ret != ADV_SUCCESS) {
			/*
			 * Free the adv_req_t structure by adding it back to
			 * the board free list.
			 */
			reqp->next_reqp = boardp->adv_reqp;
			boardp->adv_reqp = reqp;

			return ret;
		}

		ASC_STATS_ADD(scp->device->host, xfer_elem, use_sg);
	}

	ASC_STATS(scp->device->host, xfer_cnt);

	ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
	ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);

	*adv_scsiqpp = scsiqp;

	return ASC_NOERROR;
}

/*
 * Return the number of microcode queues needed to hold 'sg_list'
 * scatter-gather elements: one head queue plus enough SG list queues
 * (ASC_SG_LIST_PER_Q elements each, first element lives in the head).
 */
static int AscSgListToQueue(int sg_list)
{
	int n_sg_list_qs;

	n_sg_list_qs = ((sg_list - 1) / ASC_SG_LIST_PER_Q);
	if (((sg_list - 1) % ASC_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return n_sg_list_qs + 1;
}

/*
 * Return the number of free microcode queues available for 'target_ix',
 * or 0 if the device is not ready, queue-full/busy, at its per-device
 * limit, or the request would dip below the ASC_MIN_FREE_Q reserve.
 * As a side effect, records a multi-queue shortage in 'last_q_shortage'.
 */
static uint
AscGetNumOfFreeQueue(ASC_DVC_VAR *asc_dvc, uchar target_ix, uchar n_qs)
{
	uint cur_used_qs;
	uint cur_free_qs;
	ASC_SCSI_BIT_ID_TYPE target_id;
	uchar tid_no;

	target_id = ASC_TIX_TO_TARGET_ID(target_ix);
	tid_no = ASC_TIX_TO_TID(target_ix);
	if ((asc_dvc->unit_not_ready & target_id) ||
	    (asc_dvc->queue_full_or_busy & target_id)) {
		return 0;
	}
	if (n_qs == 1) {
		/* Single-queue requests also reserve for the last shortage. */
		cur_used_qs = (uint) asc_dvc->cur_total_qng +
		    (uint) asc_dvc->last_q_shortage + (uint) ASC_MIN_FREE_Q;
	} else {
		cur_used_qs = (uint) asc_dvc->cur_total_qng +
		    (uint) ASC_MIN_FREE_Q;
	}
	if ((uint) (cur_used_qs + n_qs) <= (uint) asc_dvc->max_total_qng) {
		cur_free_qs = (uint) asc_dvc->max_total_qng - cur_used_qs;
		if (asc_dvc->cur_dvc_qng[tid_no] >=
		    asc_dvc->max_dvc_qng[tid_no]) {
			return 0;
		}
		return cur_free_qs;
	}
	if (n_qs > 1) {
		/* Remember the largest unmet multi-queue request. */
		if ((n_qs > asc_dvc->last_q_shortage) &&
		    (n_qs <= (asc_dvc->max_total_qng - ASC_MIN_FREE_Q))) {
			asc_dvc->last_q_shortage = n_qs;
		}
	}
	return 0;
}

/*
 * Follow the free-queue list in LRAM from 'free_q_head' and return the
 * next free queue number, or ASC_QLINK_END if none is available.
 */
static uchar AscAllocFreeQueue(PortAddr iop_base, uchar free_q_head)
{
	ushort q_addr;
	uchar next_qp;
	uchar q_status;

	q_addr = ASC_QNO_TO_QADDR(free_q_head);
	q_status = (uchar)AscReadLramByte(iop_base,
					  (ushort)(q_addr +
						   ASC_SCSIQ_B_STATUS));
	next_qp = AscReadLramByte(iop_base, (ushort)(q_addr + ASC_SCSIQ_B_FWD));
	if (((q_status & QS_READY) == 0) && (next_qp != ASC_QLINK_END))
		return next_qp;
	return ASC_QLINK_END;
}

/*
 * Allocate 'n_free_q' queues in a row starting at 'free_q_head'.
 * Returns the queue number following the allocated run, or
 * ASC_QLINK_END if the chain ran out before n_free_q queues.
 */
static uchar
AscAllocMultipleFreeQueue(PortAddr iop_base, uchar free_q_head, uchar n_free_q)
{
	uchar i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = AscAllocFreeQueue(iop_base, free_q_head);
		if (free_q_head == ASC_QLINK_END)
			break;
	}
	return free_q_head;
}

/*
 * void
 * DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words)
 *
 * Calling/Exit State:
 * none
 *
 * Description:
 * Output an ASC_SCSI_Q structure to the chip
 */
static void
DvcPutScsiQ(PortAddr iop_base, ushort s_addr, uchar *outbuf, int words)
{
	int i;

	ASC_DBG_PRT_HEX(2, "DvcPutScsiQ", outbuf, 2 * words);
	AscSetChipLramAddr(iop_base, s_addr);
	for (i = 0; i < 2 * words; i += 2) {
		/*
		 * Skip bytes 4-5 and 20-21 of the structure; presumably
		 * these are host-only fields the chip must not see -
		 * TODO confirm against the ASC_SCSI_Q layout.
		 */
		if (i == 4 || i == 20) {
			continue;
		}
		/* Write one little-endian word to the LRAM data port. */
		outpw(iop_base + IOP_RAM_DATA,
		      ((ushort)outbuf[i + 1] << 8) | outbuf[i]);
	}
}

/*
 * Copy a ready ASC_SCSI_Q into microcode queue 'q_no' in LRAM and mark
 * it QS_READY. Sends an SDTR message first if synchronous negotiation
 * is enabled but not yet done for the target. Returns 1.
 */
static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
{
	ushort q_addr;
	uchar tid_no;
	uchar sdtr_data;
	uchar syn_period_ix;
	uchar syn_offset;
	PortAddr iop_base;

	iop_base = asc_dvc->iop_base;
	if (((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) &&
	    ((asc_dvc->sdtr_done & scsiq->q1.target_id) == 0)) {
		/* SDTR wanted but not yet negotiated: queue a message-out. */
		tid_no = ASC_TIX_TO_TID(scsiq->q2.target_ix);
		sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
		syn_period_ix =
		    (sdtr_data >> 4) & (asc_dvc->max_sdtr_index - 1);
		syn_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
		AscMsgOutSDTR(asc_dvc,
			      asc_dvc->sdtr_period_tbl[syn_period_ix],
			      syn_offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ASC_QNO_TO_QADDR(q_no);
	if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
		/* Target does not do tagged queuing; strip the tag message. */
		scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
	}
	scsiq->q1.status = QS_FREE;
	/* Copy the CDB into the queue's CDB area. */
	AscMemWordCopyPtrToLram(iop_base,
				q_addr + ASC_SCSIQ_CDB_BEG,
				(uchar *)scsiq->cdbptr, scsiq->q2.cdb_len >> 1);
	/* Copy q1/q2 (minus the skipped host-only words) to the chip. */
	DvcPutScsiQ(iop_base,
		    q_addr + ASC_SCSIQ_CPY_BEG,
		    (uchar *)&scsiq->q1.cntl,
		    ((sizeof(ASC_SCSIQ_1) + sizeof(ASC_SCSIQ_2)) / 2) - 1);
	/* Atomically publish queue number and QS_READY status. */
	AscWriteLramWord(iop_base,
			 (ushort)(q_addr + (ushort)ASC_SCSIQ_B_STATUS),
			 (ushort)(((ushort)scsiq->q1.
				   q_no << 8) | (ushort)QS_READY));
	return 1;
}

/*
 * Copy a scatter-gather request into the run of LRAM queues headed by
 * 'q_no': the head queue gets the first SG element (via AscPutReadyQueue),
 * the chained queues get ASC_SG_LIST_Q headers plus SG element lists.
 * q1.data_addr/data_cnt are temporarily overwritten with SG element 0
 * and restored before returning AscPutReadyQueue()'s status.
 */
static int
AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
{
	int sta;
	int i;
	ASC_SG_HEAD *sg_head;
	ASC_SG_LIST_Q scsi_sg_q;
	ASC_DCNT saved_data_addr;
	ASC_DCNT saved_data_cnt;
	PortAddr iop_base;
	ushort sg_list_dwords;
	ushort sg_index;
	ushort sg_entry_cnt;
	ushort q_addr;
	uchar next_qp;

	iop_base = asc_dvc->iop_base;
	sg_head = scsiq->sg_head;
	saved_data_addr = scsiq->q1.data_addr;
	saved_data_cnt = scsiq->q1.data_cnt;
	scsiq->q1.data_addr = (ASC_PADDR) sg_head->sg_list[0].addr;
	scsiq->q1.data_cnt = (ASC_DCNT) sg_head->sg_list[0].bytes;
#if CC_VERY_LONG_SG_LIST
	/*
	 * If sg_head->entry_cnt is greater than ASC_MAX_SG_LIST
	 * then not all SG elements will fit in the allocated queues.
	 * The rest of the SG elements will be copied when the RISC
	 * completes the SG elements that fit and halts.
	 */
	if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
		/*
		 * Set sg_entry_cnt to be the number of SG elements that
		 * will fit in the allocated SG queues. It is minus 1, because
		 * the first SG element is handled above. ASC_MAX_SG_LIST is
		 * already inflated by 1 to account for this. For example it
		 * may be 50 which is 1 + 7 queues * 7 SG elements.
		 */
		sg_entry_cnt = ASC_MAX_SG_LIST - 1;

		/*
		 * Keep track of remaining number of SG elements that will
		 * need to be handled from a_isr.c.
		 */
		scsiq->remain_sg_entry_cnt =
		    sg_head->entry_cnt - ASC_MAX_SG_LIST;
	} else {
#endif /* CC_VERY_LONG_SG_LIST */
		/*
		 * Set sg_entry_cnt to be the number of SG elements that
		 * will fit in the allocated SG queues. It is minus 1, because
		 * the first SG element is handled above.
		 */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#if CC_VERY_LONG_SG_LIST
	}
#endif /* CC_VERY_LONG_SG_LIST */
	if (sg_entry_cnt != 0) {
		scsiq->q1.cntl |= QC_SG_HEAD;
		q_addr = ASC_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		/* Fill each chained SG queue in turn. */
		for (i = 0; i < sg_head->queue_cnt; i++) {
			scsi_sg_q.seq_no = i + 1;
			if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
				sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2);
				sg_entry_cnt -= ASC_SG_LIST_PER_Q;
				/*
				 * The first SG queue holds one fewer usable
				 * element slot than the rest - the counts
				 * below encode that asymmetry.
				 */
				if (i == 0) {
					scsi_sg_q.sg_list_cnt =
					    ASC_SG_LIST_PER_Q;
					scsi_sg_q.sg_cur_list_cnt =
					    ASC_SG_LIST_PER_Q;
				} else {
					scsi_sg_q.sg_list_cnt =
					    ASC_SG_LIST_PER_Q - 1;
					scsi_sg_q.sg_cur_list_cnt =
					    ASC_SG_LIST_PER_Q - 1;
				}
			} else {
#if CC_VERY_LONG_SG_LIST
				/*
				 * This is the last SG queue in the list of
				 * allocated SG queues. If there are more
				 * SG elements than will fit in the allocated
				 * queues, then set the QCSG_SG_XFER_MORE flag.
				 */
				if (sg_head->entry_cnt > ASC_MAX_SG_LIST) {
					scsi_sg_q.cntl |= QCSG_SG_XFER_MORE;
				} else {
#endif /* CC_VERY_LONG_SG_LIST */
					scsi_sg_q.cntl |= QCSG_SG_XFER_END;
#if CC_VERY_LONG_SG_LIST
				}
#endif /* CC_VERY_LONG_SG_LIST */
				sg_list_dwords = sg_entry_cnt << 1;
				if (i == 0) {
					scsi_sg_q.sg_list_cnt = sg_entry_cnt;
					scsi_sg_q.sg_cur_list_cnt =
					    sg_entry_cnt;
				} else {
					scsi_sg_q.sg_list_cnt =
					    sg_entry_cnt - 1;
					scsi_sg_q.sg_cur_list_cnt =
					    sg_entry_cnt - 1;
				}
				sg_entry_cnt = 0;
			}
			/* Follow the free-queue chain to the next queue. */
			next_qp = AscReadLramByte(iop_base,
						  (ushort)(q_addr +
							   ASC_SCSIQ_B_FWD));
			scsi_sg_q.q_no = next_qp;
			q_addr = ASC_QNO_TO_QADDR(next_qp);
			/* Write the SG queue header, then the SG elements. */
			AscMemWordCopyPtrToLram(iop_base,
						q_addr + ASC_SCSIQ_SGHD_CPY_BEG,
						(uchar *)&scsi_sg_q,
						sizeof(ASC_SG_LIST_Q) >> 1);
			AscMemDWordCopyPtrToLram(iop_base,
						 q_addr + ASC_SGQ_LIST_BEG,
						 (uchar *)&sg_head->
						 sg_list[sg_index],
						 sg_list_dwords);
			sg_index += ASC_SG_LIST_PER_Q;
			scsiq->next_sg_index = sg_index;
		}
	} else {
		/* Single-element transfer: no SG chain needed. */
		scsiq->q1.cntl &= ~QC_SG_HEAD;
	}
	sta = AscPutReadyQueue(asc_dvc, scsiq, q_no);
	/* Restore the caller's original data pointer/count. */
	scsiq->q1.data_addr = saved_data_addr;
	scsiq->q1.data_cnt =
	    saved_data_cnt;
	return (sta);
}

/*
 * Allocate the required number of microcode queues and hand 'scsiq' to
 * the chip. Returns 1 on success (free-queue head advanced and queue
 * counts updated), 0 if the queues could not be allocated.
 */
static int
AscSendScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar n_q_required)
{
	PortAddr iop_base;
	uchar free_q_head;
	uchar next_qp;
	uchar tid_no;
	uchar target_ix;
	int sta;

	iop_base = asc_dvc->iop_base;
	target_ix = scsiq->q2.target_ix;
	tid_no = ASC_TIX_TO_TID(target_ix);
	sta = 0;
	free_q_head = (uchar)AscGetVarFreeQHead(iop_base);
	if (n_q_required > 1) {
		/* Scatter-gather request: needs a run of queues. */
		next_qp = AscAllocMultipleFreeQueue(iop_base, free_q_head,
						    (uchar)n_q_required);
		if (next_qp != ASC_QLINK_END) {
			asc_dvc->last_q_shortage = 0;
			scsiq->sg_head->queue_cnt = n_q_required - 1;
			scsiq->q1.q_no = free_q_head;
			sta = AscPutReadySgListQueue(asc_dvc, scsiq,
						     free_q_head);
		}
	} else if (n_q_required == 1) {
		next_qp = AscAllocFreeQueue(iop_base, free_q_head);
		if (next_qp != ASC_QLINK_END) {
			scsiq->q1.q_no = free_q_head;
			sta = AscPutReadyQueue(asc_dvc, scsiq, free_q_head);
		}
	}
	if (sta == 1) {
		/* Commit: advance the free-queue head and bump counters. */
		AscPutVarFreeQHead(iop_base, next_qp);
		asc_dvc->cur_total_qng += n_q_required;
		asc_dvc->cur_dvc_qng[tid_no]++;
	}
	return sta;
}

/*
 * Commands for which the "synchronous offset one" PCI fix is disabled;
 * 0xFF entries terminate the list.
 */
#define ASC_SYN_OFFSET_ONE_DISABLE_LIST 16
static uchar _syn_offset_one_disable_cmd[ASC_SYN_OFFSET_ONE_DISABLE_LIST] = {
	INQUIRY,
	REQUEST_SENSE,
	READ_CAPACITY,
	READ_TOC,
	MODE_SELECT,
	MODE_SENSE,
	MODE_SELECT_10,
	MODE_SENSE_10,
	0xFF,
	0xFF,
	0xFF,
	0xFF,
	0xFF,
	0xFF,
	0xFF,
	0xFF
};

/*
 * Execute an ASC_SCSI_Q request on a narrow board: apply per-chip
 * workarounds (async-transfer fix, DWB read alignment fix), compute the
 * number of queues needed, and submit via AscSendScsiQueue() when
 * enough free queues exist. Returns 1 if queued, 0 if busy, ERR on
 * error or critical-section re-entry.
 */
static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
{
	PortAddr iop_base;
	int sta;
	int n_q_required;
	int disable_syn_offset_one_fix;
	int i;
	ASC_PADDR addr;
	ushort sg_entry_cnt = 0;
	ushort sg_entry_cnt_minus_one = 0;
	uchar target_ix;
	uchar tid_no;
	uchar sdtr_data;
	uchar extra_bytes;
	uchar scsi_cmd;
	uchar disable_cmd;
	ASC_SG_HEAD *sg_head;
	ASC_DCNT data_cnt;

	iop_base = asc_dvc->iop_base;
	sg_head = scsiq->sg_head;
	if (asc_dvc->err_code != 0)
		return (ERR);
	scsiq->q1.q_no = 0;
	if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) {
		scsiq->q1.extra_bytes = 0;
	}
	sta = 0;
	target_ix = scsiq->q2.target_ix;
	tid_no = ASC_TIX_TO_TID(target_ix);
	n_q_required = 1;
	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Re-negotiate SDTR urgently before fetching sense data. */
		if ((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) {
			asc_dvc->sdtr_done &= ~scsiq->q1.target_id;
			sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
			AscMsgOutSDTR(asc_dvc,
				      asc_dvc->
				      sdtr_period_tbl[(sdtr_data >> 4) &
						      (uchar)(asc_dvc->
							      max_sdtr_index -
							      1)],
				      (uchar)(sdtr_data & (uchar)
					      ASC_SYN_MAX_OFFSET));
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}
	/* Guard against re-entry into this non-reentrant section. */
	if (asc_dvc->in_critical_cnt != 0) {
		AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY);
		return (ERR);
	}
	asc_dvc->in_critical_cnt++;
	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		if ((sg_entry_cnt = sg_head->entry_cnt) == 0) {
			asc_dvc->in_critical_cnt--;
			return (ERR);
		}
#if !CC_VERY_LONG_SG_LIST
		if (sg_entry_cnt > ASC_MAX_SG_LIST) {
			asc_dvc->in_critical_cnt--;
			return (ERR);
		}
#endif /* !CC_VERY_LONG_SG_LIST */
		if (sg_entry_cnt == 1) {
			/* Degenerate SG list: treat as a flat transfer. */
			scsiq->q1.data_addr =
			    (ADV_PADDR)sg_head->sg_list[0].addr;
			scsiq->q1.data_cnt =
			    (ADV_DCNT)sg_head->sg_list[0].bytes;
			scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE);
		}
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;
	}
	scsi_cmd = scsiq->cdbptr[0];
	disable_syn_offset_one_fix = FALSE;
	if ((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) &&
	    !(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) {
		/* Total the transfer length to decide on the fix. */
		if (scsiq->q1.cntl & QC_SG_HEAD) {
			data_cnt = 0;
			for (i = 0; i < sg_entry_cnt; i++) {
				data_cnt +=
				    (ADV_DCNT)le32_to_cpu(sg_head->sg_list[i].
							  bytes);
			}
		} else {
			data_cnt = le32_to_cpu(scsiq->q1.data_cnt);
		}
		if (data_cnt != 0UL) {
			if (data_cnt < 512UL) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST;
				     i++) {
					disable_cmd =
					    _syn_offset_one_disable_cmd[i];
					if (disable_cmd == 0xFF) {
						break;
					}
					if (scsi_cmd == disable_cmd) {
						disable_syn_offset_one_fix =
						    TRUE;
						break;
					}
				}
			}
		}
	}
	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
		scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
				       ASC_TAG_FLAG_DISABLE_DISCONNECT);
	} else {
		scsiq->q2.tag_code &= 0x27;
	}
	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		if (asc_dvc->bug_fix_cntl) {
			if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
				if ((scsi_cmd == READ_6) ||
				    (scsi_cmd == READ_10)) {
					/*
					 * DWB fix: if the read's end address
					 * is not dword aligned, shorten the
					 * last SG element and remember the
					 * trimmed byte count.
					 */
					addr =
					    (ADV_PADDR)le32_to_cpu(sg_head->
								   sg_list
								   [sg_entry_cnt_minus_one].
								   addr) +
					    (ADV_DCNT)le32_to_cpu(sg_head->
								  sg_list
								  [sg_entry_cnt_minus_one].
								  bytes);
					extra_bytes =
					    (uchar)((ushort)addr & 0x0003);
					if ((extra_bytes != 0)
					    &&
					    ((scsiq->q2.
					      tag_code &
					      ASC_TAG_FLAG_EXTRA_BYTES)
					     == 0)) {
						scsiq->q2.tag_code |=
						    ASC_TAG_FLAG_EXTRA_BYTES;
						scsiq->q1.extra_bytes =
						    extra_bytes;
						data_cnt =
						    le32_to_cpu(sg_head->
								sg_list
								[sg_entry_cnt_minus_one].
								bytes);
						data_cnt -=
						    (ASC_DCNT) extra_bytes;
						sg_head->
						    sg_list
						    [sg_entry_cnt_minus_one].
						    bytes =
						    cpu_to_le32(data_cnt);
					}
				}
			}
		}
		sg_head->entry_to_copy = sg_head->entry_cnt;
#if CC_VERY_LONG_SG_LIST
		/*
		 * Set the sg_entry_cnt to the maximum possible. The rest of
		 * the SG elements will be copied when the RISC completes the
		 * SG elements that fit and halts.
		 */
		if (sg_entry_cnt > ASC_MAX_SG_LIST) {
			sg_entry_cnt = ASC_MAX_SG_LIST;
		}
#endif /* CC_VERY_LONG_SG_LIST */
		n_q_required = AscSgListToQueue(sg_entry_cnt);
		if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >=
		     (uint) n_q_required)
		    || ((scsiq->q1.cntl & QC_URGENT) != 0)) {
			if ((sta =
			     AscSendScsiQueue(asc_dvc, scsiq,
					      n_q_required)) == 1) {
				asc_dvc->in_critical_cnt--;
				return (sta);
			}
		}
	} else {
		if (asc_dvc->bug_fix_cntl) {
			if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
				if ((scsi_cmd == READ_6) ||
				    (scsi_cmd == READ_10)) {
					/* Same DWB fix for flat transfers. */
					addr =
					    le32_to_cpu(scsiq->q1.data_addr) +
					    le32_to_cpu(scsiq->q1.data_cnt);
					extra_bytes =
					    (uchar)((ushort)addr & 0x0003);
					if ((extra_bytes != 0)
					    &&
					    ((scsiq->q2.
					      tag_code &
					      ASC_TAG_FLAG_EXTRA_BYTES)
					     == 0)) {
						data_cnt =
						    le32_to_cpu(scsiq->q1.
								data_cnt);
						if (((ushort)data_cnt & 0x01FF)
						    == 0) {
							scsiq->q2.tag_code |=
							    ASC_TAG_FLAG_EXTRA_BYTES;
							data_cnt -= (ASC_DCNT)
							    extra_bytes;
							scsiq->q1.data_cnt =
							    cpu_to_le32
							    (data_cnt);
							scsiq->q1.extra_bytes =
							    extra_bytes;
						}
					}
				}
			}
		}
		n_q_required = 1;
		if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, 1) >= 1) ||
		    ((scsiq->q1.cntl & QC_URGENT) != 0)) {
			if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
						    n_q_required)) == 1) {
				asc_dvc->in_critical_cnt--;
				return (sta);
			}
		}
	}
	asc_dvc->in_critical_cnt--;
	return (sta);
}

/*
 * AdvExeScsiQueue() - Send a request to the RISC microcode program.
 *
 * Allocate a carrier structure, point the carrier to the ADV_SCSI_REQ_Q,
 * add the carrier to the ICQ (Initiator Command Queue), and tickle the
 * RISC to notify it a new command is ready to be executed.
 *
 * If 'done_status' is not set to QD_DO_RETRY, then 'error_retry' will be
 * set to SCSI_MAX_RETRY.
 *
 * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the microcode
 * for DMA addresses or math operations are byte swapped to little-endian
 * order.
 *
 * Return:
 *      ADV_SUCCESS(1) - The request was successfully queued.
 *      ADV_BUSY(0) -    Resource unavailable; Retry again after pending
 *                       request completes.
 *      ADV_ERROR(-1) -  Invalid ADV_SCSI_REQ_Q request structure
 *                       host IC error.
 */
static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
{
	AdvPortAddr iop_base;
	ADV_PADDR req_paddr;
	ADV_CARR_T *new_carrp;

	/*
	 * The ADV_SCSI_REQ_Q 'target_id' field should never exceed ADV_MAX_TID.
	 */
	if (scsiq->target_id > ADV_MAX_TID) {
		scsiq->host_status = QHSTA_M_INVALID_DEVICE;
		scsiq->done_status = QD_WITH_ERROR;
		return ADV_ERROR;
	}

	iop_base = asc_dvc->iop_base;

	/*
	 * Allocate a carrier ensuring at least one carrier always
	 * remains on the freelist and initialize fields.
	 */
	if ((new_carrp = asc_dvc->carr_freelist) == NULL) {
		return ADV_BUSY;
	}
	asc_dvc->carr_freelist = (ADV_CARR_T *)
	    ADV_U32_TO_VADDR(le32_to_cpu(new_carrp->next_vpa));
	asc_dvc->carr_pending_cnt++;

	/*
	 * Set the carrier to be a stopper by setting 'next_vpa'
	 * to the stopper value. The current stopper will be changed
	 * below to point to the new stopper.
	 */
	new_carrp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER);

	/*
	 * Clear the ADV_SCSI_REQ_Q done flag.
	 */
	scsiq->a_flag &= ~ADV_SCSIQ_DONE;

	req_paddr = virt_to_bus(scsiq);
	BUG_ON(req_paddr & 31);	/* The microcode requires 32-byte alignment. */
	/* Wait for assertion before making little-endian */
	req_paddr = cpu_to_le32(req_paddr);

	/* Save virtual and physical address of ADV_SCSI_REQ_Q and carrier. */
	scsiq->scsiq_ptr = cpu_to_le32(ADV_VADDR_TO_U32(scsiq));
	scsiq->scsiq_rptr = req_paddr;

	scsiq->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->icq_sp));
	/*
	 * Every ADV_CARR_T.carr_pa is byte swapped to little-endian
	 * order during initialization.
	 */
	scsiq->carr_pa = asc_dvc->icq_sp->carr_pa;

	/*
	 * Use the current stopper to send the ADV_SCSI_REQ_Q command to
	 * the microcode. The newly allocated stopper will become the new
	 * stopper.
	 */
	asc_dvc->icq_sp->areq_vpa = req_paddr;

	/*
	 * Set the 'next_vpa' pointer for the old stopper to be the
	 * physical address of the new stopper. The RISC can only
	 * follow physical addresses.
	 */
	asc_dvc->icq_sp->next_vpa = new_carrp->carr_pa;

	/*
	 * Set the host adapter stopper pointer to point to the new carrier.
	 */
	asc_dvc->icq_sp = new_carrp;

	if (asc_dvc->chip_type == ADV_CHIP_ASC3550 ||
	    asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
		/*
		 * Tickle the RISC to tell it to read its Command Queue Head pointer.
		 */
		AdvWriteByteRegister(iop_base, IOPB_TICKLE, ADV_TICKLE_A);
		if (asc_dvc->chip_type == ADV_CHIP_ASC3550) {
			/*
			 * Clear the tickle value. In the ASC-3550 the RISC flag
			 * command 'clr_tickle_a' does not work unless the host
			 * value is cleared.
			 */
			AdvWriteByteRegister(iop_base, IOPB_TICKLE,
					     ADV_TICKLE_NOP);
		}
	} else if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
		/*
		 * Notify the RISC a carrier is ready by writing the physical
		 * address of the new carrier stopper to the COMMA register.
		 */
		AdvWriteDWordRegister(iop_base, IOPDW_COMMA,
				      le32_to_cpu(new_carrp->carr_pa));
	}

	return ADV_SUCCESS;
}

/*
 * Execute a single 'Scsi_Cmnd'.
 *
 * Builds the board-specific request (narrow: ASC_SCSI_Q; wide:
 * ADV_SCSI_REQ_Q), submits it, and maps the submit status to
 * ASC_NOERROR/ASC_BUSY/ASC_ERROR, updating statistics and scp->result.
 */
static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
{
	int ret, err_code;
	struct asc_board *boardp = shost_priv(scp->device->host);

	ASC_DBG(1, "scp 0x%p\n", scp);

	if (ASC_NARROW_BOARD(boardp)) {
		ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var;
		struct asc_scsi_q asc_scsi_q;

		/* asc_build_req() can not return ASC_BUSY. */
		ret = asc_build_req(boardp, scp, &asc_scsi_q);
		if (ret == ASC_ERROR) {
			ASC_STATS(scp->device->host, build_error);
			return ASC_ERROR;
		}

		ret = AscExeScsiQueue(asc_dvc, &asc_scsi_q);
		kfree(asc_scsi_q.sg_head);
		err_code = asc_dvc->err_code;
	} else {
		ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var;
		ADV_SCSI_REQ_Q *adv_scsiqp;

		switch (adv_build_req(boardp, scp, &adv_scsiqp)) {
		case ASC_NOERROR:
			ASC_DBG(3, "adv_build_req ASC_NOERROR\n");
			break;
		case ASC_BUSY:
			ASC_DBG(1, "adv_build_req ASC_BUSY\n");
			/*
			 * The asc_stats fields 'adv_build_noreq' and
			 * 'adv_build_nosg' count wide board busy conditions.
			 * They are updated in adv_build_req and
			 * adv_get_sglist, respectively.
			 */
			return ASC_BUSY;
		case ASC_ERROR:
		default:
			ASC_DBG(1, "adv_build_req ASC_ERROR\n");
			ASC_STATS(scp->device->host, build_error);
			return ASC_ERROR;
		}

		ret = AdvExeScsiQueue(adv_dvc, adv_scsiqp);
		err_code = adv_dvc->err_code;
	}

	switch (ret) {
	case ASC_NOERROR:
		ASC_STATS(scp->device->host, exe_noerror);
		/*
		 * Increment monotonically increasing per device
		 * successful request counter. Wrapping doesn't matter.
		 */
		boardp->reqcnt[scp->device->id]++;
		ASC_DBG(1, "ExeScsiQueue() ASC_NOERROR\n");
		break;
	case ASC_BUSY:
		ASC_STATS(scp->device->host, exe_busy);
		break;
	case ASC_ERROR:
		scmd_printk(KERN_ERR, scp, "ExeScsiQueue() ASC_ERROR, "
			    "err_code 0x%x\n", err_code);
		ASC_STATS(scp->device->host, exe_error);
		scp->result = HOST_BYTE(DID_ERROR);
		break;
	default:
		scmd_printk(KERN_ERR, scp, "ExeScsiQueue() unknown, "
			    "err_code 0x%x\n", err_code);
		ASC_STATS(scp->device->host, exe_unknown);
		scp->result = HOST_BYTE(DID_ERROR);
		break;
	}

	ASC_DBG(1, "end\n");
	return ret;
}

/*
 * advansys_queuecommand() - interrupt-driven I/O entrypoint.
 *
 * This function always returns 0. Command return status is saved
 * in the 'scp' result field.
 */
static int
advansys_queuecommand_lck(struct scsi_cmnd *scp,
			  void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *shost = scp->device->host;
	int asc_res, result = 0;

	ASC_STATS(shost, queuecommand);
	scp->scsi_done = done;

	asc_res = asc_execute_scsi_cmnd(scp);

	switch (asc_res) {
	case ASC_NOERROR:
		break;
	case ASC_BUSY:
		/* Ask the mid-layer to retry the command later. */
		result = SCSI_MLQUEUE_HOST_BUSY;
		break;
	case ASC_ERROR:
	default:
		/* Complete the command immediately with the saved result. */
		asc_scsi_done(scp);
		break;
	}

	return result;
}

static DEF_SCSI_QCMD(advansys_queuecommand)

/* Read the EISA configuration word for the adapter's slot. */
static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base)
{
	PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
	    (PortAddr) (ASC_EISA_CFG_IOP_MASK);
	return inpw(eisa_cfg_iop);
}

/*
 * Return the BIOS address of the adapter at the specified
 * I/O port and with the specified bus type.
 */
static unsigned short __devinit
AscGetChipBiosAddress(PortAddr iop_base, unsigned short bus_type)
{
	unsigned short cfg_lsw;
	unsigned short bios_addr;

	/*
	 * The PCI BIOS is re-located by the motherboard BIOS. Because
	 * of this the driver can not determine where a PCI BIOS is
	 * loaded and executes.
	 */
	if (bus_type & ASC_IS_PCI)
		return 0;

	if ((bus_type & ASC_IS_EISA) != 0) {
		cfg_lsw = AscGetEisaChipCfg(iop_base);
		cfg_lsw &= 0x000F;
		bios_addr = ASC_BIOS_MIN_ADDR + cfg_lsw * ASC_BIOS_BANK_SIZE;
		return bios_addr;
	}

	cfg_lsw = AscGetChipCfgLsw(iop_base);

	/*
	 *  ISA PnP uses the top bit as the 32K BIOS flag
	 */
	if (bus_type == ASC_IS_ISAPNP)
		cfg_lsw &= 0x7FFF;
	/* BIOS bank index is in the top nibble of the config LSW. */
	bios_addr = ASC_BIOS_MIN_ADDR + (cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE;
	return bios_addr;
}

/*
 * Program a new host SCSI ID into the chip config register and return
 * the ID read back (which should equal 'new_host_id').
 */
static uchar __devinit AscSetChipScsiID(PortAddr iop_base, uchar new_host_id)
{
	ushort cfg_lsw;

	if (AscGetChipScsiID(iop_base) == new_host_id) {
		return (new_host_id);
	}
	cfg_lsw = AscGetChipCfgLsw(iop_base);
	cfg_lsw &= 0xF8FF;	/* Clear the SCSI ID field (bits 8-10). */
	cfg_lsw |= (ushort)((new_host_id & ASC_MAX_TID) << 8);
	AscSetChipCfgLsw(iop_base, cfg_lsw);
	return (AscGetChipScsiID(iop_base));
}

/* Read the SCSI control register from register bank 1. */
static unsigned char __devinit AscGetChipScsiCtrl(PortAddr iop_base)
{
	unsigned char sc;

	AscSetBank(iop_base, 1);
	sc = inp(iop_base + IOP_REG_SC);
	AscSetBank(iop_base, 0);
	return sc;
}

/* Return the chip version number (EISA revision is offset-mapped). */
static unsigned char __devinit
AscGetChipVersion(PortAddr iop_base, unsigned short bus_type)
{
	if (bus_type & ASC_IS_EISA) {
		PortAddr eisa_iop;
		unsigned char revision;
		eisa_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
		    (PortAddr) ASC_EISA_REV_IOP_MASK;
		revision = inp(eisa_iop);
		return ASC_CHIP_MIN_VER_EISA - 1 + revision;
	}
	return AscGetChipVerNo(iop_base);
}

#ifdef CONFIG_ISA
/* Unmask the given channel on the legacy 8237 ISA DMA controllers. */
static void __devinit AscEnableIsaDma(uchar dma_channel)
{
	if (dma_channel < 4) {
		/* Channels 0-3: first DMA controller (ports 0x0A/0x0B). */
		outp(0x000B, (ushort)(0xC0 | dma_channel));
		outp(0x000A, dma_channel);
	} else if (dma_channel < 8) {
		/* Channels 4-7: second DMA controller (ports 0xD4/0xD6). */
		outp(0x00D6, (ushort)(0xC0 | (dma_channel - 4)));
		outp(0x00D4, (ushort)(dma_channel - 4));
	}
}
#endif /* CONFIG_ISA */

/*
 * Request the RISC to stop queue execution and poll (up to ~2s) for its
 * acknowledgement. Returns 1 on acknowledgement, 0 on timeout.
 */
static int AscStopQueueExe(PortAddr iop_base)
{
	int count = 0;

	if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) == 0) {
		AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
				 ASC_STOP_REQ_RISC_STOP);
		do {
			if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) &
			    ASC_STOP_ACK_RISC_STOP) {
				return (1);
			}
			mdelay(100);
		} while (count++ < 20);
	}
	return (0);
}

/* Return the maximum DMA transfer count for the given bus type. */
static ASC_DCNT __devinit AscGetMaxDmaCount(ushort bus_type)
{
	if (bus_type & ASC_IS_ISA)
		return ASC_MAX_ISA_DMA_COUNT;
	else if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
		return ASC_MAX_VL_DMA_COUNT;
	return ASC_MAX_PCI_DMA_COUNT;
}

#ifdef CONFIG_ISA
/*
 * Decode the ISA DMA channel from the chip config LSW.
 * Encoding: 0x00 -> channel 7, 0x03 -> disabled (0), else value + 4.
 */
static ushort __devinit AscGetIsaDmaChannel(PortAddr iop_base)
{
	ushort channel;

	channel = AscGetChipCfgLsw(iop_base) & 0x0003;
	if (channel == 0x03)
		return (0);
	else if (channel == 0x00)
		return (7);
	return (channel + 4);
}

/*
 * Program an ISA DMA channel (5-7 only) into the chip config LSW and
 * return the channel read back; returns 0 for out-of-range channels.
 */
static ushort __devinit AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel)
{
	ushort cfg_lsw;
	uchar value;

	if ((dma_channel >= 5) && (dma_channel <= 7)) {
		if (dma_channel == 7)
			value = 0x00;
		else
			value = dma_channel - 4;
		cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC;
		cfg_lsw |= value;
		AscSetChipCfgLsw(iop_base, cfg_lsw);
		return (AscGetIsaDmaChannel(iop_base));
	}
	return 0;
}

/* Read the 3-bit ISA DMA speed setting from register bank 1. */
static uchar __devinit AscGetIsaDmaSpeed(PortAddr iop_base)
{
	uchar speed_value;

	AscSetBank(iop_base, 1);
	speed_value = AscReadChipDmaSpeed(iop_base);
	speed_value &= 0x07;
	AscSetBank(iop_base, 0);
	return speed_value;
}

/* Write a 3-bit ISA DMA speed setting and return the value read back. */
static uchar __devinit AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
{
	speed_value &= 0x07;
	AscSetBank(iop_base, 1);
	AscWriteChipDmaSpeed(iop_base, speed_value);
	AscSetBank(iop_base, 0);
	return AscGetIsaDmaSpeed(iop_base);
}
#endif /* CONFIG_ISA */

/*
 * Initialize the ASC_DVC_VAR state to driver defaults, detect the chip
 * version, and select the SDTR period table. Returns a warning-code
 * bitmask; errors are accumulated in asc_dvc->err_code.
 */
static ushort __devinit AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
{
	int i;
	PortAddr iop_base;
	ushort warn_code;
	uchar chip_version;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	asc_dvc->err_code = 0;
	if ((asc_dvc->bus_type &
	     (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
		asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE;
	}
	/* Halt the chip before reprogramming state. */
	AscSetChipControl(iop_base, CC_HALT);
	AscSetChipStatus(iop_base, 0);
	asc_dvc->bug_fix_cntl = 0;
	asc_dvc->pci_fix_asyn_xfer = 0;
	asc_dvc->pci_fix_asyn_xfer_always = 0;
	/* asc_dvc->init_state initialized in AscInitGetConfig(). */
	asc_dvc->sdtr_done = 0;
	asc_dvc->cur_total_qng = 0;
	asc_dvc->is_in_int = 0;
	asc_dvc->in_critical_cnt = 0;
	asc_dvc->last_q_shortage = 0;
	asc_dvc->use_tagged_qng = 0;
	asc_dvc->no_scam = 0;
	asc_dvc->unit_not_ready = 0;
	asc_dvc->queue_full_or_busy = 0;
	asc_dvc->redo_scam = 0;
	asc_dvc->res2 = 0;
	asc_dvc->min_sdtr_index = 0;
	asc_dvc->cfg->can_tagged_qng = 0;
	asc_dvc->cfg->cmd_qng_enabled = 0;
	asc_dvc->dvc_cntl = ASC_DEF_DVC_CNTL;
	asc_dvc->init_sdtr = 0;
	asc_dvc->max_total_qng = ASC_DEF_MAX_TOTAL_QNG;
	asc_dvc->scsi_reset_wait = 3;
	asc_dvc->start_motor = ASC_SCSI_WIDTH_BIT_SET;
	asc_dvc->max_dma_count = AscGetMaxDmaCount(asc_dvc->bus_type);
	asc_dvc->cfg->sdtr_enable = ASC_SCSI_WIDTH_BIT_SET;
	asc_dvc->cfg->disc_enable = ASC_SCSI_WIDTH_BIT_SET;
	asc_dvc->cfg->chip_scsi_id = ASC_DEF_CHIP_SCSI_ID;
	chip_version = AscGetChipVersion(iop_base, asc_dvc->bus_type);
	asc_dvc->cfg->chip_version = chip_version;
	asc_dvc->sdtr_period_tbl = asc_syn_xfer_period;
	asc_dvc->max_sdtr_index = 7;
	if ((asc_dvc->bus_type & ASC_IS_PCI) &&
	    (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3150)) {
		/* Ultra-capable PCI chip: use the ultra SDTR period table. */
		asc_dvc->bus_type = ASC_IS_PCI_ULTRA;
		asc_dvc->sdtr_period_tbl = asc_syn_ultra_xfer_period;
		asc_dvc->max_sdtr_index = 15;
		if (chip_version == ASC_CHIP_VER_PCI_ULTRA_3150) {
			AscSetExtraControl(iop_base,
					   (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
		} else if (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3050) {
			AscSetExtraControl(iop_base,
					   (SEC_ACTIVE_NEGATE |
					    SEC_ENABLE_FILTER));
		}
	}
	if (asc_dvc->bus_type == ASC_IS_PCI) {
		AscSetExtraControl(iop_base,
				   (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
	}

	asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
#ifdef CONFIG_ISA
	if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
		if (chip_version >= ASC_CHIP_MIN_VER_ISA_PNP) {
			AscSetChipIFC(iop_base, IFC_INIT_DEFAULT);
			asc_dvc->bus_type = ASC_IS_ISAPNP;
		}
		asc_dvc->cfg->isa_dma_channel =
		    (uchar)AscGetIsaDmaChannel(iop_base);
	}
#endif /* CONFIG_ISA */
	/* Reset per-target queueing state. */
	for (i = 0; i <= ASC_MAX_TID; i++) {
		asc_dvc->cur_dvc_qng[i] = 0;
		asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;
		asc_dvc->scsiq_busy_head[i] = (ASC_SCSI_Q *)0L;
		asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *)0L;
		asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG;
	}
	return warn_code;
}

/*
 * Write a command to the EEPROM command register, verifying by
 * read-back with retries. Returns 1 on success, 0 on failure.
 */
static int __devinit AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
{
	int retry;

	for (retry = 0; retry < ASC_EEP_MAX_RETRY; retry++) {
		unsigned char read_back;
		AscSetChipEEPCmd(iop_base, cmd_reg);
		mdelay(1);	/* Allow the EEPROM command to settle. */
		read_back = AscGetChipEEPCmd(iop_base);
		if (read_back == cmd_reg)
			return 1;
	}
	return 0;
}

/* Delay long enough for an EEPROM read cycle to complete. */
static void __devinit AscWaitEEPRead(void)
{
	mdelay(1);
}

/* Read one word from the EEPROM at 'addr' (write access disabled first). */
static ushort __devinit AscReadEEPWord(PortAddr iop_base, uchar addr)
{
	ushort read_wval;
	uchar cmd_reg;

	AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
	AscWaitEEPRead();
	cmd_reg = addr | ASC_EEP_CMD_READ;
	AscWriteEEPCmdReg(iop_base, cmd_reg);
	AscWaitEEPRead();
	read_wval = AscGetChipEEPData(iop_base);
	AscWaitEEPRead();
	return read_wval;
}

/*
 * Read the EEPROM configuration into 'cfg_buf' and return the running
 * word checksum of the data read (the stored checksum word itself is
 * read into the final buffer slot for the caller to compare).
 */
static ushort __devinit
AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
{
	ushort wval;
	ushort sum;
	ushort *wbuf;
	int cfg_beg;
	int cfg_end;
	int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR - 2;
	int s_addr;

	wbuf = (ushort *)cfg_buf;
	sum = 0;
	/* Read two config words; Byte-swapping done by AscReadEEPWord(). */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = AscReadEEPWord(iop_base, (uchar)s_addr);
		sum += *wbuf;
	}
	if (bus_type & ASC_IS_VL) {
		cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
		cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
	} else {
		cfg_beg = ASC_EEP_DVC_CFG_BEG;
		cfg_end = ASC_EEP_MAX_DVC_ADDR;
	}
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		wval = AscReadEEPWord(iop_base, (uchar)s_addr);
		if (s_addr <= uchar_end_in_config) {
			/*
			 * Swap all char fields - must unswap bytes already swapped
			 * by AscReadEEPWord().
			 */
			*wbuf = le16_to_cpu(wval);
		} else {
			/* Don't swap word field at the end - cntl field. */
			*wbuf = wval;
		}
		sum += wval;	/* Checksum treats all EEPROM data as words. */
	}
	/*
	 * Read the checksum word which will be compared against 'sum'
	 * by the caller. Word field already swapped.
	 */
	*wbuf = AscReadEEPWord(iop_base, (uchar)s_addr);
	return sum;
}

/*
 * Probe for external LRAM by writing a test pattern to queue 241's
 * address and reading it back. Returns 1 if external LRAM is present
 * (and restores the original word), 0 otherwise.
 */
static int __devinit AscTestExternalLram(ASC_DVC_VAR *asc_dvc)
{
	PortAddr iop_base;
	ushort q_addr;
	ushort saved_word;
	int sta;

	iop_base = asc_dvc->iop_base;
	sta = 0;
	q_addr = ASC_QNO_TO_QADDR(241);
	saved_word = AscReadLramWord(iop_base, q_addr);
	AscSetChipLramAddr(iop_base, q_addr);
	AscSetChipLramData(iop_base, 0x55AA);	/* Test pattern. */
	mdelay(10);
	AscSetChipLramAddr(iop_base, q_addr);
	if (AscGetChipLramData(iop_base) == 0x55AA) {
		sta = 1;
		AscWriteLramWord(iop_base, q_addr, saved_word);
	}
	return (sta);
}

/* Delay long enough for an EEPROM write cycle to complete. */
static void __devinit AscWaitEEPWrite(void)
{
	mdelay(20);
}

/*
 * Write a word to the EEPROM data register, verifying by read-back
 * with retries. Returns 1 on success, 0 after ASC_EEP_MAX_RETRY tries.
 */
static int __devinit AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
{
	ushort read_back;
	int retry;

	retry = 0;
	while (TRUE) {
		AscSetChipEEPData(iop_base, data_reg);
		mdelay(1);
		read_back = AscGetChipEEPData(iop_base);
		if (read_back == data_reg) {
			return (1);
		}
		if (retry++ > ASC_EEP_MAX_RETRY) {
			return (0);
		}
	}
}

/*
 * Write 'word_val' to EEPROM address 'addr' only if it differs from
 * the current contents, and return the word read back afterwards.
 */
static ushort __devinit
AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val)
{
	ushort read_wval;

	read_wval = AscReadEEPWord(iop_base, addr);
	if (read_wval != word_val) {
		AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_ABLE);
		AscWaitEEPRead();
		AscWriteEEPDataReg(iop_base, word_val);
		AscWaitEEPRead();
		AscWriteEEPCmdReg(iop_base,
				  (uchar)((uchar)ASC_EEP_CMD_WRITE | addr));
		AscWaitEEPWrite();
		AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
		AscWaitEEPRead();
		return (AscReadEEPWord(iop_base, addr));
	}
	return (read_wval);
}

/*
 * Write the configuration in 'cfg_buf' plus its checksum to the EEPROM,
 * then read the whole EEPROM back and verify it. Returns the number of
 * write/verify errors encountered (0 means success).
 */
static int __devinit
AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
{
	int n_error;
	ushort *wbuf;
	ushort word;
	ushort sum;
	int s_addr;
	int cfg_beg;
	int cfg_end;
	int uchar_end_in_config = ASC_EEP_MAX_DVC_ADDR - 2;

	wbuf = (ushort *)cfg_buf;
	n_error = 0;
	sum = 0;
	/* Write two config words; AscWriteEEPWord() will swap bytes. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (bus_type & ASC_IS_VL) {
		cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
		cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
	} else {
		cfg_beg = ASC_EEP_DVC_CFG_BEG;
		cfg_end = ASC_EEP_MAX_DVC_ADDR;
	}
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		if (s_addr <= uchar_end_in_config) {
			/*
			 * This is a char field. Swap char fields before they are
			 * swapped again by AscWriteEEPWord().
			 */
			word = cpu_to_le16(*wbuf);
			if (word !=
			    AscWriteEEPWord(iop_base, (uchar)s_addr, word)) {
				n_error++;
			}
		} else {
			/* Don't swap word field at the end - cntl field. */
			if (*wbuf !=
			    AscWriteEEPWord(iop_base, (uchar)s_addr, *wbuf)) {
				n_error++;
			}
		}
		sum += *wbuf;	/* Checksum calculated from word values. */
	}
	/* Write checksum word. It will be swapped by AscWriteEEPWord(). */
	*wbuf = sum;
	if (sum != AscWriteEEPWord(iop_base, (uchar)s_addr, sum)) {
		n_error++;
	}

	/* Read EEPROM back again. */
	wbuf = (ushort *)cfg_buf;
	/*
	 * Read two config words; Byte-swapping done by AscReadEEPWord().
	 */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != AscReadEEPWord(iop_base, (uchar)s_addr)) {
			n_error++;
		}
	}
	if (bus_type & ASC_IS_VL) {
		cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
		cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
	} else {
		cfg_beg = ASC_EEP_DVC_CFG_BEG;
		cfg_end = ASC_EEP_MAX_DVC_ADDR;
	}
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		if (s_addr <= uchar_end_in_config) {
			/*
			 * Swap all char fields. Must unswap bytes already swapped
			 * by AscReadEEPWord().
			 */
			word =
			    le16_to_cpu(AscReadEEPWord
					(iop_base, (uchar)s_addr));
		} else {
			/* Don't swap word field at the end - cntl field. */
			word = AscReadEEPWord(iop_base, (uchar)s_addr);
		}
		if (*wbuf != word) {
			n_error++;
		}
	}
	/* Read checksum; Byte swapping not needed. */
	if (AscReadEEPWord(iop_base, (uchar)s_addr) != sum) {
		n_error++;
	}
	return n_error;
}

/*
 * Retry AscSetEEPConfigOnce() up to ASC_EEP_MAX_RETRY times until the
 * EEPROM write verifies cleanly. Returns the last error count.
 */
static int __devinit
AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
{
	int retry;
	int n_error;

	retry = 0;
	while (TRUE) {
		if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf,
						   bus_type)) == 0) {
			break;
		}
		if (++retry > ASC_EEP_MAX_RETRY) {
			break;
		}
	}
	return n_error;
}

/*
 * Initialize chip state from the EEPROM configuration: halt the chip
 * (resetting the SCSI bus if needed), read and checksum the EEPROM,
 * recover from auto-config/checksum problems, and copy the settings
 * into the ASC_DVC_VAR. Returns a warning-code bitmask.
 * (Definition continues beyond this view.)
 */
static ushort __devinit AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
{
	ASCEEP_CONFIG eep_config_buf;
	ASCEEP_CONFIG *eep_config;
	PortAddr iop_base;
	ushort chksum;
	ushort warn_code;
	ushort cfg_msw, cfg_lsw;
	int i;
	int write_eep = 0;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;
	AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE);
	AscStopQueueExe(iop_base);
	if ((AscStopChip(iop_base) == FALSE) ||
	    (AscGetChipScsiCtrl(iop_base) != 0)) {
		asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE;
		AscResetChipAndScsiBus(asc_dvc);
		mdelay(asc_dvc->scsi_reset_wait * 1000);	/* XXX: msleep? */
	}
	if (AscIsChipHalted(iop_base) == FALSE) {
		asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
		return (warn_code);
	}
	AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
	if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
		asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
		return (warn_code);
	}
	eep_config = (ASCEEP_CONFIG *)&eep_config_buf;
	cfg_msw = AscGetChipCfgMsw(iop_base);
	cfg_lsw = AscGetChipCfgLsw(iop_base);
	if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) {
		cfg_msw &= ~ASC_CFG_MSW_CLR_MASK;
		warn_code |= ASC_WARN_CFG_MSW_RECOVER;
		AscSetChipCfgMsw(iop_base, cfg_msw);
	}
	chksum = AscGetEEPConfig(iop_base, eep_config, asc_dvc->bus_type);
	ASC_DBG(1, "chksum 0x%x\n", chksum);
	if (chksum == 0) {
		chksum = 0xaa55;
	}
	if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) {
		warn_code |= ASC_WARN_AUTO_CONFIG;
		if (asc_dvc->cfg->chip_version == 3) {
			/* Recover config words from the chip registers. */
			if (eep_config->cfg_lsw != cfg_lsw) {
				warn_code |= ASC_WARN_EEPROM_RECOVER;
				eep_config->cfg_lsw =
				    AscGetChipCfgLsw(iop_base);
			}
			if (eep_config->cfg_msw != cfg_msw) {
				warn_code |= ASC_WARN_EEPROM_RECOVER;
				eep_config->cfg_msw =
				    AscGetChipCfgMsw(iop_base);
			}
		}
	}
	eep_config->cfg_msw &= ~ASC_CFG_MSW_CLR_MASK;
	eep_config->cfg_lsw |= ASC_CFG0_HOST_INT_ON;
	ASC_DBG(1, "eep_config->chksum 0x%x\n", eep_config->chksum);
	if (chksum != eep_config->chksum) {
		if (AscGetChipVersion(iop_base, asc_dvc->bus_type) ==
		    ASC_CHIP_VER_PCI_ULTRA_3050) {
			/* EEPROM-less board: substitute safe defaults. */
			ASC_DBG(1, "chksum error ignored; EEPROM-less board\n");
			eep_config->init_sdtr = 0xFF;
			eep_config->disc_enable = 0xFF;
			eep_config->start_motor = 0xFF;
			eep_config->use_cmd_qng = 0;
			eep_config->max_total_qng = 0xF0;
			eep_config->max_tag_qng = 0x20;
			eep_config->cntl = 0xBFFF;
			ASC_EEP_SET_CHIP_ID(eep_config, 7);
			eep_config->no_scam = 0;
			eep_config->adapter_info[0] = 0;
			eep_config->adapter_info[1] = 0;
			eep_config->adapter_info[2] = 0;
			eep_config->adapter_info[3] = 0;
			eep_config->adapter_info[4] = 0;
			/* Indicate EEPROM-less board. */
			eep_config->adapter_info[5] = 0xBB;
		} else {
			ASC_PRINT
			    ("AscInitFromEEP: EEPROM checksum error; Will try to re-write EEPROM.\n");
			write_eep = 1;
			warn_code |= ASC_WARN_EEPROM_CHKSUM;
		}
	}
	/* Copy the (possibly recovered) EEPROM settings into the DVC. */
	asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr;
	asc_dvc->cfg->disc_enable = eep_config->disc_enable;
	asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng;
	asc_dvc->cfg->isa_dma_speed = ASC_EEP_GET_DMA_SPD(eep_config);
	asc_dvc->start_motor = eep_config->start_motor;
	asc_dvc->dvc_cntl = eep_config->cntl;
	asc_dvc->no_scam = eep_config->no_scam;
	asc_dvc->cfg->adapter_info[0] = eep_config->adapter_info[0];
	asc_dvc->cfg->adapter_info[1] = eep_config->adapter_info[1];
	asc_dvc->cfg->adapter_info[2] = eep_config->adapter_info[2];
	asc_dvc->cfg->adapter_info[3] = eep_config->adapter_info[3];
	asc_dvc->cfg->adapter_info[4] = eep_config->adapter_info[4];
	asc_dvc->cfg->adapter_info[5] = eep_config->adapter_info[5];
	if (!AscTestExternalLram(asc_dvc)) {
		/* Internal LRAM only: shrink queue limits accordingly. */
		if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) ==
		     ASC_IS_PCI_ULTRA)) {
			eep_config->max_total_qng =
			    ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
			eep_config->max_tag_qng =
			    ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG;
		} else {
eep_config->cfg_msw |= 0x0800; cfg_msw |= 0x0800; AscSetChipCfgMsw(iop_base, cfg_msw); eep_config->max_total_qng = ASC_MAX_PCI_INRAM_TOTAL_QNG; eep_config->max_tag_qng = ASC_MAX_INRAM_TAG_QNG; } } else { } if (eep_config->max_total_qng < ASC_MIN_TOTAL_QNG) { eep_config->max_total_qng = ASC_MIN_TOTAL_QNG; } if (eep_config->max_total_qng > ASC_MAX_TOTAL_QNG) { eep_config->max_total_qng = ASC_MAX_TOTAL_QNG; } if (eep_config->max_tag_qng > eep_config->max_total_qng) { eep_config->max_tag_qng = eep_config->max_total_qng; } if (eep_config->max_tag_qng < ASC_MIN_TAG_Q_PER_DVC) { eep_config->max_tag_qng = ASC_MIN_TAG_Q_PER_DVC; } asc_dvc->max_total_qng = eep_config->max_total_qng; if ((eep_config->use_cmd_qng & eep_config->disc_enable) != eep_config->use_cmd_qng) { eep_config->disc_enable = eep_config->use_cmd_qng; warn_code |= ASC_WARN_CMD_QNG_CONFLICT; } ASC_EEP_SET_CHIP_ID(eep_config, ASC_EEP_GET_CHIP_ID(eep_config) & ASC_MAX_TID); asc_dvc->cfg->chip_scsi_id = ASC_EEP_GET_CHIP_ID(eep_config); if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) && !(asc_dvc->dvc_cntl & ASC_CNTL_SDTR_ENABLE_ULTRA)) { asc_dvc->min_sdtr_index = ASC_SDTR_ULTRA_PCI_10MB_INDEX; } for (i = 0; i <= ASC_MAX_TID; i++) { asc_dvc->dos_int13_table[i] = eep_config->dos_int13_table[i]; asc_dvc->cfg->max_tag_qng[i] = eep_config->max_tag_qng; asc_dvc->cfg->sdtr_period_offset[i] = (uchar)(ASC_DEF_SDTR_OFFSET | (asc_dvc->min_sdtr_index << 4)); } eep_config->cfg_msw = AscGetChipCfgMsw(iop_base); if (write_eep) { if ((i = AscSetEEPConfig(iop_base, eep_config, asc_dvc->bus_type)) != 0) { ASC_PRINT1 ("AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i); } else { ASC_PRINT ("AscInitFromEEP: Successfully re-wrote EEPROM.\n"); } } return (warn_code); } static int __devinit AscInitGetConfig(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var; unsigned short warn_code = 0; asc_dvc->init_state = 
ASC_INIT_STATE_BEG_GET_CFG;
	if (asc_dvc->err_code != 0)
		return asc_dvc->err_code;

	if (AscFindSignature(asc_dvc->iop_base)) {
		warn_code |= AscInitAscDvcVar(asc_dvc);
		warn_code |= AscInitFromEEP(asc_dvc);
		asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG;
		if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
			asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
	} else {
		asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
	}

	/*
	 * NOTE(review): warn_code is a bitmask but is switched on as a
	 * single value; combined warnings fall through to "unknown".
	 */
	switch (warn_code) {
	case 0:		/* No error */
		break;
	case ASC_WARN_IO_PORT_ROTATE:
		shost_printk(KERN_WARNING, shost, "I/O port address "
			     "modified\n");
		break;
	case ASC_WARN_AUTO_CONFIG:
		shost_printk(KERN_WARNING, shost, "I/O port increment switch "
			     "enabled\n");
		break;
	case ASC_WARN_EEPROM_CHKSUM:
		shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n");
		break;
	case ASC_WARN_IRQ_MODIFIED:
		shost_printk(KERN_WARNING, shost, "IRQ modified\n");
		break;
	case ASC_WARN_CMD_QNG_CONFLICT:
		shost_printk(KERN_WARNING, shost, "tag queuing enabled w/o "
			     "disconnects\n");
		break;
	default:
		shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n",
			     warn_code);
		break;
	}

	if (asc_dvc->err_code != 0)
		shost_printk(KERN_ERR, shost, "error 0x%x at init_state "
			     "0x%x\n", asc_dvc->err_code, asc_dvc->init_state);

	return asc_dvc->err_code;
}

/*
 * Probe-time "set configuration" step for narrow boards: push the
 * configuration gathered by AscInitGetConfig() back into the chip
 * (cfg words, SCSI ID, ISA DMA settings) and apply per-device quirk
 * workarounds.  Returns asc_dvc->err_code (0 on success).
 */
static int __devinit AscInitSetConfig(struct pci_dev *pdev,
				      struct Scsi_Host *shost)
{
	struct asc_board *board = shost_priv(shost);
	ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var;
	PortAddr iop_base = asc_dvc->iop_base;
	unsigned short cfg_msw;
	unsigned short warn_code = 0;

	asc_dvc->init_state |= ASC_INIT_STATE_BEG_SET_CFG;
	if (asc_dvc->err_code != 0)
		return asc_dvc->err_code;
	if (!AscFindSignature(asc_dvc->iop_base)) {
		asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
		return asc_dvc->err_code;
	}

	cfg_msw = AscGetChipCfgMsw(iop_base);
	if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) {
		cfg_msw &= ~ASC_CFG_MSW_CLR_MASK;
		warn_code |= ASC_WARN_CFG_MSW_RECOVER;
		AscSetChipCfgMsw(iop_base, cfg_msw);
	}
	/* Tag queuing requires disconnects to be enabled for the device. */
	if ((asc_dvc->cfg->cmd_qng_enabled & asc_dvc->cfg->disc_enable) !=
	    asc_dvc->cfg->cmd_qng_enabled) {
		asc_dvc->cfg->disc_enable = asc_dvc->cfg->cmd_qng_enabled;
		warn_code |= ASC_WARN_CMD_QNG_CONFLICT;
	}
	if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) {
		warn_code |= ASC_WARN_AUTO_CONFIG;
	}
#ifdef CONFIG_PCI
	if (asc_dvc->bus_type & ASC_IS_PCI) {
		cfg_msw &= 0xFFC0;
		AscSetChipCfgMsw(iop_base, cfg_msw);
		if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) ==
		    ASC_IS_PCI_ULTRA) {
			/* No PCI-Ultra-specific quirks to apply here. */
		} else {
			/* Older non-Ultra PCI boards need these workarounds. */
			if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) ||
			    (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) {
				asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB;
				asc_dvc->bug_fix_cntl |=
				    ASC_BUG_FIX_ASYN_USE_SYN;
			}
		}
	} else
#endif /* CONFIG_PCI */
	if (asc_dvc->bus_type == ASC_IS_ISAPNP) {
		if (AscGetChipVersion(iop_base, asc_dvc->bus_type) ==
		    ASC_CHIP_VER_ASYN_BUG) {
			asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
		}
	}
	if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) !=
	    asc_dvc->cfg->chip_scsi_id) {
		asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID;
	}
#ifdef CONFIG_ISA
	if (asc_dvc->bus_type & ASC_IS_ISA) {
		AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel);
		AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed);
	}
#endif /* CONFIG_ISA */

	asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG;

	/* See NOTE in AscInitGetConfig about warn_code being a bitmask. */
	switch (warn_code) {
	case 0:		/* No error. */
		break;
	case ASC_WARN_IO_PORT_ROTATE:
		shost_printk(KERN_WARNING, shost, "I/O port address "
			     "modified\n");
		break;
	case ASC_WARN_AUTO_CONFIG:
		shost_printk(KERN_WARNING, shost, "I/O port increment switch "
			     "enabled\n");
		break;
	case ASC_WARN_EEPROM_CHKSUM:
		shost_printk(KERN_WARNING, shost, "EEPROM checksum error\n");
		break;
	case ASC_WARN_IRQ_MODIFIED:
		shost_printk(KERN_WARNING, shost, "IRQ modified\n");
		break;
	case ASC_WARN_CMD_QNG_CONFLICT:
		shost_printk(KERN_WARNING, shost, "tag queuing w/o "
			     "disconnects\n");
		break;
	default:
		shost_printk(KERN_WARNING, shost, "unknown warning: 0x%x\n",
			     warn_code);
		break;
	}

	if (asc_dvc->err_code != 0)
		shost_printk(KERN_ERR, shost, "error 0x%x at init_state "
			     "0x%x\n", asc_dvc->err_code, asc_dvc->init_state);

	return asc_dvc->err_code;
}

/*
 * EEPROM Configuration.
 *
 * All drivers should use this structure to set the default EEPROM
 * configuration. The BIOS now uses this structure when it is built.
 * Additional structure information can be found in a_condor.h where
 * the structure is defined.
 *
 * The *_Field_IsChar structs are needed to correct for endianness.
 * These values are read from the board 16 bits at a time directly
 * into the structs. Because some fields are char, the values will be
 * in the wrong order. The *_Field_IsChar tells when to flip the
 * bytes. Data read and written to PCI memory is automatically swapped
 * on big-endian platforms so char fields read as words are actually being
 * unswapped on big-endian platforms.
*/
/* Default EEPROM image used when the 3550 EEPROM checksum is bad. */
static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config __devinitdata = {
	ADV_EEPROM_BIOS_ENABLE,	/* cfg_lsw */
	0x0000,			/* cfg_msw */
	0xFFFF,			/* disc_enable */
	0xFFFF,			/* wdtr_able */
	0xFFFF,			/* sdtr_able */
	0xFFFF,			/* start_motor */
	0xFFFF,			/* tagqng_able */
	0xFFFF,			/* bios_scan */
	0,			/* scam_tolerant */
	7,			/* adapter_scsi_id */
	0,			/* bios_boot_delay */
	3,			/* scsi_reset_delay */
	0,			/* bios_id_lun */
	0,			/* termination */
	0,			/* reserved1 */
	0xFFE7,			/* bios_ctrl */
	0xFFFF,			/* ultra_able */
	0,			/* reserved2 */
	ASC_DEF_MAX_HOST_QNG,	/* max_host_qng */
	ASC_DEF_MAX_DVC_QNG,	/* max_dvc_qng */
	0,			/* dvc_cntl */
	0,			/* bug_fix */
	0,			/* serial_number_word1 */
	0,			/* serial_number_word2 */
	0,			/* serial_number_word3 */
	0,			/* check_sum */
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	,			/* oem_name[16] */
	0,			/* dvc_err_code */
	0,			/* adv_err_code */
	0,			/* adv_err_addr */
	0,			/* saved_dvc_err_code */
	0,			/* saved_adv_err_code */
	0,			/* saved_adv_err_addr */
	0			/* num_of_err */
};

/* 1 = corresponding 3550 EEPROM word holds char data (needs byte swap). */
static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar __devinitdata = {
	0,			/* cfg_lsw */
	0,			/* cfg_msw */
	0,			/* disc_enable */
	0,			/* wdtr_able */
	0,			/* sdtr_able */
	0,			/* start_motor */
	0,			/* tagqng_able */
	0,			/* bios_scan */
	0,			/* scam_tolerant */
	1,			/* adapter_scsi_id */
	1,			/* bios_boot_delay */
	1,			/* scsi_reset_delay */
	1,			/* bios_id_lun */
	1,			/* termination */
	1,			/* reserved1 */
	0,			/* bios_ctrl */
	0,			/* ultra_able */
	0,			/* reserved2 */
	1,			/* max_host_qng */
	1,			/* max_dvc_qng */
	0,			/* dvc_cntl */
	0,			/* bug_fix */
	0,			/* serial_number_word1 */
	0,			/* serial_number_word2 */
	0,			/* serial_number_word3 */
	0,			/* check_sum */
	{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
	,			/* oem_name[16] */
	0,			/* dvc_err_code */
	0,			/* adv_err_code */
	0,			/* adv_err_addr */
	0,			/* saved_dvc_err_code */
	0,			/* saved_adv_err_code */
	0,			/* saved_adv_err_addr */
	0			/* num_of_err */
};

/* Default EEPROM image used when the 38C0800 EEPROM checksum is bad. */
static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config __devinitdata = {
	ADV_EEPROM_BIOS_ENABLE,	/* 00 cfg_lsw */
	0x0000,			/* 01 cfg_msw */
	0xFFFF,			/* 02 disc_enable */
	0xFFFF,			/* 03 wdtr_able */
	0x4444,			/* 04 sdtr_speed1 */
	0xFFFF,			/* 05 start_motor */
	0xFFFF,			/* 06 tagqng_able */
	0xFFFF,			/* 07 bios_scan */
	0,			/* 08 scam_tolerant */
	7,			/* 09 adapter_scsi_id */
	0,			/* bios_boot_delay */
	3,			/* 10 scsi_reset_delay */
	0,			/* bios_id_lun */
	0,			/* 11 termination_se */
	0,			/* termination_lvd */
	0xFFE7,			/* 12 bios_ctrl */
	0x4444,			/* 13 sdtr_speed2 */
	0x4444,			/* 14 sdtr_speed3 */
	ASC_DEF_MAX_HOST_QNG,	/* 15 max_host_qng */
	ASC_DEF_MAX_DVC_QNG,	/* max_dvc_qng */
	0,			/* 16 dvc_cntl */
	0x4444,			/* 17 sdtr_speed4 */
	0,			/* 18 serial_number_word1 */
	0,			/* 19 serial_number_word2 */
	0,			/* 20 serial_number_word3 */
	0,			/* 21 check_sum */
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	,			/* 22-29 oem_name[16] */
	0,			/* 30 dvc_err_code */
	0,			/* 31 adv_err_code */
	0,			/* 32 adv_err_addr */
	0,			/* 33 saved_dvc_err_code */
	0,			/* 34 saved_adv_err_code */
	0,			/* 35 saved_adv_err_addr */
	0,			/* 36 reserved */
	0,			/* 37 reserved */
	0,			/* 38 reserved */
	0,			/* 39 reserved */
	0,			/* 40 reserved */
	0,			/* 41 reserved */
	0,			/* 42 reserved */
	0,			/* 43 reserved */
	0,			/* 44 reserved */
	0,			/* 45 reserved */
	0,			/* 46 reserved */
	0,			/* 47 reserved */
	0,			/* 48 reserved */
	0,			/* 49 reserved */
	0,			/* 50 reserved */
	0,			/* 51 reserved */
	0,			/* 52 reserved */
	0,			/* 53 reserved */
	0,			/* 54 reserved */
	0,			/* 55 reserved */
	0,			/* 56 cisptr_lsw */
	0,			/* 57 cisprt_msw */
	PCI_VENDOR_ID_ASP,	/* 58 subsysvid */
	PCI_DEVICE_ID_38C0800_REV1,	/* 59 subsysid */
	0,			/* 60 reserved */
	0,			/* 61 reserved */
	0,			/* 62 reserved */
	0			/* 63 reserved */
};

/* 1 = corresponding 38C0800 EEPROM word holds char data (needs byte swap). */
static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar __devinitdata = {
	0,			/* 00 cfg_lsw */
	0,			/* 01 cfg_msw */
	0,			/* 02 disc_enable */
	0,			/* 03 wdtr_able */
	0,			/* 04 sdtr_speed1 */
	0,			/* 05 start_motor */
	0,			/* 06 tagqng_able */
	0,			/* 07 bios_scan */
	0,			/* 08 scam_tolerant */
	1,			/* 09 adapter_scsi_id */
	1,			/* bios_boot_delay */
	1,			/* 10 scsi_reset_delay */
	1,			/* bios_id_lun */
	1,			/* 11 termination_se */
	1,			/* termination_lvd */
	0,			/* 12 bios_ctrl */
	0,			/* 13 sdtr_speed2 */
	0,			/* 14 sdtr_speed3 */
	1,			/* 15 max_host_qng */
	1,			/* max_dvc_qng */
	0,			/* 16 dvc_cntl */
	0,			/* 17 sdtr_speed4 */
	0,			/* 18 serial_number_word1 */
	0,			/* 19 serial_number_word2 */
	0,			/* 20 serial_number_word3 */
	0,			/* 21 check_sum */
	{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
	,			/* 22-29 oem_name[16] */
	0,			/* 30 dvc_err_code */
	0,			/* 31 adv_err_code */
	0,			/* 32 adv_err_addr */
	0,			/* 33 saved_dvc_err_code */
	0,			/* 34 saved_adv_err_code */
	0,			/* 35 saved_adv_err_addr */
	0,			/* 36 reserved */
	0,			/* 37 reserved */
	0,			/* 38 reserved */
	0,			/* 39 reserved */
	0,			/* 40 reserved */
	0,			/* 41 reserved */
	0,			/* 42 reserved */
	0,			/* 43 reserved */
	0,			/* 44 reserved */
	0,			/* 45 reserved */
	0,			/* 46 reserved */
	0,			/* 47 reserved */
	0,			/* 48 reserved */
	0,			/* 49 reserved */
	0,			/* 50 reserved */
	0,			/* 51 reserved */
	0,			/* 52 reserved */
	0,			/* 53 reserved */
	0,			/* 54 reserved */
	0,			/* 55 reserved */
	0,			/* 56 cisptr_lsw */
	0,			/* 57 cisprt_msw */
	0,			/* 58 subsysvid */
	0,			/* 59 subsysid */
	0,			/* 60 reserved */
	0,			/* 61 reserved */
	0,			/* 62 reserved */
	0			/* 63 reserved */
};

/* Default EEPROM image used when the 38C1600 EEPROM checksum is bad. */
static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config __devinitdata = {
	ADV_EEPROM_BIOS_ENABLE,	/* 00 cfg_lsw */
	0x0000,			/* 01 cfg_msw */
	0xFFFF,			/* 02 disc_enable */
	0xFFFF,			/* 03 wdtr_able */
	0x5555,			/* 04 sdtr_speed1 */
	0xFFFF,			/* 05 start_motor */
	0xFFFF,			/* 06 tagqng_able */
	0xFFFF,			/* 07 bios_scan */
	0,			/* 08 scam_tolerant */
	7,			/* 09 adapter_scsi_id */
	0,			/* bios_boot_delay */
	3,			/* 10 scsi_reset_delay */
	0,			/* bios_id_lun */
	0,			/* 11 termination_se */
	0,			/* termination_lvd */
	0xFFE7,			/* 12 bios_ctrl */
	0x5555,			/* 13 sdtr_speed2 */
	0x5555,			/* 14 sdtr_speed3 */
	ASC_DEF_MAX_HOST_QNG,	/* 15 max_host_qng */
	ASC_DEF_MAX_DVC_QNG,	/* max_dvc_qng */
	0,			/* 16 dvc_cntl */
	0x5555,			/* 17 sdtr_speed4 */
	0,			/* 18 serial_number_word1 */
	0,			/* 19 serial_number_word2 */
	0,			/* 20 serial_number_word3 */
	0,			/* 21 check_sum */
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	,			/* 22-29 oem_name[16] */
	0,			/* 30 dvc_err_code */
	0,			/* 31 adv_err_code */
	0,			/* 32 adv_err_addr */
	0,			/* 33 saved_dvc_err_code */
	0,			/* 34 saved_adv_err_code */
	0,			/* 35 saved_adv_err_addr */
	0,			/* 36 reserved */
	0,			/* 37 reserved */
	0,			/* 38 reserved */
	0,			/* 39 reserved */
	0,			/* 40 reserved */
	0,			/* 41 reserved */
	0,			/* 42 reserved */
	0,			/* 43 reserved */
	0,			/* 44 reserved */
	0,			/* 45 reserved */
	0,			/* 46 reserved */
	0,			/* 47 reserved */
	0,			/* 48 reserved */
	0,			/* 49 reserved */
	0,			/* 50 reserved */
	0,			/* 51 reserved */
	0,			/* 52 reserved */
	0,			/* 53 reserved */
	0,			/* 54 reserved */
	0,			/* 55 reserved */
	0,			/* 56 cisptr_lsw */
	0,			/* 57 cisprt_msw */
	PCI_VENDOR_ID_ASP,	/* 58 subsysvid */
	PCI_DEVICE_ID_38C1600_REV1,	/* 59 subsysid */
	0,			/* 60 reserved */
	0,			/* 61 reserved */
	0,			/* 62 reserved */
	0			/* 63 reserved */
};

/* 1 = corresponding 38C1600 EEPROM word holds char data (needs byte swap). */
static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar __devinitdata = {
	0,			/* 00 cfg_lsw */
	0,			/* 01 cfg_msw */
	0,			/* 02 disc_enable */
	0,			/* 03 wdtr_able */
	0,			/* 04 sdtr_speed1 */
	0,			/* 05 start_motor */
	0,			/* 06 tagqng_able */
	0,			/* 07 bios_scan */
	0,			/* 08 scam_tolerant */
	1,			/* 09 adapter_scsi_id */
	1,			/* bios_boot_delay */
	1,			/* 10 scsi_reset_delay */
	1,			/* bios_id_lun */
	1,			/* 11 termination_se */
	1,			/* termination_lvd */
	0,			/* 12 bios_ctrl */
	0,			/* 13 sdtr_speed2 */
	0,			/* 14 sdtr_speed3 */
	1,			/* 15 max_host_qng */
	1,			/* max_dvc_qng */
	0,			/* 16 dvc_cntl */
	0,			/* 17 sdtr_speed4 */
	0,			/* 18 serial_number_word1 */
	0,			/* 19 serial_number_word2 */
	0,			/* 20 serial_number_word3 */
	0,			/* 21 check_sum */
	{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
	,			/* 22-29 oem_name[16] */
	0,			/* 30 dvc_err_code */
	0,			/* 31 adv_err_code */
	0,			/* 32 adv_err_addr */
	0,			/* 33 saved_dvc_err_code */
	0,			/* 34 saved_adv_err_code */
	0,			/* 35 saved_adv_err_addr */
	0,			/* 36 reserved */
	0,			/* 37 reserved */
	0,			/* 38 reserved */
	0,			/* 39 reserved */
	0,			/* 40 reserved */
	0,			/* 41 reserved */
	0,			/* 42 reserved */
	0,			/* 43 reserved */
	0,			/* 44 reserved */
	0,			/* 45 reserved */
	0,			/* 46 reserved */
	0,			/* 47 reserved */
	0,			/* 48 reserved */
	0,			/* 49 reserved */
	0,			/* 50 reserved */
	0,			/* 51 reserved */
	0,			/* 52 reserved */
	0,			/* 53 reserved */
	0,			/* 54 reserved */
	0,			/* 55 reserved */
	0,			/* 56 cisptr_lsw */
	0,			/* 57 cisprt_msw */
	0,			/* 58 subsysvid */
	0,			/* 59 subsysid */
	0,			/* 60 reserved */
	0,			/* 61 reserved */
	0,			/* 62 reserved */
	0			/* 63 reserved */
};

#ifdef CONFIG_PCI
/*
 * Wait for EEPROM command to complete
 */
static void __devinit AdvWaitEEPCmd(AdvPortAddr iop_base)
{
	int eep_delay_ms;

	/* Poll up to ADV_EEP_DELAY_MS milliseconds for command completion. */
	for (eep_delay_ms = 0; eep_delay_ms < ADV_EEP_DELAY_MS; eep_delay_ms++) {
		if (AdvReadWordRegister(iop_base, IOPW_EE_CMD) &
		    ASC_EEP_CMD_DONE) {
			break;
		}
		mdelay(1);
	}
	/* A hung EEPROM is unrecoverable at probe time. */
	if ((AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE) ==
	    0)
		BUG();
}

/*
 * Read the EEPROM from specified location
 */
static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
{
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
			     ASC_EEP_CMD_READ | eep_word_addr);
	AdvWaitEEPCmd(iop_base);
	return AdvReadWordRegister(iop_base, IOPW_EE_DATA);
}

/*
 * Write the EEPROM from 'cfg_buf'.
 */
static void __devinit
AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
{
	ushort *wbuf;
	ushort addr, chksum;
	ushort *charfields;

	wbuf = (ushort *)cfg_buf;
	charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar;
	chksum = 0;

	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
	AdvWaitEEPCmd(iop_base);

	/*
	 * Write EEPROM from word 0 to word 20.
	 */
	for (addr = ADV_EEP_DVC_CFG_BEGIN;
	     addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		chksum += *wbuf;	/* Checksum is calculated from word values. */
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
		mdelay(ADV_EEP_DELAY_MS);
	}

	/*
	 * Write EEPROM checksum at word 21.
*/
	AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
	AdvWaitEEPCmd(iop_base);
	wbuf++;
	charfields++;

	/*
	 * Write EEPROM OEM name at words 22 to 29.
	 */
	for (addr = ADV_EEP_DVC_CTL_BEGIN;
	     addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
	}
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
	AdvWaitEEPCmd(iop_base);
}

/*
 * Write the EEPROM from 'cfg_buf'.
 */
static void __devinit
AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
{
	ushort *wbuf;
	ushort *charfields;
	ushort addr, chksum;

	wbuf = (ushort *)cfg_buf;
	charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar;
	chksum = 0;

	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
	AdvWaitEEPCmd(iop_base);

	/*
	 * Write EEPROM from word 0 to word 20.
	 */
	for (addr = ADV_EEP_DVC_CFG_BEGIN;
	     addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		chksum += *wbuf;	/* Checksum is calculated from word values. */
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
		mdelay(ADV_EEP_DELAY_MS);
	}

	/*
	 * Write EEPROM checksum at word 21.
	 */
	AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
	AdvWaitEEPCmd(iop_base);
	wbuf++;
	charfields++;

	/*
	 * Write EEPROM OEM name at words 22 to 29.
	 */
	for (addr = ADV_EEP_DVC_CTL_BEGIN;
	     addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
	}
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
	AdvWaitEEPCmd(iop_base);
}

/*
 * Write the EEPROM from 'cfg_buf'.
 */
static void __devinit
AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
{
	ushort *wbuf;
	ushort *charfields;
	ushort addr, chksum;

	wbuf = (ushort *)cfg_buf;
	charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar;
	chksum = 0;

	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
	AdvWaitEEPCmd(iop_base);

	/*
	 * Write EEPROM from word 0 to word 20.
	 */
	for (addr = ADV_EEP_DVC_CFG_BEGIN;
	     addr < ADV_EEP_DVC_CFG_END; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		chksum += *wbuf;	/* Checksum is calculated from word values. */
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
		mdelay(ADV_EEP_DELAY_MS);
	}

	/*
	 * Write EEPROM checksum at word 21.
	 */
	AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
	AdvWaitEEPCmd(iop_base);
	wbuf++;
	charfields++;

	/*
	 * Write EEPROM OEM name at words 22 to 29.
	 */
	for (addr = ADV_EEP_DVC_CTL_BEGIN;
	     addr < ADV_EEP_MAX_WORD_ADDR; addr++, wbuf++) {
		ushort word;

		if (*charfields++) {
			word = cpu_to_le16(*wbuf);
		} else {
			word = *wbuf;
		}
		AdvWriteWordRegister(iop_base, IOPW_EE_DATA, word);
		AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
				     ASC_EEP_CMD_WRITE | addr);
		AdvWaitEEPCmd(iop_base);
	}
	AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
	AdvWaitEEPCmd(iop_base);
}

/*
 * Read EEPROM configuration into the specified buffer.
 *
 * Return a checksum based on the EEPROM configuration read.
 */
static ushort __devinit
AdvGet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
{
	ushort wval, chksum;
	ushort *wbuf;
	int eep_addr;
	ushort *charfields;

	charfields = (ushort *)&ADVEEP_3550_Config_Field_IsChar;
	wbuf = (ushort *)cfg_buf;
	chksum = 0;

	for (eep_addr = ADV_EEP_DVC_CFG_BEGIN;
	     eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) {
		wval = AdvReadEEPWord(iop_base, eep_addr);
		chksum += wval;	/* Checksum is calculated from word values. */
		if (*charfields++) {
			*wbuf = le16_to_cpu(wval);
		} else {
			*wbuf = wval;
		}
	}
	/* Read checksum word. */
	*wbuf = AdvReadEEPWord(iop_base, eep_addr);
	wbuf++;
	charfields++;

	/* Read rest of EEPROM not covered by the checksum. */
	for (eep_addr = ADV_EEP_DVC_CTL_BEGIN;
	     eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) {
		*wbuf = AdvReadEEPWord(iop_base, eep_addr);
		if (*charfields++) {
			*wbuf = le16_to_cpu(*wbuf);
		}
	}
	return chksum;
}

/*
 * Read EEPROM configuration into the specified buffer.
 *
 * Return a checksum based on the EEPROM configuration read.
 */
static ushort __devinit
AdvGet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
{
	ushort wval, chksum;
	ushort *wbuf;
	int eep_addr;
	ushort *charfields;

	charfields = (ushort *)&ADVEEP_38C0800_Config_Field_IsChar;
	wbuf = (ushort *)cfg_buf;
	chksum = 0;

	for (eep_addr = ADV_EEP_DVC_CFG_BEGIN;
	     eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) {
		wval = AdvReadEEPWord(iop_base, eep_addr);
		chksum += wval;	/* Checksum is calculated from word values. */
		if (*charfields++) {
			*wbuf = le16_to_cpu(wval);
		} else {
			*wbuf = wval;
		}
	}
	/* Read checksum word. */
	*wbuf = AdvReadEEPWord(iop_base, eep_addr);
	wbuf++;
	charfields++;

	/* Read rest of EEPROM not covered by the checksum. */
	for (eep_addr = ADV_EEP_DVC_CTL_BEGIN;
	     eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) {
		*wbuf = AdvReadEEPWord(iop_base, eep_addr);
		if (*charfields++) {
			*wbuf = le16_to_cpu(*wbuf);
		}
	}
	return chksum;
}

/*
 * Read EEPROM configuration into the specified buffer.
 *
 * Return a checksum based on the EEPROM configuration read.
 */
static ushort __devinit
AdvGet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
{
	ushort wval, chksum;
	ushort *wbuf;
	int eep_addr;
	ushort *charfields;

	charfields = (ushort *)&ADVEEP_38C1600_Config_Field_IsChar;
	wbuf = (ushort *)cfg_buf;
	chksum = 0;

	for (eep_addr = ADV_EEP_DVC_CFG_BEGIN;
	     eep_addr < ADV_EEP_DVC_CFG_END; eep_addr++, wbuf++) {
		wval = AdvReadEEPWord(iop_base, eep_addr);
		chksum += wval;	/* Checksum is calculated from word values. */
		if (*charfields++) {
			*wbuf = le16_to_cpu(wval);
		} else {
			*wbuf = wval;
		}
	}
	/* Read checksum word. */
	*wbuf = AdvReadEEPWord(iop_base, eep_addr);
	wbuf++;
	charfields++;

	/* Read rest of EEPROM not covered by the checksum. */
	for (eep_addr = ADV_EEP_DVC_CTL_BEGIN;
	     eep_addr < ADV_EEP_MAX_WORD_ADDR; eep_addr++, wbuf++) {
		*wbuf = AdvReadEEPWord(iop_base, eep_addr);
		if (*charfields++) {
			*wbuf = le16_to_cpu(*wbuf);
		}
	}
	return chksum;
}

/*
 * Read the board's EEPROM configuration.
Set fields in ADV_DVC_VAR and
 * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while
 * all of this is done.
 *
 * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code. If there are no warnings
 * then 0 is returned.
 *
 * Note: Chip is stopped on entry.
 */
static int __devinit AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc)
{
	AdvPortAddr iop_base;
	ushort warn_code;
	ADVEEP_3550_CONFIG eep_config;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;

	/*
	 * Read the board's EEPROM configuration.
	 *
	 * Set default values if a bad checksum is found.
	 */
	if (AdvGet3550EEPConfig(iop_base, &eep_config) !=
	    eep_config.check_sum) {
		warn_code |= ASC_WARN_EEPROM_CHKSUM;

		/*
		 * Set EEPROM default values.
		 */
		memcpy(&eep_config, &Default_3550_EEPROM_Config,
		       sizeof(ADVEEP_3550_CONFIG));

		/*
		 * Assume the 6 byte board serial number that was read from
		 * EEPROM is correct even if the EEPROM checksum failed.
		 */
		eep_config.serial_number_word3 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1);
		eep_config.serial_number_word2 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2);
		eep_config.serial_number_word1 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3);
		AdvSet3550EEPConfig(iop_base, &eep_config);
	}
	/*
	 * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the
	 * EEPROM configuration that was read.
	 *
	 * This is the mapping of EEPROM fields to Adv Library fields.
	 */
	asc_dvc->wdtr_able = eep_config.wdtr_able;
	asc_dvc->sdtr_able = eep_config.sdtr_able;
	asc_dvc->ultra_able = eep_config.ultra_able;
	asc_dvc->tagqng_able = eep_config.tagqng_able;
	asc_dvc->cfg->disc_enable = eep_config.disc_enable;
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
	asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID);
	asc_dvc->start_motor = eep_config.start_motor;
	asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
	asc_dvc->bios_ctrl = eep_config.bios_ctrl;
	asc_dvc->no_scam = eep_config.scam_tolerant;
	asc_dvc->cfg->serial1 = eep_config.serial_number_word1;
	asc_dvc->cfg->serial2 = eep_config.serial_number_word2;
	asc_dvc->cfg->serial3 = eep_config.serial_number_word3;

	/*
	 * Set the host maximum queuing (max. 253, min. 16) and the per device
	 * maximum queuing (max. 63, min. 4).
	 */
	if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) {
		eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
	} else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_host_qng == 0) {
			eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
		} else {
			eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
		}
	}

	if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) {
		eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
	} else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_dvc_qng == 0) {
			eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
		} else {
			eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
		}
	}

	/*
	 * If 'max_dvc_qng' is greater than 'max_host_qng', then
	 * set 'max_dvc_qng' to 'max_host_qng'.
	 */
	if (eep_config.max_dvc_qng > eep_config.max_host_qng) {
		eep_config.max_dvc_qng = eep_config.max_host_qng;
	}

	/*
	 * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng'
	 * values based on possibly adjusted EEPROM values.
	 */
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;

	/*
	 * If the EEPROM 'termination' field is set to automatic (0), then set
	 * the ADV_DVC_CFG 'termination' field to automatic also.
	 *
	 * If the termination is specified with a non-zero 'termination'
	 * value check that a legal value is set and set the ADV_DVC_CFG
	 * 'termination' field appropriately.
	 */
	if (eep_config.termination == 0) {
		asc_dvc->cfg->termination = 0;	/* auto termination */
	} else {
		/* Enable manual control with low off / high off. */
		if (eep_config.termination == 1) {
			asc_dvc->cfg->termination = TERM_CTL_SEL;

			/* Enable manual control with low off / high on. */
		} else if (eep_config.termination == 2) {
			asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H;

			/* Enable manual control with low on / high on. */
		} else if (eep_config.termination == 3) {
			asc_dvc->cfg->termination =
			    TERM_CTL_SEL | TERM_CTL_H | TERM_CTL_L;
		} else {
			/*
			 * The EEPROM 'termination' field contains a bad value. Use
			 * automatic termination instead.
			 */
			asc_dvc->cfg->termination = 0;
			warn_code |= ASC_WARN_EEPROM_TERMINATION;
		}
	}

	return warn_code;
}

/*
 * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and
 * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while
 * all of this is done.
 *
 * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code. If there are no warnings
 * then 0 is returned.
 *
 * Note: Chip is stopped on entry.
 */
static int __devinit AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc)
{
	AdvPortAddr iop_base;
	ushort warn_code;
	ADVEEP_38C0800_CONFIG eep_config;
	uchar tid, termination;
	ushort sdtr_speed = 0;

	iop_base = asc_dvc->iop_base;
	warn_code = 0;

	/*
	 * Read the board's EEPROM configuration.
	 *
	 * Set default values if a bad checksum is found.
	 */
	if (AdvGet38C0800EEPConfig(iop_base, &eep_config) !=
	    eep_config.check_sum) {
		warn_code |= ASC_WARN_EEPROM_CHKSUM;

		/*
		 * Set EEPROM default values.
		 */
		memcpy(&eep_config, &Default_38C0800_EEPROM_Config,
		       sizeof(ADVEEP_38C0800_CONFIG));

		/*
		 * Assume the 6 byte board serial number that was read from
		 * EEPROM is correct even if the EEPROM checksum failed.
		 */
		eep_config.serial_number_word3 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1);
		eep_config.serial_number_word2 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2);
		eep_config.serial_number_word1 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3);
		AdvSet38C0800EEPConfig(iop_base, &eep_config);
	}
	/*
	 * Set ADV_DVC_VAR and ADV_DVC_CFG variables from the
	 * EEPROM configuration that was read.
	 *
	 * This is the mapping of EEPROM fields to Adv Library fields.
	 */
	asc_dvc->wdtr_able = eep_config.wdtr_able;
	asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1;
	asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2;
	asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3;
	asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4;
	asc_dvc->tagqng_able = eep_config.tagqng_able;
	asc_dvc->cfg->disc_enable = eep_config.disc_enable;
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
	asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID);
	asc_dvc->start_motor = eep_config.start_motor;
	asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
	asc_dvc->bios_ctrl = eep_config.bios_ctrl;
	asc_dvc->no_scam = eep_config.scam_tolerant;
	asc_dvc->cfg->serial1 = eep_config.serial_number_word1;
	asc_dvc->cfg->serial2 = eep_config.serial_number_word2;
	asc_dvc->cfg->serial3 = eep_config.serial_number_word3;

	/*
	 * For every Target ID if any of its 'sdtr_speed[1234]' bits
	 * are set, then set an 'sdtr_able' bit for it.
	 */
	asc_dvc->sdtr_able = 0;
	for (tid = 0; tid <= ADV_MAX_TID; tid++) {
		/* Each sdtr_speed word covers 4 TIDs, one nibble per TID. */
		if (tid == 0) {
			sdtr_speed = asc_dvc->sdtr_speed1;
		} else if (tid == 4) {
			sdtr_speed = asc_dvc->sdtr_speed2;
		} else if (tid == 8) {
			sdtr_speed = asc_dvc->sdtr_speed3;
		} else if (tid == 12) {
			sdtr_speed = asc_dvc->sdtr_speed4;
		}
		if (sdtr_speed & ADV_MAX_TID) {
			asc_dvc->sdtr_able |= (1 << tid);
		}
		sdtr_speed >>= 4;
	}

	/*
	 * Set the host maximum queuing (max. 253, min. 16) and the per device
	 * maximum queuing (max. 63, min. 4).
	 */
	if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) {
		eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
	} else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_host_qng == 0) {
			eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
		} else {
			eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
		}
	}

	if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) {
		eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
	} else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_dvc_qng == 0) {
			eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
		} else {
			eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
		}
	}

	/*
	 * If 'max_dvc_qng' is greater than 'max_host_qng', then
	 * set 'max_dvc_qng' to 'max_host_qng'.
	 */
	if (eep_config.max_dvc_qng > eep_config.max_host_qng) {
		eep_config.max_dvc_qng = eep_config.max_host_qng;
	}

	/*
	 * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_VAR 'max_dvc_qng'
	 * values based on possibly adjusted EEPROM values.
	 */
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;

	/*
	 * If the EEPROM 'termination' field is set to automatic (0), then set
	 * the ADV_DVC_CFG 'termination' field to automatic also.
	 *
	 * If the termination is specified with a non-zero 'termination'
	 * value check that a legal value is set and set the ADV_DVC_CFG
	 * 'termination' field appropriately.
	 */
	if (eep_config.termination_se == 0) {
		termination = 0;	/* auto termination for SE */
	} else {
		/* Enable manual control with low off / high off. */
		if (eep_config.termination_se == 1) {
			termination = 0;

			/* Enable manual control with low off / high on. */
		} else if (eep_config.termination_se == 2) {
			termination = TERM_SE_HI;

			/* Enable manual control with low on / high on. */
		} else if (eep_config.termination_se == 3) {
			termination = TERM_SE;
		} else {
			/*
			 * The EEPROM 'termination_se' field contains a bad value.
			 * Use automatic termination instead.
			 */
			termination = 0;
			warn_code |= ASC_WARN_EEPROM_TERMINATION;
		}
	}

	if (eep_config.termination_lvd == 0) {
		asc_dvc->cfg->termination = termination;	/* auto termination for LVD */
	} else {
		/* Enable manual control with low off / high off. */
		if (eep_config.termination_lvd == 1) {
			asc_dvc->cfg->termination = termination;

			/* Enable manual control with low off / high on. */
		} else if (eep_config.termination_lvd == 2) {
			asc_dvc->cfg->termination = termination | TERM_LVD_HI;

			/* Enable manual control with low on / high on. */
		} else if (eep_config.termination_lvd == 3) {
			asc_dvc->cfg->termination = termination | TERM_LVD;
		} else {
			/*
			 * The EEPROM 'termination_lvd' field contains a bad value.
			 * Use automatic termination instead.
			 */
			asc_dvc->cfg->termination = termination;
			warn_code |= ASC_WARN_EEPROM_TERMINATION;
		}
	}

	return warn_code;
}

/*
 * Read the board's EEPROM configuration. Set fields in ASC_DVC_VAR and
 * ASC_DVC_CFG based on the EEPROM settings. The chip is stopped while
 * all of this is done.
 *
 * On failure set the ASC_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code. If there are no warnings
 * then 0 is returned.
 *
 * Note: Chip is stopped on entry.
 */
static int __devinit AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc)
{
	AdvPortAddr iop_base;
	ushort warn_code;
	ADVEEP_38C1600_CONFIG eep_config;
	uchar tid, termination;
	ushort sdtr_speed = 0;

	iop_base = asc_dvc->iop_base;

	warn_code = 0;

	/*
	 * Read the board's EEPROM configuration.
	 *
	 * Set default values if a bad checksum is found.
	 */
	if (AdvGet38C1600EEPConfig(iop_base, &eep_config) !=
	    eep_config.check_sum) {
		struct pci_dev *pdev = adv_dvc_to_pdev(asc_dvc);
		warn_code |= ASC_WARN_EEPROM_CHKSUM;

		/*
		 * Set EEPROM default values.
		 */
		memcpy(&eep_config, &Default_38C1600_EEPROM_Config,
		       sizeof(ADVEEP_38C1600_CONFIG));

		if (PCI_FUNC(pdev->devfn) != 0) {
			u8 ints;
			/*
			 * Disable Bit 14 (BIOS_ENABLE) to fix SPARC Ultra 60
			 * and old Mac system booting problem. The Expansion
			 * ROM must be disabled in Function 1 for these systems
			 */
			eep_config.cfg_lsw &= ~ADV_EEPROM_BIOS_ENABLE;
			/*
			 * Clear the INTAB (bit 11) if the GPIO 0 input
			 * indicates the Function 1 interrupt line is wired
			 * to INTB.
			 *
			 * Set/Clear Bit 11 (INTAB) from the GPIO bit 0 input:
			 *   1 - Function 1 interrupt line wired to INT A.
			 *   0 - Function 1 interrupt line wired to INT B.
			 *
			 * Note: Function 0 is always wired to INTA.
			 * Put all 5 GPIO bits in input mode and then read
			 * their input values.
			 */
			AdvWriteByteRegister(iop_base, IOPB_GPIO_CNTL, 0);
			ints = AdvReadByteRegister(iop_base, IOPB_GPIO_DATA);
			if ((ints & 0x01) == 0)
				eep_config.cfg_lsw &= ~ADV_EEPROM_INTAB;
		}

		/*
		 * Assume the 6 byte board serial number that was read from
		 * EEPROM is correct even if the EEPROM checksum failed.
		 */
		eep_config.serial_number_word3 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 1);
		eep_config.serial_number_word2 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 2);
		eep_config.serial_number_word1 =
		    AdvReadEEPWord(iop_base, ADV_EEP_DVC_CFG_END - 3);

		AdvSet38C1600EEPConfig(iop_base, &eep_config);
	}
	/*
	 * Set ASC_DVC_VAR and ASC_DVC_CFG variables from the
	 * EEPROM configuration that was read.
	 *
	 * This is the mapping of EEPROM fields to Adv Library fields.
	 */
	asc_dvc->wdtr_able = eep_config.wdtr_able;
	asc_dvc->sdtr_speed1 = eep_config.sdtr_speed1;
	asc_dvc->sdtr_speed2 = eep_config.sdtr_speed2;
	asc_dvc->sdtr_speed3 = eep_config.sdtr_speed3;
	asc_dvc->sdtr_speed4 = eep_config.sdtr_speed4;
	asc_dvc->ppr_able = 0;	/* PPR negotiation starts disabled here. */
	asc_dvc->tagqng_able = eep_config.tagqng_able;
	asc_dvc->cfg->disc_enable = eep_config.disc_enable;
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
	asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ASC_MAX_TID);
	asc_dvc->start_motor = eep_config.start_motor;
	asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
	asc_dvc->bios_ctrl = eep_config.bios_ctrl;
	asc_dvc->no_scam = eep_config.scam_tolerant;

	/*
	 * For every Target ID if any of its 'sdtr_speed[1234]' bits
	 * are set, then set an 'sdtr_able' bit for it.
	 *
	 * One 4-bit speed code per TID; a new sdtr_speedN word is
	 * loaded every 4 TIDs.  NOTE(review): ASC_MAX_TID serves as
	 * the 4-bit nibble mask here — confirm its value is 0xf.
	 */
	asc_dvc->sdtr_able = 0;
	for (tid = 0; tid <= ASC_MAX_TID; tid++) {
		if (tid == 0) {
			sdtr_speed = asc_dvc->sdtr_speed1;
		} else if (tid == 4) {
			sdtr_speed = asc_dvc->sdtr_speed2;
		} else if (tid == 8) {
			sdtr_speed = asc_dvc->sdtr_speed3;
		} else if (tid == 12) {
			sdtr_speed = asc_dvc->sdtr_speed4;
		}
		if (sdtr_speed & ASC_MAX_TID) {
			asc_dvc->sdtr_able |= (1 << tid);
		}
		sdtr_speed >>= 4;
	}

	/*
	 * Set the host maximum queuing (max. 253, min. 16) and the per device
	 * maximum queuing (max. 63, min. 4).
	 */
	if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG) {
		eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
	} else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (eep_config.max_host_qng == 0) {
			eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
		} else {
			eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
		}
	}

	if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG) {
		eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
	} else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG) {
		/* If the value is zero, assume it is uninitialized.
		 */
		if (eep_config.max_dvc_qng == 0) {
			eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
		} else {
			eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
		}
	}

	/*
	 * If 'max_dvc_qng' is greater than 'max_host_qng', then
	 * set 'max_dvc_qng' to 'max_host_qng'.
	 */
	if (eep_config.max_dvc_qng > eep_config.max_host_qng) {
		eep_config.max_dvc_qng = eep_config.max_host_qng;
	}

	/*
	 * Set ASC_DVC_VAR 'max_host_qng' and ASC_DVC_VAR 'max_dvc_qng'
	 * values based on possibly adjusted EEPROM values.
	 */
	asc_dvc->max_host_qng = eep_config.max_host_qng;
	asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;

	/*
	 * If the EEPROM 'termination' field is set to automatic (0), then set
	 * the ASC_DVC_CFG 'termination' field to automatic also.
	 *
	 * If the termination is specified with a non-zero 'termination'
	 * value check that a legal value is set and set the ASC_DVC_CFG
	 * 'termination' field appropriately.
	 *
	 * SE termination is resolved first into the local 'termination'
	 * variable, then the LVD bits are OR-ed in below.
	 */
	if (eep_config.termination_se == 0) {
		termination = 0;	/* auto termination for SE */
	} else {
		/* Enable manual control with low off / high off. */
		if (eep_config.termination_se == 1) {
			termination = 0;

			/* Enable manual control with low off / high on. */
		} else if (eep_config.termination_se == 2) {
			termination = TERM_SE_HI;

			/* Enable manual control with low on / high on. */
		} else if (eep_config.termination_se == 3) {
			termination = TERM_SE;
		} else {
			/*
			 * The EEPROM 'termination_se' field contains a bad
			 * value.  Use automatic termination instead.
			 */
			termination = 0;
			warn_code |= ASC_WARN_EEPROM_TERMINATION;
		}
	}

	if (eep_config.termination_lvd == 0) {
		asc_dvc->cfg->termination = termination;	/* auto termination for LVD */
	} else {
		/* Enable manual control with low off / high off. */
		if (eep_config.termination_lvd == 1) {
			asc_dvc->cfg->termination = termination;

			/* Enable manual control with low off / high on.
			 */
		} else if (eep_config.termination_lvd == 2) {
			asc_dvc->cfg->termination = termination | TERM_LVD_HI;

			/* Enable manual control with low on / high on. */
		} else if (eep_config.termination_lvd == 3) {
			asc_dvc->cfg->termination = termination | TERM_LVD;
		} else {
			/*
			 * The EEPROM 'termination_lvd' field contains a bad
			 * value.  Use automatic termination instead.
			 */
			asc_dvc->cfg->termination = termination;
			warn_code |= ASC_WARN_EEPROM_TERMINATION;
		}
	}

	return warn_code;
}

/*
 * Initialize the ADV_DVC_VAR structure.
 *
 * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
 *
 * For a non-fatal error return a warning code. If there are no warnings
 * then 0 is returned.
 */
static int __devinit
AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
{
	struct asc_board *board = shost_priv(shost);
	ADV_DVC_VAR *asc_dvc = &board->dvc_var.adv_dvc_var;
	unsigned short warn_code = 0;
	AdvPortAddr iop_base = asc_dvc->iop_base;
	u16 cmd;
	int status;

	asc_dvc->err_code = 0;

	/*
	 * Save the state of the PCI Configuration Command Register
	 * "Parity Error Response Control" Bit. If the bit is clear (0),
	 * in AdvInitAsc3550/38C0800Driver() tell the microcode to ignore
	 * DMA parity errors.
	 */
	asc_dvc->cfg->control_flag = 0;
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if ((cmd & PCI_COMMAND_PARITY) == 0)
		asc_dvc->cfg->control_flag |= CONTROL_FLAG_IGNORE_PERR;

	asc_dvc->cfg->chip_version =
	    AdvGetChipVersion(iop_base, asc_dvc->bus_type);

	ASC_DBG(1, "iopb_chip_id_1: 0x%x 0x%x\n",
		(ushort)AdvReadByteRegister(iop_base, IOPB_CHIP_ID_1),
		(ushort)ADV_CHIP_ID_BYTE);

	ASC_DBG(1, "iopw_chip_id_0: 0x%x 0x%x\n",
		(ushort)AdvReadWordRegister(iop_base, IOPW_CHIP_ID_0),
		(ushort)ADV_CHIP_ID_WORD);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (AdvFindSignature(iop_base) == 0) {
		asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
		return ADV_ERROR;
	} else {
		/*
		 * The caller must set 'chip_type' to a valid setting.
		 */
		if (asc_dvc->chip_type != ADV_CHIP_ASC3550 &&
		    asc_dvc->chip_type != ADV_CHIP_ASC38C0800 &&
		    asc_dvc->chip_type != ADV_CHIP_ASC38C1600) {
			asc_dvc->err_code |= ASC_IERR_BAD_CHIPTYPE;
			return ADV_ERROR;
		}

		/*
		 * Reset Chip.
		 */
		AdvWriteWordRegister(iop_base, IOPW_CTRL_REG,
				     ADV_CTRL_REG_CMD_RESET);
		mdelay(100);
		AdvWriteWordRegister(iop_base, IOPW_CTRL_REG,
				     ADV_CTRL_REG_CMD_WR_IO_REG);

		/* Dispatch to the chip-specific EEPROM init routine. */
		if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600) {
			status = AdvInitFrom38C1600EEP(asc_dvc);
		} else if (asc_dvc->chip_type == ADV_CHIP_ASC38C0800) {
			status = AdvInitFrom38C0800EEP(asc_dvc);
		} else {
			status = AdvInitFrom3550EEP(asc_dvc);
		}
		warn_code |= status;
	}

	if (warn_code != 0)
		shost_printk(KERN_WARNING, shost, "warning: 0x%x\n",
			     warn_code);

	if (asc_dvc->err_code)
		shost_printk(KERN_ERR, shost, "error code 0x%x\n",
			     asc_dvc->err_code);

	return asc_dvc->err_code;
}
#endif

static struct scsi_host_template advansys_template = {
	.proc_name = DRV_NAME,
#ifdef CONFIG_PROC_FS
	.proc_info = advansys_proc_info,
#endif
	.name = DRV_NAME,
	.info = advansys_info,
	.queuecommand = advansys_queuecommand,
	.eh_bus_reset_handler = advansys_reset,
	.bios_param = advansys_biosparam,
	.slave_configure = advansys_slave_configure,
	/*
	 * Because the driver may control an ISA adapter 'unchecked_isa_dma'
	 * must be set. The flag will be cleared in advansys_board_found
	 * for non-ISA adapters.
	 */
	.unchecked_isa_dma = 1,
	/*
	 * All adapters controlled by this driver are capable of large
	 * scatter-gather lists. According to the mid-level SCSI documentation
	 * this obviates any performance gain provided by setting
	 * 'use_clustering'. But empirically while CPU utilization is increased
	 * by enabling clustering, I/O throughput increases as well.
	 */
	.use_clustering = ENABLE_CLUSTERING,
};

/*
 * Allocate the per-board DMA carrier buffer, request structures and
 * scatter-gather blocks for a Wide board, then run the chip-specific
 * RISC initialization.  Returns 0 on success or ADV_ERROR / the
 * library error code on failure.
 */
static int __devinit advansys_wide_init_chip(struct Scsi_Host *shost)
{
	struct asc_board *board = shost_priv(shost);
	struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
	int req_cnt = 0;
	adv_req_t *reqp = NULL;
	int sg_cnt = 0;
	adv_sgblk_t *sgp;
	int warn_code, err_code;

	/*
	 * Allocate buffer carrier structures. The total size
	 * is about 4 KB, so allocate all at once.
	 */
	adv_dvc->carrier_buf = kmalloc(ADV_CARRIER_BUFSIZE, GFP_KERNEL);
	ASC_DBG(1, "carrier_buf 0x%p\n", adv_dvc->carrier_buf);

	if (!adv_dvc->carrier_buf)
		goto kmalloc_failed;

	/*
	 * Allocate up to 'max_host_qng' request structures for the Wide
	 * board. The total size is about 16 KB, so allocate all at once.
	 * If the allocation fails decrement and try again.
	 */
	for (req_cnt = adv_dvc->max_host_qng; req_cnt > 0; req_cnt--) {
		reqp = kmalloc(sizeof(adv_req_t) * req_cnt, GFP_KERNEL);

		ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", reqp, req_cnt,
			(ulong)sizeof(adv_req_t) * req_cnt);

		if (reqp)
			break;
	}

	if (!reqp)
		goto kmalloc_failed;

	adv_dvc->orig_reqp = reqp;

	/*
	 * Allocate up to ADV_TOT_SG_BLOCK request structures for
	 * the Wide board. Each structure is about 136 bytes.
	 * Built as a singly linked free list headed by adv_sgblkp.
	 */
	board->adv_sgblkp = NULL;
	for (sg_cnt = 0; sg_cnt < ADV_TOT_SG_BLOCK; sg_cnt++) {
		sgp = kmalloc(sizeof(adv_sgblk_t), GFP_KERNEL);

		if (!sgp)
			break;

		sgp->next_sgblkp = board->adv_sgblkp;
		board->adv_sgblkp = sgp;

	}

	ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", sg_cnt, sizeof(adv_sgblk_t),
		sizeof(adv_sgblk_t) * sg_cnt);

	if (!board->adv_sgblkp)
		goto kmalloc_failed;

	/*
	 * Point 'adv_reqp' to the request structures and
	 * link them together.
	 */
	req_cnt--;
	reqp[req_cnt].next_reqp = NULL;
	for (; req_cnt > 0; req_cnt--) {
		reqp[req_cnt - 1].next_reqp = &reqp[req_cnt];
	}
	board->adv_reqp = &reqp[0];

	if (adv_dvc->chip_type == ADV_CHIP_ASC3550) {
		ASC_DBG(2, "AdvInitAsc3550Driver()\n");
		warn_code = AdvInitAsc3550Driver(adv_dvc);
	} else if (adv_dvc->chip_type == ADV_CHIP_ASC38C0800) {
		ASC_DBG(2, "AdvInitAsc38C0800Driver()\n");
		warn_code = AdvInitAsc38C0800Driver(adv_dvc);
	} else {
		ASC_DBG(2, "AdvInitAsc38C1600Driver()\n");
		warn_code = AdvInitAsc38C1600Driver(adv_dvc);
	}
	err_code = adv_dvc->err_code;

	if (warn_code || err_code) {
		shost_printk(KERN_WARNING, shost, "error: warn 0x%x, error "
			     "0x%x\n", warn_code, err_code);
	}

	goto exit;

 kmalloc_failed:
	shost_printk(KERN_ERR, shost, "error: kmalloc() failed\n");
	err_code = ADV_ERROR;
 exit:
	/*
	 * NOTE(review): on the kmalloc_failed path any allocations that
	 * did succeed are not freed here — presumably the caller invokes
	 * advansys_wide_free_mem() on failure; verify at the call site.
	 */
	return err_code;
}

/*
 * Free everything advansys_wide_init_chip() allocated: the carrier
 * buffer, the request array and the whole sg-block free list.
 * Safe to call with partially initialized state (kfree(NULL) is a
 * no-op and the sg list walk stops at NULL).
 */
static void advansys_wide_free_mem(struct asc_board *board)
{
	struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
	kfree(adv_dvc->carrier_buf);
	adv_dvc->carrier_buf = NULL;
	kfree(adv_dvc->orig_reqp);
	adv_dvc->orig_reqp = board->adv_reqp = NULL;
	while (board->adv_sgblkp) {
		adv_sgblk_t *sgp = board->adv_sgblkp;
		board->adv_sgblkp = sgp->next_sgblkp;
		kfree(sgp);
	}
}

/*
 * Second-stage initialization for one detected adapter: configure the
 * narrow (ASC) or wide (ADV) library state, read/save the EEPROM,
 * register I/O, DMA and IRQ resources, bring up the RISC chip and
 * finally register the Scsi_Host.  Returns 0 or a negative errno.
 */
static int __devinit advansys_board_found(struct Scsi_Host *shost,
					  unsigned int iop, int bus_type)
{
	struct pci_dev *pdev;
	struct asc_board *boardp = shost_priv(shost);
	ASC_DVC_VAR *asc_dvc_varp = NULL;
	ADV_DVC_VAR *adv_dvc_varp = NULL;
	int share_irq, warn_code, ret;

	pdev = (bus_type == ASC_IS_PCI) ?
	    to_pci_dev(boardp->dev) : NULL;

	if (ASC_NARROW_BOARD(boardp)) {
		ASC_DBG(1, "narrow board\n");
		asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
		asc_dvc_varp->bus_type = bus_type;
		asc_dvc_varp->drv_ptr = boardp;
		asc_dvc_varp->cfg = &boardp->dvc_cfg.asc_dvc_cfg;
		asc_dvc_varp->iop_base = iop;
	} else {
#ifdef CONFIG_PCI
		adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
		adv_dvc_varp->drv_ptr = boardp;
		adv_dvc_varp->cfg = &boardp->dvc_cfg.adv_dvc_cfg;
		/* Identify the wide chip variant from the PCI device ID. */
		if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW) {
			ASC_DBG(1, "wide board ASC-3550\n");
			adv_dvc_varp->chip_type = ADV_CHIP_ASC3550;
		} else if (pdev->device == PCI_DEVICE_ID_38C0800_REV1) {
			ASC_DBG(1, "wide board ASC-38C0800\n");
			adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800;
		} else {
			ASC_DBG(1, "wide board ASC-38C1600\n");
			adv_dvc_varp->chip_type = ADV_CHIP_ASC38C1600;
		}

		/* Wide boards are accessed through BAR 1 (memory mapped). */
		boardp->asc_n_io_port = pci_resource_len(pdev, 1);
		boardp->ioremap_addr = pci_ioremap_bar(pdev, 1);
		if (!boardp->ioremap_addr) {
			shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) "
				     "returned NULL\n",
				     (long)pci_resource_start(pdev, 1),
				     boardp->asc_n_io_port);
			ret = -ENODEV;
			goto err_shost;
		}
		adv_dvc_varp->iop_base = (AdvPortAddr)boardp->ioremap_addr;
		ASC_DBG(1, "iop_base: 0x%p\n", adv_dvc_varp->iop_base);

		/*
		 * Even though it isn't used to access wide boards, other
		 * than for the debug line below, save I/O Port address so
		 * that it can be reported.
		 */
		boardp->ioport = iop;

		ASC_DBG(1, "iopb_chip_id_1 0x%x, iopw_chip_id_0 0x%x\n",
			(ushort)inp(iop + 1), (ushort)inpw(iop));
#endif /* CONFIG_PCI */
	}

#ifdef CONFIG_PROC_FS
	/*
	 * Allocate buffer for printing information from
	 * /proc/scsi/advansys/[0...].
	 */
	boardp->prtbuf = kmalloc(ASC_PRTBUF_SIZE, GFP_KERNEL);
	if (!boardp->prtbuf) {
		shost_printk(KERN_ERR, shost, "kmalloc(%d) returned NULL\n",
			     ASC_PRTBUF_SIZE);
		ret = -ENOMEM;
		goto err_unmap;
	}
#endif /* CONFIG_PROC_FS */

	if (ASC_NARROW_BOARD(boardp)) {
		/*
		 * Set the board bus type and PCI IRQ before
		 * calling AscInitGetConfig().
		 */
		switch (asc_dvc_varp->bus_type) {
#ifdef CONFIG_ISA
		case ASC_IS_ISA:
			shost->unchecked_isa_dma = TRUE;
			share_irq = 0;
			break;
		case ASC_IS_VL:
			shost->unchecked_isa_dma = FALSE;
			share_irq = 0;
			break;
		case ASC_IS_EISA:
			shost->unchecked_isa_dma = FALSE;
			share_irq = IRQF_SHARED;
			break;
#endif /* CONFIG_ISA */
#ifdef CONFIG_PCI
		case ASC_IS_PCI:
			shost->unchecked_isa_dma = FALSE;
			share_irq = IRQF_SHARED;
			break;
#endif /* CONFIG_PCI */
		default:
			shost_printk(KERN_ERR, shost, "unknown adapter type: "
				     "%d\n", asc_dvc_varp->bus_type);
			shost->unchecked_isa_dma = TRUE;
			share_irq = 0;
			break;
		}

		/*
		 * NOTE: AscInitGetConfig() may change the board's
		 * bus_type value. The bus_type value should no
		 * longer be used. If the bus_type field must be
		 * referenced only use the bit-wise AND operator "&".
		 */
		ASC_DBG(2, "AscInitGetConfig()\n");
		ret = AscInitGetConfig(shost) ? -ENODEV : 0;
	} else {
#ifdef CONFIG_PCI
		/*
		 * For Wide boards set PCI information before calling
		 * AdvInitGetConfig().
		 */
		shost->unchecked_isa_dma = FALSE;
		share_irq = IRQF_SHARED;
		ASC_DBG(2, "AdvInitGetConfig()\n");

		ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
#endif /* CONFIG_PCI */
	}

	if (ret)
		goto err_free_proc;

	/*
	 * Save the EEPROM configuration so that it can be displayed
	 * from /proc/scsi/advansys/[0...].
	 */
	if (ASC_NARROW_BOARD(boardp)) {

		ASCEEP_CONFIG *ep;

		/*
		 * Set the adapter's target id bit in the 'init_tidmask' field.
		 */
		boardp->init_tidmask |=
		    ADV_TID_TO_TIDMASK(asc_dvc_varp->cfg->chip_scsi_id);

		/*
		 * Save EEPROM settings for the board.
		 */
		ep = &boardp->eep_config.asc_eep;

		ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable;
		ep->disc_enable = asc_dvc_varp->cfg->disc_enable;
		ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled;
		ASC_EEP_SET_DMA_SPD(ep, asc_dvc_varp->cfg->isa_dma_speed);
		ep->start_motor = asc_dvc_varp->start_motor;
		ep->cntl = asc_dvc_varp->dvc_cntl;
		ep->no_scam = asc_dvc_varp->no_scam;
		ep->max_total_qng = asc_dvc_varp->max_total_qng;
		ASC_EEP_SET_CHIP_ID(ep, asc_dvc_varp->cfg->chip_scsi_id);
		/* 'max_tag_qng' is set to the same value for every device. */
		ep->max_tag_qng = asc_dvc_varp->cfg->max_tag_qng[0];
		ep->adapter_info[0] = asc_dvc_varp->cfg->adapter_info[0];
		ep->adapter_info[1] = asc_dvc_varp->cfg->adapter_info[1];
		ep->adapter_info[2] = asc_dvc_varp->cfg->adapter_info[2];
		ep->adapter_info[3] = asc_dvc_varp->cfg->adapter_info[3];
		ep->adapter_info[4] = asc_dvc_varp->cfg->adapter_info[4];
		ep->adapter_info[5] = asc_dvc_varp->cfg->adapter_info[5];

		/*
		 * Modify board configuration.
		 */
		ASC_DBG(2, "AscInitSetConfig()\n");
		ret = AscInitSetConfig(pdev, shost) ? -ENODEV : 0;
		if (ret)
			goto err_free_proc;
	} else {
		ADVEEP_3550_CONFIG *ep_3550;
		ADVEEP_38C0800_CONFIG *ep_38C0800;
		ADVEEP_38C1600_CONFIG *ep_38C1600;

		/*
		 * Save Wide EEP Configuration Information.
		 */
		if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) {
			ep_3550 = &boardp->eep_config.adv_3550_eep;

			ep_3550->adapter_scsi_id = adv_dvc_varp->chip_scsi_id;
			ep_3550->max_host_qng = adv_dvc_varp->max_host_qng;
			ep_3550->max_dvc_qng = adv_dvc_varp->max_dvc_qng;
			ep_3550->termination = adv_dvc_varp->cfg->termination;
			ep_3550->disc_enable = adv_dvc_varp->cfg->disc_enable;
			ep_3550->bios_ctrl = adv_dvc_varp->bios_ctrl;
			ep_3550->wdtr_able = adv_dvc_varp->wdtr_able;
			ep_3550->sdtr_able = adv_dvc_varp->sdtr_able;
			ep_3550->ultra_able = adv_dvc_varp->ultra_able;
			ep_3550->tagqng_able = adv_dvc_varp->tagqng_able;
			ep_3550->start_motor = adv_dvc_varp->start_motor;
			ep_3550->scsi_reset_delay =
			    adv_dvc_varp->scsi_reset_wait;
			ep_3550->serial_number_word1 =
			    adv_dvc_varp->cfg->serial1;
			ep_3550->serial_number_word2 =
			    adv_dvc_varp->cfg->serial2;
			ep_3550->serial_number_word3 =
			    adv_dvc_varp->cfg->serial3;
		} else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) {
			ep_38C0800 = &boardp->eep_config.adv_38C0800_eep;

			ep_38C0800->adapter_scsi_id =
			    adv_dvc_varp->chip_scsi_id;
			ep_38C0800->max_host_qng = adv_dvc_varp->max_host_qng;
			ep_38C0800->max_dvc_qng = adv_dvc_varp->max_dvc_qng;
			ep_38C0800->termination_lvd =
			    adv_dvc_varp->cfg->termination;
			ep_38C0800->disc_enable =
			    adv_dvc_varp->cfg->disc_enable;
			ep_38C0800->bios_ctrl = adv_dvc_varp->bios_ctrl;
			ep_38C0800->wdtr_able = adv_dvc_varp->wdtr_able;
			ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able;
			ep_38C0800->sdtr_speed1 = adv_dvc_varp->sdtr_speed1;
			ep_38C0800->sdtr_speed2 = adv_dvc_varp->sdtr_speed2;
			ep_38C0800->sdtr_speed3 = adv_dvc_varp->sdtr_speed3;
			ep_38C0800->sdtr_speed4 = adv_dvc_varp->sdtr_speed4;
			/*
			 * NOTE(review): 'tagqng_able' is assigned a second
			 * time here, identical to the assignment above —
			 * harmless but redundant.
			 */
			ep_38C0800->tagqng_able = adv_dvc_varp->tagqng_able;
			ep_38C0800->start_motor = adv_dvc_varp->start_motor;
			ep_38C0800->scsi_reset_delay =
			    adv_dvc_varp->scsi_reset_wait;
			ep_38C0800->serial_number_word1 =
			    adv_dvc_varp->cfg->serial1;
			ep_38C0800->serial_number_word2 =
			    adv_dvc_varp->cfg->serial2;
			ep_38C0800->serial_number_word3 =
			    adv_dvc_varp->cfg->serial3;
		} else {
			ep_38C1600 = &boardp->eep_config.adv_38C1600_eep;

			ep_38C1600->adapter_scsi_id =
			    adv_dvc_varp->chip_scsi_id;
			ep_38C1600->max_host_qng = adv_dvc_varp->max_host_qng;
			ep_38C1600->max_dvc_qng = adv_dvc_varp->max_dvc_qng;
			ep_38C1600->termination_lvd =
			    adv_dvc_varp->cfg->termination;
			ep_38C1600->disc_enable =
			    adv_dvc_varp->cfg->disc_enable;
			ep_38C1600->bios_ctrl = adv_dvc_varp->bios_ctrl;
			ep_38C1600->wdtr_able = adv_dvc_varp->wdtr_able;
			ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able;
			ep_38C1600->sdtr_speed1 = adv_dvc_varp->sdtr_speed1;
			ep_38C1600->sdtr_speed2 = adv_dvc_varp->sdtr_speed2;
			ep_38C1600->sdtr_speed3 = adv_dvc_varp->sdtr_speed3;
			ep_38C1600->sdtr_speed4 = adv_dvc_varp->sdtr_speed4;
			/*
			 * NOTE(review): duplicate 'tagqng_able' assignment,
			 * same as in the 38C0800 branch above.
			 */
			ep_38C1600->tagqng_able = adv_dvc_varp->tagqng_able;
			ep_38C1600->start_motor = adv_dvc_varp->start_motor;
			ep_38C1600->scsi_reset_delay =
			    adv_dvc_varp->scsi_reset_wait;
			ep_38C1600->serial_number_word1 =
			    adv_dvc_varp->cfg->serial1;
			ep_38C1600->serial_number_word2 =
			    adv_dvc_varp->cfg->serial2;
			ep_38C1600->serial_number_word3 =
			    adv_dvc_varp->cfg->serial3;
		}

		/*
		 * Set the adapter's target id bit in the 'init_tidmask' field.
		 */
		boardp->init_tidmask |=
		    ADV_TID_TO_TIDMASK(adv_dvc_varp->chip_scsi_id);
	}

	/*
	 * Channels are numbered beginning with 0. For AdvanSys one host
	 * structure supports one channel. Multi-channel boards have a
	 * separate host structure for each channel.
	 */
	shost->max_channel = 0;
	if (ASC_NARROW_BOARD(boardp)) {
		shost->max_id = ASC_MAX_TID + 1;
		shost->max_lun = ASC_MAX_LUN + 1;
		shost->max_cmd_len = ASC_MAX_CDB_LEN;

		shost->io_port = asc_dvc_varp->iop_base;
		boardp->asc_n_io_port = ASC_IOADR_GAP;
		shost->this_id = asc_dvc_varp->cfg->chip_scsi_id;

		/* Set maximum number of queues the adapter can handle. */
		shost->can_queue = asc_dvc_varp->max_total_qng;
	} else {
		shost->max_id = ADV_MAX_TID + 1;
		shost->max_lun = ADV_MAX_LUN + 1;
		shost->max_cmd_len = ADV_MAX_CDB_LEN;

		/*
		 * Save the I/O Port address and length even though
		 * I/O ports are not used to access Wide boards.
		 * Instead the Wide boards are accessed with
		 * PCI Memory Mapped I/O.
		 */
		shost->io_port = iop;

		shost->this_id = adv_dvc_varp->chip_scsi_id;

		/* Set maximum number of queues the adapter can handle. */
		shost->can_queue = adv_dvc_varp->max_host_qng;
	}

	/*
	 * Following v1.3.89, 'cmd_per_lun' is no longer needed
	 * and should be set to zero.
	 *
	 * But because of a bug introduced in v1.3.89 if the driver is
	 * compiled as a module and 'cmd_per_lun' is zero, the Mid-Level
	 * SCSI function 'allocate_device' will panic. To allow the driver
	 * to work as a module in these kernels set 'cmd_per_lun' to 1.
	 *
	 * Note: This is wrong. cmd_per_lun should be set to the depth
	 * you want on untagged devices always.
#ifdef MODULE
	 */
	shost->cmd_per_lun = 1;
/* #else
	shost->cmd_per_lun = 0;
#endif */

	/*
	 * Set the maximum number of scatter-gather elements the
	 * adapter can handle.
	 */
	if (ASC_NARROW_BOARD(boardp)) {
		/*
		 * Allow two commands with 'sg_tablesize' scatter-gather
		 * elements to be executed simultaneously. This value is
		 * the theoretical hardware limit. It may be decreased
		 * below.
		 */
		shost->sg_tablesize =
		    (((asc_dvc_varp->max_total_qng - 2) / 2) *
		     ASC_SG_LIST_PER_Q) + 1;
	} else {
		shost->sg_tablesize = ADV_MAX_SG_LIST;
	}

	/*
	 * The value of 'sg_tablesize' can not exceed the SCSI
	 * mid-level driver definition of SG_ALL. SG_ALL also
	 * must not be exceeded, because it is used to define the
	 * size of the scatter-gather table in 'struct asc_sg_head'.
	 */
	if (shost->sg_tablesize > SG_ALL) {
		shost->sg_tablesize = SG_ALL;
	}

	ASC_DBG(1, "sg_tablesize: %d\n", shost->sg_tablesize);

	/* BIOS start address.
	 */
	if (ASC_NARROW_BOARD(boardp)) {
		shost->base = AscGetChipBiosAddress(asc_dvc_varp->iop_base,
						    asc_dvc_varp->bus_type);
	} else {
		/*
		 * Fill-in BIOS board variables. The Wide BIOS saves
		 * information in LRAM that is used by the driver.
		 */
		AdvReadWordLram(adv_dvc_varp->iop_base,
				BIOS_SIGNATURE, boardp->bios_signature);
		AdvReadWordLram(adv_dvc_varp->iop_base,
				BIOS_VERSION, boardp->bios_version);
		AdvReadWordLram(adv_dvc_varp->iop_base,
				BIOS_CODESEG, boardp->bios_codeseg);
		AdvReadWordLram(adv_dvc_varp->iop_base,
				BIOS_CODELEN, boardp->bios_codelen);

		ASC_DBG(1, "bios_signature 0x%x, bios_version 0x%x\n",
			boardp->bios_signature, boardp->bios_version);

		ASC_DBG(1, "bios_codeseg 0x%x, bios_codelen 0x%x\n",
			boardp->bios_codeseg, boardp->bios_codelen);

		/*
		 * If the BIOS saved a valid signature, then fill in
		 * the BIOS code segment base address.
		 */
		if (boardp->bios_signature == 0x55AA) {
			/*
			 * Convert x86 realmode code segment to a linear
			 * address by shifting left 4.
			 */
			shost->base = ((ulong)boardp->bios_codeseg << 4);
		} else {
			shost->base = 0;
		}
	}

	/*
	 * Register Board Resources - I/O Port, DMA, IRQ
	 */

	/* Register DMA Channel for Narrow boards. */
	shost->dma_channel = NO_ISA_DMA;	/* Default to no ISA DMA. */
#ifdef CONFIG_ISA
	if (ASC_NARROW_BOARD(boardp)) {
		/* Register DMA channel for ISA bus. */
		if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
			shost->dma_channel = asc_dvc_varp->cfg->isa_dma_channel;
			ret = request_dma(shost->dma_channel, DRV_NAME);
			if (ret) {
				shost_printk(KERN_ERR, shost, "request_dma() "
					     "%d failed %d\n",
					     shost->dma_channel, ret);
				goto err_free_proc;
			}
			AscEnableIsaDma(shost->dma_channel);
		}
	}
#endif /* CONFIG_ISA */

	/* Register IRQ Number.
	 */
	ASC_DBG(2, "request_irq(%d, %p)\n", boardp->irq, shost);

	ret = request_irq(boardp->irq, advansys_interrupt, share_irq,
			  DRV_NAME, shost);

	if (ret) {
		if (ret == -EBUSY) {
			shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x "
				     "already in use\n", boardp->irq);
		} else if (ret == -EINVAL) {
			shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x "
				     "not valid\n", boardp->irq);
		} else {
			shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x "
				     "failed with %d\n", boardp->irq, ret);
		}
		goto err_free_dma;
	}

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	if (ASC_NARROW_BOARD(boardp)) {
		ASC_DBG(2, "AscInitAsc1000Driver()\n");

		asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL);
		if (!asc_dvc_varp->overrun_buf) {
			ret = -ENOMEM;
			goto err_free_irq;
		}
		warn_code = AscInitAsc1000Driver(asc_dvc_varp);

		if (warn_code || asc_dvc_varp->err_code) {
			shost_printk(KERN_ERR, shost, "error: init_state 0x%x, "
				     "warn 0x%x, error 0x%x\n",
				     asc_dvc_varp->init_state, warn_code,
				     asc_dvc_varp->err_code);
			/*
			 * Only bail out if the overrun buffer was never
			 * DMA-mapped; otherwise continue despite warnings.
			 */
			if (!asc_dvc_varp->overrun_dma) {
				ret = -ENODEV;
				goto err_free_mem;
			}
		}
	} else {
		if (advansys_wide_init_chip(shost)) {
			ret = -ENODEV;
			goto err_free_mem;
		}
	}

	ASC_DBG_PRT_SCSI_HOST(2, shost);

	ret = scsi_add_host(shost, boardp->dev);
	if (ret)
		goto err_free_mem;

	scsi_scan_host(shost);
	return 0;

	/* Error unwind: release in reverse order of acquisition. */
 err_free_mem:
	if (ASC_NARROW_BOARD(boardp)) {
		if (asc_dvc_varp->overrun_dma)
			dma_unmap_single(boardp->dev, asc_dvc_varp->overrun_dma,
					 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
		kfree(asc_dvc_varp->overrun_buf);
	} else
		advansys_wide_free_mem(boardp);
 err_free_irq:
	free_irq(boardp->irq, shost);
 err_free_dma:
#ifdef CONFIG_ISA
	if (shost->dma_channel != NO_ISA_DMA)
		free_dma(shost->dma_channel);
#endif
 err_free_proc:
	kfree(boardp->prtbuf);
 err_unmap:
	if (boardp->ioremap_addr)
		iounmap(boardp->ioremap_addr);
 err_shost:
	return ret;
}

/*
 * advansys_release()
 *
 * Release resources allocated for a single AdvanSys adapter.
*/ static int advansys_release(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); ASC_DBG(1, "begin\n"); scsi_remove_host(shost); free_irq(board->irq, shost); #ifdef CONFIG_ISA if (shost->dma_channel != NO_ISA_DMA) { ASC_DBG(1, "free_dma()\n"); free_dma(shost->dma_channel); } #endif if (ASC_NARROW_BOARD(board)) { dma_unmap_single(board->dev, board->dvc_var.asc_dvc_var.overrun_dma, ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); kfree(board->dvc_var.asc_dvc_var.overrun_buf); } else { iounmap(board->ioremap_addr); advansys_wide_free_mem(board); } kfree(board->prtbuf); scsi_host_put(shost); ASC_DBG(1, "end\n"); return 0; } #define ASC_IOADR_TABLE_MAX_IX 11 static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = { 0x100, 0x0110, 0x120, 0x0130, 0x140, 0x0150, 0x0190, 0x0210, 0x0230, 0x0250, 0x0330 }; /* * The ISA IRQ number is found in bits 2 and 3 of the CfgLsw. It decodes as: * 00: 10 * 01: 11 * 10: 12 * 11: 15 */ static unsigned int __devinit advansys_isa_irq_no(PortAddr iop_base) { unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base); unsigned int chip_irq = ((cfg_lsw >> 2) & 0x03) + 10; if (chip_irq == 13) chip_irq = 15; return chip_irq; } static int __devinit advansys_isa_probe(struct device *dev, unsigned int id) { int err = -ENODEV; PortAddr iop_base = _asc_def_iop_base[id]; struct Scsi_Host *shost; struct asc_board *board; if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) { ASC_DBG(1, "I/O port 0x%x busy\n", iop_base); return -ENODEV; } ASC_DBG(1, "probing I/O port 0x%x\n", iop_base); if (!AscFindSignature(iop_base)) goto release_region; if (!(AscGetChipVersion(iop_base, ASC_IS_ISA) & ASC_CHIP_VER_ISA_BIT)) goto release_region; err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = advansys_isa_irq_no(iop_base); board->dev = dev; err = advansys_board_found(shost, iop_base, ASC_IS_ISA); if (err) goto free_host; dev_set_drvdata(dev, shost); return 0; 
free_host: scsi_host_put(shost); release_region: release_region(iop_base, ASC_IOADR_GAP); return err; } static int __devexit advansys_isa_remove(struct device *dev, unsigned int id) { int ioport = _asc_def_iop_base[id]; advansys_release(dev_get_drvdata(dev)); release_region(ioport, ASC_IOADR_GAP); return 0; } static struct isa_driver advansys_isa_driver = { .probe = advansys_isa_probe, .remove = __devexit_p(advansys_isa_remove), .driver = { .owner = THIS_MODULE, .name = DRV_NAME, }, }; /* * The VLB IRQ number is found in bits 2 to 4 of the CfgLsw. It decodes as: * 000: invalid * 001: 10 * 010: 11 * 011: 12 * 100: invalid * 101: 14 * 110: 15 * 111: invalid */ static unsigned int __devinit advansys_vlb_irq_no(PortAddr iop_base) { unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base); unsigned int chip_irq = ((cfg_lsw >> 2) & 0x07) + 9; if ((chip_irq < 10) || (chip_irq == 13) || (chip_irq > 15)) return 0; return chip_irq; } static int __devinit advansys_vlb_probe(struct device *dev, unsigned int id) { int err = -ENODEV; PortAddr iop_base = _asc_def_iop_base[id]; struct Scsi_Host *shost; struct asc_board *board; if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) { ASC_DBG(1, "I/O port 0x%x busy\n", iop_base); return -ENODEV; } ASC_DBG(1, "probing I/O port 0x%x\n", iop_base); if (!AscFindSignature(iop_base)) goto release_region; /* * I don't think this condition can actually happen, but the old * driver did it, and the chances of finding a VLB setup in 2007 * to do testing with is slight to none. 
*/ if (AscGetChipVersion(iop_base, ASC_IS_VL) > ASC_CHIP_MAX_VER_VL) goto release_region; err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = advansys_vlb_irq_no(iop_base); board->dev = dev; err = advansys_board_found(shost, iop_base, ASC_IS_VL); if (err) goto free_host; dev_set_drvdata(dev, shost); return 0; free_host: scsi_host_put(shost); release_region: release_region(iop_base, ASC_IOADR_GAP); return -ENODEV; } static struct isa_driver advansys_vlb_driver = { .probe = advansys_vlb_probe, .remove = __devexit_p(advansys_isa_remove), .driver = { .owner = THIS_MODULE, .name = "advansys_vlb", }, }; static struct eisa_device_id advansys_eisa_table[] __devinitdata = { { "ABP7401" }, { "ABP7501" }, { "" } }; MODULE_DEVICE_TABLE(eisa, advansys_eisa_table); /* * EISA is a little more tricky than PCI; each EISA device may have two * channels, and this driver is written to make each channel its own Scsi_Host */ struct eisa_scsi_data { struct Scsi_Host *host[2]; }; /* * The EISA IRQ number is found in bits 8 to 10 of the CfgLsw. 
It decodes as: * 000: 10 * 001: 11 * 010: 12 * 011: invalid * 100: 14 * 101: 15 * 110: invalid * 111: invalid */ static unsigned int __devinit advansys_eisa_irq_no(struct eisa_device *edev) { unsigned short cfg_lsw = inw(edev->base_addr + 0xc86); unsigned int chip_irq = ((cfg_lsw >> 8) & 0x07) + 10; if ((chip_irq == 13) || (chip_irq > 15)) return 0; return chip_irq; } static int __devinit advansys_eisa_probe(struct device *dev) { int i, ioport, irq = 0; int err; struct eisa_device *edev = to_eisa_device(dev); struct eisa_scsi_data *data; err = -ENOMEM; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) goto fail; ioport = edev->base_addr + 0xc30; err = -ENODEV; for (i = 0; i < 2; i++, ioport += 0x20) { struct asc_board *board; struct Scsi_Host *shost; if (!request_region(ioport, ASC_IOADR_GAP, DRV_NAME)) { printk(KERN_WARNING "Region %x-%x busy\n", ioport, ioport + ASC_IOADR_GAP - 1); continue; } if (!AscFindSignature(ioport)) { release_region(ioport, ASC_IOADR_GAP); continue; } /* * I don't know why we need to do this for EISA chips, but * not for any others. It looks to be equivalent to * AscGetChipCfgMsw, but I may have overlooked something, * so I'm not converting it until I get an EISA board to * test with. 
*/ inw(ioport + 4); if (!irq) irq = advansys_eisa_irq_no(edev); err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = irq; board->dev = dev; err = advansys_board_found(shost, ioport, ASC_IS_EISA); if (!err) { data->host[i] = shost; continue; } scsi_host_put(shost); release_region: release_region(ioport, ASC_IOADR_GAP); break; } if (err) goto free_data; dev_set_drvdata(dev, data); return 0; free_data: kfree(data->host[0]); kfree(data->host[1]); kfree(data); fail: return err; } static __devexit int advansys_eisa_remove(struct device *dev) { int i; struct eisa_scsi_data *data = dev_get_drvdata(dev); for (i = 0; i < 2; i++) { int ioport; struct Scsi_Host *shost = data->host[i]; if (!shost) continue; ioport = shost->io_port; advansys_release(shost); release_region(ioport, ASC_IOADR_GAP); } kfree(data); return 0; } static struct eisa_driver advansys_eisa_driver = { .id_table = advansys_eisa_table, .driver = { .name = DRV_NAME, .probe = advansys_eisa_probe, .remove = __devexit_p(advansys_eisa_remove), } }; /* PCI Devices supported by this driver */ static struct pci_device_id advansys_pci_tbl[] __devinitdata = { {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {} }; MODULE_DEVICE_TABLE(pci, advansys_pci_tbl); static void __devinit advansys_set_latency(struct pci_dev *pdev) { if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) || (pdev->device == PCI_DEVICE_ID_ASP_ABP940)) { pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0); } else { u8 latency; 
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency); if (latency < 0x20) pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20); } } static int __devinit advansys_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err, ioport; struct Scsi_Host *shost; struct asc_board *board; err = pci_enable_device(pdev); if (err) goto fail; err = pci_request_regions(pdev, DRV_NAME); if (err) goto disable_device; pci_set_master(pdev); advansys_set_latency(pdev); err = -ENODEV; if (pci_resource_len(pdev, 0) == 0) goto release_region; ioport = pci_resource_start(pdev, 0); err = -ENOMEM; shost = scsi_host_alloc(&advansys_template, sizeof(*board)); if (!shost) goto release_region; board = shost_priv(shost); board->irq = pdev->irq; board->dev = &pdev->dev; if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW || pdev->device == PCI_DEVICE_ID_38C0800_REV1 || pdev->device == PCI_DEVICE_ID_38C1600_REV1) { board->flags |= ASC_IS_WIDE_BOARD; } err = advansys_board_found(shost, ioport, ASC_IS_PCI); if (err) goto free_host; pci_set_drvdata(pdev, shost); return 0; free_host: scsi_host_put(shost); release_region: pci_release_regions(pdev); disable_device: pci_disable_device(pdev); fail: return err; } static void __devexit advansys_pci_remove(struct pci_dev *pdev) { advansys_release(pci_get_drvdata(pdev)); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver advansys_pci_driver = { .name = DRV_NAME, .id_table = advansys_pci_tbl, .probe = advansys_pci_probe, .remove = __devexit_p(advansys_pci_remove), }; static int __init advansys_init(void) { int error; error = isa_register_driver(&advansys_isa_driver, ASC_IOADR_TABLE_MAX_IX); if (error) goto fail; error = isa_register_driver(&advansys_vlb_driver, ASC_IOADR_TABLE_MAX_IX); if (error) goto unregister_isa; error = eisa_driver_register(&advansys_eisa_driver); if (error) goto unregister_vlb; error = pci_register_driver(&advansys_pci_driver); if (error) goto unregister_eisa; return 0; unregister_eisa: 
eisa_driver_unregister(&advansys_eisa_driver); unregister_vlb: isa_unregister_driver(&advansys_vlb_driver); unregister_isa: isa_unregister_driver(&advansys_isa_driver); fail: return error; } static void __exit advansys_exit(void) { pci_unregister_driver(&advansys_pci_driver); eisa_driver_unregister(&advansys_eisa_driver); isa_unregister_driver(&advansys_vlb_driver); isa_unregister_driver(&advansys_isa_driver); } module_init(advansys_init); module_exit(advansys_exit); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("advansys/mcode.bin"); MODULE_FIRMWARE("advansys/3550.bin"); MODULE_FIRMWARE("advansys/38C0800.bin"); MODULE_FIRMWARE("advansys/38C1600.bin");
gpl-2.0
thicklizard/Komodo_new_source
sound/pci/oxygen/oxygen_mixer.c
3181
31723
/* * C-Media CMI8788 driver - mixer code * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/mutex.h> #include <sound/ac97_codec.h> #include <sound/asoundef.h> #include <sound/control.h> #include <sound/tlv.h> #include "oxygen.h" #include "cm9780.h" static int dac_volume_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { struct oxygen *chip = ctl->private_data; info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = chip->model.dac_channels_mixer; info->value.integer.min = chip->model.dac_volume_min; info->value.integer.max = chip->model.dac_volume_max; return 0; } static int dac_volume_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; unsigned int i; mutex_lock(&chip->mutex); for (i = 0; i < chip->model.dac_channels_mixer; ++i) value->value.integer.value[i] = chip->dac_volume[i]; mutex_unlock(&chip->mutex); return 0; } static int dac_volume_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; unsigned int i; int changed; changed = 0; mutex_lock(&chip->mutex); for (i = 0; i < chip->model.dac_channels_mixer; ++i) if (value->value.integer.value[i] != chip->dac_volume[i]) { chip->dac_volume[i] = value->value.integer.value[i]; changed = 1; } if (changed) chip->model.update_dac_volume(chip); mutex_unlock(&chip->mutex); return changed; } static int 
dac_mute_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; mutex_lock(&chip->mutex); value->value.integer.value[0] = !chip->dac_mute; mutex_unlock(&chip->mutex); return 0; } static int dac_mute_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; int changed; mutex_lock(&chip->mutex); changed = !value->value.integer.value[0] != chip->dac_mute; if (changed) { chip->dac_mute = !value->value.integer.value[0]; chip->model.update_dac_mute(chip); } mutex_unlock(&chip->mutex); return changed; } static unsigned int upmix_item_count(struct oxygen *chip) { if (chip->model.dac_channels_pcm < 8) return 2; else if (chip->model.update_center_lfe_mix) return 5; else return 3; } static int upmix_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[5] = { "Front", "Front+Surround", "Front+Surround+Back", "Front+Surround+Center/LFE", "Front+Surround+Center/LFE+Back", }; struct oxygen *chip = ctl->private_data; unsigned int count = upmix_item_count(chip); return snd_ctl_enum_info(info, 1, count, names); } static int upmix_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; mutex_lock(&chip->mutex); value->value.enumerated.item[0] = chip->dac_routing; mutex_unlock(&chip->mutex); return 0; } void oxygen_update_dac_routing(struct oxygen *chip) { /* DAC 0: front, DAC 1: surround, DAC 2: center/LFE, DAC 3: back */ static const unsigned int reg_values[5] = { /* stereo -> front */ (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) | (1 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) | (2 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) | (3 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT), /* stereo -> front+surround */ (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) | (0 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) | (2 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) | (3 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT), /* stereo -> front+surround+back */ (0 << 
OXYGEN_PLAY_DAC0_SOURCE_SHIFT) | (0 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) | (2 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) | (0 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT), /* stereo -> front+surround+center/LFE */ (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) | (0 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) | (0 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) | (3 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT), /* stereo -> front+surround+center/LFE+back */ (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) | (0 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) | (0 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) | (0 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT), }; u8 channels; unsigned int reg_value; channels = oxygen_read8(chip, OXYGEN_PLAY_CHANNELS) & OXYGEN_PLAY_CHANNELS_MASK; if (channels == OXYGEN_PLAY_CHANNELS_2) reg_value = reg_values[chip->dac_routing]; else if (channels == OXYGEN_PLAY_CHANNELS_8) /* in 7.1 mode, "rear" channels go to the "back" jack */ reg_value = (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) | (3 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) | (2 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) | (1 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT); else reg_value = (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) | (1 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) | (2 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) | (3 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT); if (chip->model.adjust_dac_routing) reg_value = chip->model.adjust_dac_routing(chip, reg_value); oxygen_write16_masked(chip, OXYGEN_PLAY_ROUTING, reg_value, OXYGEN_PLAY_DAC0_SOURCE_MASK | OXYGEN_PLAY_DAC1_SOURCE_MASK | OXYGEN_PLAY_DAC2_SOURCE_MASK | OXYGEN_PLAY_DAC3_SOURCE_MASK); if (chip->model.update_center_lfe_mix) chip->model.update_center_lfe_mix(chip, chip->dac_routing > 2); } static int upmix_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; unsigned int count = upmix_item_count(chip); int changed; if (value->value.enumerated.item[0] >= count) return -EINVAL; mutex_lock(&chip->mutex); changed = value->value.enumerated.item[0] != chip->dac_routing; if (changed) { chip->dac_routing = value->value.enumerated.item[0]; 
oxygen_update_dac_routing(chip); } mutex_unlock(&chip->mutex); return changed; } static int spdif_switch_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; mutex_lock(&chip->mutex); value->value.integer.value[0] = chip->spdif_playback_enable; mutex_unlock(&chip->mutex); return 0; } static unsigned int oxygen_spdif_rate(unsigned int oxygen_rate) { switch (oxygen_rate) { case OXYGEN_RATE_32000: return IEC958_AES3_CON_FS_32000 << OXYGEN_SPDIF_CS_RATE_SHIFT; case OXYGEN_RATE_44100: return IEC958_AES3_CON_FS_44100 << OXYGEN_SPDIF_CS_RATE_SHIFT; default: /* OXYGEN_RATE_48000 */ return IEC958_AES3_CON_FS_48000 << OXYGEN_SPDIF_CS_RATE_SHIFT; case OXYGEN_RATE_64000: return 0xb << OXYGEN_SPDIF_CS_RATE_SHIFT; case OXYGEN_RATE_88200: return IEC958_AES3_CON_FS_88200 << OXYGEN_SPDIF_CS_RATE_SHIFT; case OXYGEN_RATE_96000: return IEC958_AES3_CON_FS_96000 << OXYGEN_SPDIF_CS_RATE_SHIFT; case OXYGEN_RATE_176400: return IEC958_AES3_CON_FS_176400 << OXYGEN_SPDIF_CS_RATE_SHIFT; case OXYGEN_RATE_192000: return IEC958_AES3_CON_FS_192000 << OXYGEN_SPDIF_CS_RATE_SHIFT; } } void oxygen_update_spdif_source(struct oxygen *chip) { u32 old_control, new_control; u16 old_routing, new_routing; unsigned int oxygen_rate; old_control = oxygen_read32(chip, OXYGEN_SPDIF_CONTROL); old_routing = oxygen_read16(chip, OXYGEN_PLAY_ROUTING); if (chip->pcm_active & (1 << PCM_SPDIF)) { new_control = old_control | OXYGEN_SPDIF_OUT_ENABLE; new_routing = (old_routing & ~OXYGEN_PLAY_SPDIF_MASK) | OXYGEN_PLAY_SPDIF_SPDIF; oxygen_rate = (old_control >> OXYGEN_SPDIF_OUT_RATE_SHIFT) & OXYGEN_I2S_RATE_MASK; /* S/PDIF rate was already set by the caller */ } else if ((chip->pcm_active & (1 << PCM_MULTICH)) && chip->spdif_playback_enable) { new_routing = (old_routing & ~OXYGEN_PLAY_SPDIF_MASK) | OXYGEN_PLAY_SPDIF_MULTICH_01; oxygen_rate = oxygen_read16(chip, OXYGEN_I2S_MULTICH_FORMAT) & OXYGEN_I2S_RATE_MASK; new_control = (old_control & ~OXYGEN_SPDIF_OUT_RATE_MASK) | 
(oxygen_rate << OXYGEN_SPDIF_OUT_RATE_SHIFT) | OXYGEN_SPDIF_OUT_ENABLE; } else { new_control = old_control & ~OXYGEN_SPDIF_OUT_ENABLE; new_routing = old_routing; oxygen_rate = OXYGEN_RATE_44100; } if (old_routing != new_routing) { oxygen_write32(chip, OXYGEN_SPDIF_CONTROL, new_control & ~OXYGEN_SPDIF_OUT_ENABLE); oxygen_write16(chip, OXYGEN_PLAY_ROUTING, new_routing); } if (new_control & OXYGEN_SPDIF_OUT_ENABLE) oxygen_write32(chip, OXYGEN_SPDIF_OUTPUT_BITS, oxygen_spdif_rate(oxygen_rate) | ((chip->pcm_active & (1 << PCM_SPDIF)) ? chip->spdif_pcm_bits : chip->spdif_bits)); oxygen_write32(chip, OXYGEN_SPDIF_CONTROL, new_control); } static int spdif_switch_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; int changed; mutex_lock(&chip->mutex); changed = value->value.integer.value[0] != chip->spdif_playback_enable; if (changed) { chip->spdif_playback_enable = !!value->value.integer.value[0]; spin_lock_irq(&chip->reg_lock); oxygen_update_spdif_source(chip); spin_unlock_irq(&chip->reg_lock); } mutex_unlock(&chip->mutex); return changed; } static int spdif_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_IEC958; info->count = 1; return 0; } static void oxygen_to_iec958(u32 bits, struct snd_ctl_elem_value *value) { value->value.iec958.status[0] = bits & (OXYGEN_SPDIF_NONAUDIO | OXYGEN_SPDIF_C | OXYGEN_SPDIF_PREEMPHASIS); value->value.iec958.status[1] = /* category and original */ bits >> OXYGEN_SPDIF_CATEGORY_SHIFT; } static u32 iec958_to_oxygen(struct snd_ctl_elem_value *value) { u32 bits; bits = value->value.iec958.status[0] & (OXYGEN_SPDIF_NONAUDIO | OXYGEN_SPDIF_C | OXYGEN_SPDIF_PREEMPHASIS); bits |= value->value.iec958.status[1] << OXYGEN_SPDIF_CATEGORY_SHIFT; if (bits & OXYGEN_SPDIF_NONAUDIO) bits |= OXYGEN_SPDIF_V; return bits; } static inline void write_spdif_bits(struct oxygen *chip, u32 bits) { oxygen_write32_masked(chip, OXYGEN_SPDIF_OUTPUT_BITS, bits, 
OXYGEN_SPDIF_NONAUDIO | OXYGEN_SPDIF_C | OXYGEN_SPDIF_PREEMPHASIS | OXYGEN_SPDIF_CATEGORY_MASK | OXYGEN_SPDIF_ORIGINAL | OXYGEN_SPDIF_V); } static int spdif_default_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; mutex_lock(&chip->mutex); oxygen_to_iec958(chip->spdif_bits, value); mutex_unlock(&chip->mutex); return 0; } static int spdif_default_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u32 new_bits; int changed; new_bits = iec958_to_oxygen(value); mutex_lock(&chip->mutex); changed = new_bits != chip->spdif_bits; if (changed) { chip->spdif_bits = new_bits; if (!(chip->pcm_active & (1 << PCM_SPDIF))) write_spdif_bits(chip, new_bits); } mutex_unlock(&chip->mutex); return changed; } static int spdif_mask_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { value->value.iec958.status[0] = IEC958_AES0_NONAUDIO | IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS; value->value.iec958.status[1] = IEC958_AES1_CON_CATEGORY | IEC958_AES1_CON_ORIGINAL; return 0; } static int spdif_pcm_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; mutex_lock(&chip->mutex); oxygen_to_iec958(chip->spdif_pcm_bits, value); mutex_unlock(&chip->mutex); return 0; } static int spdif_pcm_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u32 new_bits; int changed; new_bits = iec958_to_oxygen(value); mutex_lock(&chip->mutex); changed = new_bits != chip->spdif_pcm_bits; if (changed) { chip->spdif_pcm_bits = new_bits; if (chip->pcm_active & (1 << PCM_SPDIF)) write_spdif_bits(chip, new_bits); } mutex_unlock(&chip->mutex); return changed; } static int spdif_input_mask_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { value->value.iec958.status[0] = 0xff; value->value.iec958.status[1] = 0xff; value->value.iec958.status[2] = 0xff; 
value->value.iec958.status[3] = 0xff; return 0; } static int spdif_input_default_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u32 bits; bits = oxygen_read32(chip, OXYGEN_SPDIF_INPUT_BITS); value->value.iec958.status[0] = bits; value->value.iec958.status[1] = bits >> 8; value->value.iec958.status[2] = bits >> 16; value->value.iec958.status[3] = bits >> 24; return 0; } static int spdif_bit_switch_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u32 bit = ctl->private_value; value->value.integer.value[0] = !!(oxygen_read32(chip, OXYGEN_SPDIF_CONTROL) & bit); return 0; } static int spdif_bit_switch_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u32 bit = ctl->private_value; u32 oldreg, newreg; int changed; spin_lock_irq(&chip->reg_lock); oldreg = oxygen_read32(chip, OXYGEN_SPDIF_CONTROL); if (value->value.integer.value[0]) newreg = oldreg | bit; else newreg = oldreg & ~bit; changed = newreg != oldreg; if (changed) oxygen_write32(chip, OXYGEN_SPDIF_CONTROL, newreg); spin_unlock_irq(&chip->reg_lock); return changed; } static int monitor_volume_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 1; info->value.integer.min = 0; info->value.integer.max = 1; return 0; } static int monitor_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u8 bit = ctl->private_value; int invert = ctl->private_value & (1 << 8); value->value.integer.value[0] = !!invert ^ !!(oxygen_read8(chip, OXYGEN_ADC_MONITOR) & bit); return 0; } static int monitor_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u8 bit = ctl->private_value; int invert = ctl->private_value & (1 << 8); u8 oldreg, newreg; int changed; spin_lock_irq(&chip->reg_lock); 
oldreg = oxygen_read8(chip, OXYGEN_ADC_MONITOR); if ((!!value->value.integer.value[0] ^ !!invert) != 0) newreg = oldreg | bit; else newreg = oldreg & ~bit; changed = newreg != oldreg; if (changed) oxygen_write8(chip, OXYGEN_ADC_MONITOR, newreg); spin_unlock_irq(&chip->reg_lock); return changed; } static int ac97_switch_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; unsigned int codec = (ctl->private_value >> 24) & 1; unsigned int index = ctl->private_value & 0xff; unsigned int bitnr = (ctl->private_value >> 8) & 0xff; int invert = ctl->private_value & (1 << 16); u16 reg; mutex_lock(&chip->mutex); reg = oxygen_read_ac97(chip, codec, index); mutex_unlock(&chip->mutex); if (!(reg & (1 << bitnr)) ^ !invert) value->value.integer.value[0] = 1; else value->value.integer.value[0] = 0; return 0; } static void mute_ac97_ctl(struct oxygen *chip, unsigned int control) { unsigned int priv_idx; u16 value; if (!chip->controls[control]) return; priv_idx = chip->controls[control]->private_value & 0xff; value = oxygen_read_ac97(chip, 0, priv_idx); if (!(value & 0x8000)) { oxygen_write_ac97(chip, 0, priv_idx, value | 0x8000); if (chip->model.ac97_switch) chip->model.ac97_switch(chip, priv_idx, 0x8000); snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->controls[control]->id); } } static int ac97_switch_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; unsigned int codec = (ctl->private_value >> 24) & 1; unsigned int index = ctl->private_value & 0xff; unsigned int bitnr = (ctl->private_value >> 8) & 0xff; int invert = ctl->private_value & (1 << 16); u16 oldreg, newreg; int change; mutex_lock(&chip->mutex); oldreg = oxygen_read_ac97(chip, codec, index); newreg = oldreg; if (!value->value.integer.value[0] ^ !invert) newreg |= 1 << bitnr; else newreg &= ~(1 << bitnr); change = newreg != oldreg; if (change) { oxygen_write_ac97(chip, codec, index, newreg); if 
(codec == 0 && chip->model.ac97_switch) chip->model.ac97_switch(chip, index, newreg & 0x8000); if (index == AC97_LINE) { oxygen_write_ac97_masked(chip, 0, CM9780_GPIO_STATUS, newreg & 0x8000 ? CM9780_GPO0 : 0, CM9780_GPO0); if (!(newreg & 0x8000)) { mute_ac97_ctl(chip, CONTROL_MIC_CAPTURE_SWITCH); mute_ac97_ctl(chip, CONTROL_CD_CAPTURE_SWITCH); mute_ac97_ctl(chip, CONTROL_AUX_CAPTURE_SWITCH); } } else if ((index == AC97_MIC || index == AC97_CD || index == AC97_VIDEO || index == AC97_AUX) && bitnr == 15 && !(newreg & 0x8000)) { mute_ac97_ctl(chip, CONTROL_LINE_CAPTURE_SWITCH); oxygen_write_ac97_masked(chip, 0, CM9780_GPIO_STATUS, CM9780_GPO0, CM9780_GPO0); } } mutex_unlock(&chip->mutex); return change; } static int ac97_volume_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { int stereo = (ctl->private_value >> 16) & 1; info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = stereo ? 2 : 1; info->value.integer.min = 0; info->value.integer.max = 0x1f; return 0; } static int ac97_volume_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; unsigned int codec = (ctl->private_value >> 24) & 1; int stereo = (ctl->private_value >> 16) & 1; unsigned int index = ctl->private_value & 0xff; u16 reg; mutex_lock(&chip->mutex); reg = oxygen_read_ac97(chip, codec, index); mutex_unlock(&chip->mutex); value->value.integer.value[0] = 31 - (reg & 0x1f); if (stereo) value->value.integer.value[1] = 31 - ((reg >> 8) & 0x1f); return 0; } static int ac97_volume_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; unsigned int codec = (ctl->private_value >> 24) & 1; int stereo = (ctl->private_value >> 16) & 1; unsigned int index = ctl->private_value & 0xff; u16 oldreg, newreg; int change; mutex_lock(&chip->mutex); oldreg = oxygen_read_ac97(chip, codec, index); newreg = oldreg; newreg = (newreg & ~0x1f) | (31 - (value->value.integer.value[0] & 0x1f)); if (stereo) newreg 
= (newreg & ~0x1f00) | ((31 - (value->value.integer.value[1] & 0x1f)) << 8); else newreg = (newreg & ~0x1f00) | ((newreg & 0x1f) << 8); change = newreg != oldreg; if (change) oxygen_write_ac97(chip, codec, index, newreg); mutex_unlock(&chip->mutex); return change; } static int mic_fmic_source_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { static const char *const names[] = { "Mic Jack", "Front Panel" }; return snd_ctl_enum_info(info, 1, 2, names); } static int mic_fmic_source_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; mutex_lock(&chip->mutex); value->value.enumerated.item[0] = !!(oxygen_read_ac97(chip, 0, CM9780_JACK) & CM9780_FMIC2MIC); mutex_unlock(&chip->mutex); return 0; } static int mic_fmic_source_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u16 oldreg, newreg; int change; mutex_lock(&chip->mutex); oldreg = oxygen_read_ac97(chip, 0, CM9780_JACK); if (value->value.enumerated.item[0]) newreg = oldreg | CM9780_FMIC2MIC; else newreg = oldreg & ~CM9780_FMIC2MIC; change = newreg != oldreg; if (change) oxygen_write_ac97(chip, 0, CM9780_JACK, newreg); mutex_unlock(&chip->mutex); return change; } static int ac97_fp_rec_volume_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0; info->value.integer.max = 7; return 0; } static int ac97_fp_rec_volume_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u16 reg; mutex_lock(&chip->mutex); reg = oxygen_read_ac97(chip, 1, AC97_REC_GAIN); mutex_unlock(&chip->mutex); value->value.integer.value[0] = reg & 7; value->value.integer.value[1] = (reg >> 8) & 7; return 0; } static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u16 oldreg, newreg; int change; 
mutex_lock(&chip->mutex); oldreg = oxygen_read_ac97(chip, 1, AC97_REC_GAIN); newreg = oldreg & ~0x0707; newreg = newreg | (value->value.integer.value[0] & 7); newreg = newreg | ((value->value.integer.value[0] & 7) << 8); change = newreg != oldreg; if (change) oxygen_write_ac97(chip, 1, AC97_REC_GAIN, newreg); mutex_unlock(&chip->mutex); return change; } #define AC97_SWITCH(xname, codec, index, bitnr, invert) { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = xname, \ .info = snd_ctl_boolean_mono_info, \ .get = ac97_switch_get, \ .put = ac97_switch_put, \ .private_value = ((codec) << 24) | ((invert) << 16) | \ ((bitnr) << 8) | (index), \ } #define AC97_VOLUME(xname, codec, index, stereo) { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = xname, \ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \ SNDRV_CTL_ELEM_ACCESS_TLV_READ, \ .info = ac97_volume_info, \ .get = ac97_volume_get, \ .put = ac97_volume_put, \ .tlv = { .p = ac97_db_scale, }, \ .private_value = ((codec) << 24) | ((stereo) << 16) | (index), \ } static DECLARE_TLV_DB_SCALE(monitor_db_scale, -600, 600, 0); static DECLARE_TLV_DB_SCALE(ac97_db_scale, -3450, 150, 0); static DECLARE_TLV_DB_SCALE(ac97_rec_db_scale, 0, 150, 0); static const struct snd_kcontrol_new controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = dac_volume_info, .get = dac_volume_get, .put = dac_volume_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Switch", .info = snd_ctl_boolean_mono_info, .get = dac_mute_get, .put = dac_mute_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Stereo Upmixing", .info = upmix_info, .get = upmix_get, .put = upmix_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, SWITCH), .info = snd_ctl_boolean_mono_info, .get = spdif_switch_get, .put = spdif_switch_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .device = 1, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), 
.info = spdif_info, .get = spdif_default_get, .put = spdif_default_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .device = 1, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK), .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = spdif_info, .get = spdif_mask_get, }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .device = 1, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PCM_STREAM), .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE, .info = spdif_info, .get = spdif_pcm_get, .put = spdif_pcm_put, }, }; static const struct snd_kcontrol_new spdif_input_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .device = 1, .name = SNDRV_CTL_NAME_IEC958("", CAPTURE, MASK), .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = spdif_info, .get = spdif_input_mask_get, }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .device = 1, .name = SNDRV_CTL_NAME_IEC958("", CAPTURE, DEFAULT), .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = spdif_info, .get = spdif_input_default_get, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SNDRV_CTL_NAME_IEC958("Loopback ", NONE, SWITCH), .info = snd_ctl_boolean_mono_info, .get = spdif_bit_switch_get, .put = spdif_bit_switch_put, .private_value = OXYGEN_SPDIF_LOOPBACK, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SNDRV_CTL_NAME_IEC958("Validity Check ",CAPTURE,SWITCH), .info = snd_ctl_boolean_mono_info, .get = spdif_bit_switch_get, .put = spdif_bit_switch_put, .private_value = OXYGEN_SPDIF_SPDVALID, }, }; static const struct { unsigned int pcm_dev; struct snd_kcontrol_new controls[2]; } monitor_controls[] = { { .pcm_dev = CAPTURE_0_FROM_I2S_1, .controls = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Input Monitor Playback Switch", .info = snd_ctl_boolean_mono_info, .get = monitor_get, .put = monitor_put, .private_value = OXYGEN_ADC_MONITOR_A, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Input Monitor Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = monitor_volume_info, .get 
= monitor_get, .put = monitor_put, .private_value = OXYGEN_ADC_MONITOR_A_HALF_VOL | (1 << 8), .tlv = { .p = monitor_db_scale, }, }, }, }, { .pcm_dev = CAPTURE_0_FROM_I2S_2, .controls = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Input Monitor Playback Switch", .info = snd_ctl_boolean_mono_info, .get = monitor_get, .put = monitor_put, .private_value = OXYGEN_ADC_MONITOR_B, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Input Monitor Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = monitor_volume_info, .get = monitor_get, .put = monitor_put, .private_value = OXYGEN_ADC_MONITOR_B_HALF_VOL | (1 << 8), .tlv = { .p = monitor_db_scale, }, }, }, }, { .pcm_dev = CAPTURE_2_FROM_I2S_2, .controls = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Input Monitor Playback Switch", .index = 1, .info = snd_ctl_boolean_mono_info, .get = monitor_get, .put = monitor_put, .private_value = OXYGEN_ADC_MONITOR_B, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Analog Input Monitor Playback Volume", .index = 1, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = monitor_volume_info, .get = monitor_get, .put = monitor_put, .private_value = OXYGEN_ADC_MONITOR_B_HALF_VOL | (1 << 8), .tlv = { .p = monitor_db_scale, }, }, }, }, { .pcm_dev = CAPTURE_1_FROM_SPDIF, .controls = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Digital Input Monitor Playback Switch", .info = snd_ctl_boolean_mono_info, .get = monitor_get, .put = monitor_put, .private_value = OXYGEN_ADC_MONITOR_C, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Digital Input Monitor Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = monitor_volume_info, .get = monitor_get, .put = monitor_put, .private_value = OXYGEN_ADC_MONITOR_C_HALF_VOL | (1 << 8), .tlv = { .p = monitor_db_scale, }, }, }, }, }; static const struct snd_kcontrol_new ac97_controls[] = { 
AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC, 0), AC97_SWITCH("Mic Capture Switch", 0, AC97_MIC, 15, 1), AC97_SWITCH("Mic Boost (+20dB)", 0, AC97_MIC, 6, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Source Capture Enum", .info = mic_fmic_source_info, .get = mic_fmic_source_get, .put = mic_fmic_source_put, }, AC97_SWITCH("Line Capture Switch", 0, AC97_LINE, 15, 1), AC97_VOLUME("CD Capture Volume", 0, AC97_CD, 1), AC97_SWITCH("CD Capture Switch", 0, AC97_CD, 15, 1), AC97_VOLUME("Aux Capture Volume", 0, AC97_AUX, 1), AC97_SWITCH("Aux Capture Switch", 0, AC97_AUX, 15, 1), }; static const struct snd_kcontrol_new ac97_fp_controls[] = { AC97_VOLUME("Front Panel Playback Volume", 1, AC97_HEADPHONE, 1), AC97_SWITCH("Front Panel Playback Switch", 1, AC97_HEADPHONE, 15, 1), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Front Panel Capture Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = ac97_fp_rec_volume_info, .get = ac97_fp_rec_volume_get, .put = ac97_fp_rec_volume_put, .tlv = { .p = ac97_rec_db_scale, }, }, AC97_SWITCH("Front Panel Capture Switch", 1, AC97_REC_GAIN, 15, 1), }; static void oxygen_any_ctl_free(struct snd_kcontrol *ctl) { struct oxygen *chip = ctl->private_data; unsigned int i; /* I'm too lazy to write a function for each control :-) */ for (i = 0; i < ARRAY_SIZE(chip->controls); ++i) chip->controls[i] = NULL; } static int add_controls(struct oxygen *chip, const struct snd_kcontrol_new controls[], unsigned int count) { static const char *const known_ctl_names[CONTROL_COUNT] = { [CONTROL_SPDIF_PCM] = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PCM_STREAM), [CONTROL_SPDIF_INPUT_BITS] = SNDRV_CTL_NAME_IEC958("", CAPTURE, DEFAULT), [CONTROL_MIC_CAPTURE_SWITCH] = "Mic Capture Switch", [CONTROL_LINE_CAPTURE_SWITCH] = "Line Capture Switch", [CONTROL_CD_CAPTURE_SWITCH] = "CD Capture Switch", [CONTROL_AUX_CAPTURE_SWITCH] = "Aux Capture Switch", }; unsigned int i, j; struct snd_kcontrol_new template; struct snd_kcontrol 
*ctl; int err; for (i = 0; i < count; ++i) { template = controls[i]; if (chip->model.control_filter) { err = chip->model.control_filter(&template); if (err < 0) return err; if (err == 1) continue; } if (!strcmp(template.name, "Stereo Upmixing") && chip->model.dac_channels_pcm == 2) continue; if (!strcmp(template.name, "Mic Source Capture Enum") && !(chip->model.device_config & AC97_FMIC_SWITCH)) continue; if (!strncmp(template.name, "CD Capture ", 11) && !(chip->model.device_config & AC97_CD_INPUT)) continue; if (!strcmp(template.name, "Master Playback Volume") && chip->model.dac_tlv) { template.tlv.p = chip->model.dac_tlv; template.access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ; } ctl = snd_ctl_new1(&template, chip); if (!ctl) return -ENOMEM; err = snd_ctl_add(chip->card, ctl); if (err < 0) return err; for (j = 0; j < CONTROL_COUNT; ++j) if (!strcmp(ctl->id.name, known_ctl_names[j])) { chip->controls[j] = ctl; ctl->private_free = oxygen_any_ctl_free; } } return 0; } int oxygen_mixer_init(struct oxygen *chip) { unsigned int i; int err; err = add_controls(chip, controls, ARRAY_SIZE(controls)); if (err < 0) return err; if (chip->model.device_config & CAPTURE_1_FROM_SPDIF) { err = add_controls(chip, spdif_input_controls, ARRAY_SIZE(spdif_input_controls)); if (err < 0) return err; } for (i = 0; i < ARRAY_SIZE(monitor_controls); ++i) { if (!(chip->model.device_config & monitor_controls[i].pcm_dev)) continue; err = add_controls(chip, monitor_controls[i].controls, ARRAY_SIZE(monitor_controls[i].controls)); if (err < 0) return err; } if (chip->has_ac97_0) { err = add_controls(chip, ac97_controls, ARRAY_SIZE(ac97_controls)); if (err < 0) return err; } if (chip->has_ac97_1) { err = add_controls(chip, ac97_fp_controls, ARRAY_SIZE(ac97_fp_controls)); if (err < 0) return err; } return chip->model.mixer_init ? chip->model.mixer_init(chip) : 0; }
gpl-2.0
artcotto/ElementalX-N5
arch/alpha/kernel/pci.c
4461
11296
/* * linux/arch/alpha/kernel/pci.c * * Extruded from code written by * Dave Rusling (david.rusling@reo.mts.dec.com) * David Mosberger (davidm@cs.arizona.edu) */ /* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */ /* * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru> * PCI-PCI bridges cleanup */ #include <linux/string.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/bootmem.h> #include <linux/module.h> #include <linux/cache.h> #include <linux/slab.h> #include <asm/machvec.h> #include "proto.h" #include "pci_impl.h" /* * Some string constants used by the various core logics. */ const char *const pci_io_names[] = { "PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3", "PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7" }; const char *const pci_mem_names[] = { "PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3", "PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7" }; const char pci_hae0_name[] = "HAE0"; /* * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource * assignments. */ /* * The PCI controller list. */ struct pci_controller *hose_head, **hose_tail = &hose_head; struct pci_controller *pci_isa_hose; /* * Quirks. */ static void __init quirk_isa_bridge(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_ISA << 8; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge); static void __init quirk_cypress(struct pci_dev *dev) { /* The Notorious Cy82C693 chip. */ /* The generic legacy mode IDE fixup in drivers/pci/probe.c doesn't work correctly with the Cypress IDE controller as it has non-standard register layout. Fix that. 
*/ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) { dev->resource[2].start = dev->resource[3].start = 0; dev->resource[2].end = dev->resource[3].end = 0; dev->resource[2].flags = dev->resource[3].flags = 0; if (PCI_FUNC(dev->devfn) == 2) { dev->resource[0].start = 0x170; dev->resource[0].end = 0x177; dev->resource[1].start = 0x376; dev->resource[1].end = 0x376; } } /* The Cypress bridge responds on the PCI bus in the address range 0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no way to turn this off. The bridge also supports several extended BIOS ranges (disabled after power-up), and some consoles do turn them on. So if we use a large direct-map window, or a large SG window, we must avoid the entire 0xfff00000-0xffffffff region. */ if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) { if (__direct_map_base + __direct_map_size >= 0xfff00000UL) __direct_map_size = 0xfff00000UL - __direct_map_base; else { struct pci_controller *hose = dev->sysdata; struct pci_iommu_arena *pci = hose->sg_pci; if (pci && pci->dma_base + pci->size >= 0xfff00000UL) pci->size = 0xfff00000UL - pci->dma_base; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress); /* Called for each device after PCI setup is done. 
*/ static void __init pcibios_fixup_final(struct pci_dev *dev) { unsigned int class = dev->class >> 8; if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) { dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1; isa_bridge = dev; } } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final); /* Just declaring that the power-of-ten prefixes are actually the power-of-two ones doesn't make it true :) */ #define KB 1024 #define MB (1024*KB) #define GB (1024*MB) resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; struct pci_controller *hose = dev->sysdata; unsigned long alignto; resource_size_t start = res->start; if (res->flags & IORESOURCE_IO) { /* Make sure we start at our min on all hoses */ if (start - hose->io_space->start < PCIBIOS_MIN_IO) start = PCIBIOS_MIN_IO + hose->io_space->start; /* * Put everything into 0x00-0xff region modulo 0x400 */ if (start & 0x300) start = (start + 0x3ff) & ~0x3ff; } else if (res->flags & IORESOURCE_MEM) { /* Make sure we start at our min on all hoses */ if (start - hose->mem_space->start < PCIBIOS_MIN_MEM) start = PCIBIOS_MIN_MEM + hose->mem_space->start; /* * The following holds at least for the Low Cost * Alpha implementation of the PCI interface: * * In sparse memory address space, the first * octant (16MB) of every 128MB segment is * aliased to the very first 16 MB of the * address space (i.e., it aliases the ISA * memory address space). Thus, we try to * avoid allocating PCI devices in that range. * Can be allocated in 2nd-7th octant only. * Devices that need more than 112MB of * address space must be accessed through * dense memory space only! */ /* Align to multiple of size of minimum base. 
*/ alignto = max_t(resource_size_t, 0x1000, align); start = ALIGN(start, alignto); if (hose->sparse_mem_base && size <= 7 * 16*MB) { if (((start / (16*MB)) & 0x7) == 0) { start &= ~(128*MB - 1); start += 16*MB; start = ALIGN(start, alignto); } if (start/(128*MB) != (start + size - 1)/(128*MB)) { start &= ~(128*MB - 1); start += (128 + 16)*MB; start = ALIGN(start, alignto); } } } return start; } #undef KB #undef MB #undef GB static int __init pcibios_init(void) { if (alpha_mv.init_pci) alpha_mv.init_pci(); return 0; } subsys_initcall(pcibios_init); char * __devinit pcibios_setup(char *str) { return str; } #ifdef ALPHA_RESTORE_SRM_SETUP static struct pdev_srm_saved_conf *srm_saved_configs; void __devinit pdev_save_srm_config(struct pci_dev *dev) { struct pdev_srm_saved_conf *tmp; static int printed = 0; if (!alpha_using_srm || pci_has_flag(PCI_PROBE_ONLY)) return; if (!printed) { printk(KERN_INFO "pci: enabling save/restore of SRM state\n"); printed = 1; } tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) { printk(KERN_ERR "%s: kmalloc() failed!\n", __func__); return; } tmp->next = srm_saved_configs; tmp->dev = dev; pci_save_state(dev); srm_saved_configs = tmp; } void pci_restore_srm_config(void) { struct pdev_srm_saved_conf *tmp; /* No need to restore if probed only. */ if (pci_has_flag(PCI_PROBE_ONLY)) return; /* Restore SRM config. 
*/ for (tmp = srm_saved_configs; tmp; tmp = tmp->next) { pci_restore_state(tmp->dev); } } #endif void __devinit pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev = bus->self; if (pci_has_flag(PCI_PROBE_ONLY) && dev && (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { pci_read_bridge_bases(bus); } list_for_each_entry(dev, &bus->devices, bus_list) { pdev_save_srm_config(dev); } } void __init pcibios_update_irq(struct pci_dev *dev, int irq) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); } int pcibios_enable_device(struct pci_dev *dev, int mask) { return pci_enable_resources(dev, mask); } /* * If we set up a device for bus mastering, we need to check the latency * timer as certain firmware forgets to set it properly, as seen * on SX164 and LX164 with SRM. */ void pcibios_set_master(struct pci_dev *dev) { u8 lat; pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); if (lat >= 16) return; printk("PCI: Setting latency timer of device %s to 64\n", pci_name(dev)); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); } void __init pcibios_claim_one_bus(struct pci_bus *b) { struct pci_dev *dev; struct pci_bus *child_bus; list_for_each_entry(dev, &b->devices, bus_list) { int i; for (i = 0; i < PCI_NUM_RESOURCES; i++) { struct resource *r = &dev->resource[i]; if (r->parent || !r->start || !r->flags) continue; if (pci_has_flag(PCI_PROBE_ONLY) || (r->flags & IORESOURCE_PCI_FIXED)) pci_claim_resource(dev, i); } } list_for_each_entry(child_bus, &b->children, node) pcibios_claim_one_bus(child_bus); } static void __init pcibios_claim_console_setup(void) { struct pci_bus *b; list_for_each_entry(b, &pci_root_buses, node) pcibios_claim_one_bus(b); } void __init common_init_pci(void) { struct pci_controller *hose; struct list_head resources; struct pci_bus *bus; int next_busno; int need_domain_info = 0; u32 pci_mem_end; u32 sg_base; unsigned long end; /* Scan all of the recorded PCI controllers. 
*/ for (next_busno = 0, hose = hose_head; hose; hose = hose->next) { sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0; /* Adjust hose mem_space limit to prevent PCI allocations in the iommu windows. */ pci_mem_end = min((u32)__direct_map_base, sg_base) - 1; end = hose->mem_space->start + pci_mem_end; if (hose->mem_space->end > end) hose->mem_space->end = end; INIT_LIST_HEAD(&resources); pci_add_resource_offset(&resources, hose->io_space, hose->io_space->start); pci_add_resource_offset(&resources, hose->mem_space, hose->mem_space->start); bus = pci_scan_root_bus(NULL, next_busno, alpha_mv.pci_ops, hose, &resources); hose->bus = bus; hose->need_domain_info = need_domain_info; next_busno = bus->subordinate + 1; /* Don't allow 8-bit bus number overflow inside the hose - reserve some space for bridges. */ if (next_busno > 224) { next_busno = 0; need_domain_info = 1; } } pcibios_claim_console_setup(); pci_assign_unassigned_resources(); pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq); } struct pci_controller * __init alloc_pci_controller(void) { struct pci_controller *hose; hose = alloc_bootmem(sizeof(*hose)); *hose_tail = hose; hose_tail = &hose->next; return hose; } struct resource * __init alloc_resource(void) { struct resource *res; res = alloc_bootmem(sizeof(*res)); return res; } /* Provide information on locations of various I/O regions in physical memory. Do this on a per-card basis so that we choose the right hose. */ asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) { struct pci_controller *hose; struct pci_dev *dev; /* from hose or from bus.devfn */ if (which & IOBASE_FROM_HOSE) { for(hose = hose_head; hose; hose = hose->next) if (hose->index == bus) break; if (!hose) return -ENODEV; } else { /* Special hook for ISA access. 
*/ if (bus == 0 && dfn == 0) { hose = pci_isa_hose; } else { dev = pci_get_bus_and_slot(bus, dfn); if (!dev) return -ENODEV; hose = dev->sysdata; pci_dev_put(dev); } } switch (which & ~IOBASE_FROM_HOSE) { case IOBASE_HOSE: return hose->index; case IOBASE_SPARSE_MEM: return hose->sparse_mem_base; case IOBASE_DENSE_MEM: return hose->dense_mem_base; case IOBASE_SPARSE_IO: return hose->sparse_io_base; case IOBASE_DENSE_IO: return hose->dense_io_base; case IOBASE_ROOT_BUS: return hose->bus->number; } return -EOPNOTSUPP; } /* Destroy an __iomem token. Not copied from lib/iomap.c. */ void pci_iounmap(struct pci_dev *dev, void __iomem * addr) { if (__is_mmio(addr)) iounmap(addr); } EXPORT_SYMBOL(pci_iounmap); /* FIXME: Some boxes have multiple ISA bridges! */ struct pci_dev *isa_bridge; EXPORT_SYMBOL(isa_bridge);
gpl-2.0
f12c/android_kernel_fujitsu_f11eif
drivers/isdn/hardware/eicon/os_bri.c
4973
20397
/* $Id: os_bri.c,v 1.21 2004/03/21 17:26:01 armin Exp $ */ #include "platform.h" #include "debuglib.h" #include "cardtype.h" #include "pc.h" #include "pr_pc.h" #include "di_defs.h" #include "dsp_defs.h" #include "di.h" #include "io.h" #include "xdi_msg.h" #include "xdi_adapter.h" #include "os_bri.h" #include "diva_pci.h" #include "mi_pc.h" #include "pc_maint.h" #include "dsrv_bri.h" /* ** IMPORTS */ extern void prepare_maestra_functions(PISDN_ADAPTER IoAdapter); extern void diva_xdi_display_adapter_features(int card); extern int diva_card_read_xlog(diva_os_xdi_adapter_t * a); /* ** LOCALS */ static int bri_bar_length[3] = { 0x80, 0x80, 0x20 }; static int diva_bri_cleanup_adapter(diva_os_xdi_adapter_t * a); static dword diva_bri_get_serial_number(diva_os_xdi_adapter_t * a); static int diva_bri_cmd_card_proc(struct _diva_os_xdi_adapter *a, diva_xdi_um_cfg_cmd_t * cmd, int length); static int diva_bri_reregister_io(diva_os_xdi_adapter_t * a); static int diva_bri_reset_adapter(PISDN_ADAPTER IoAdapter); static int diva_bri_write_sdram_block(PISDN_ADAPTER IoAdapter, dword address, const byte * data, dword length); static int diva_bri_start_adapter(PISDN_ADAPTER IoAdapter, dword start_address, dword features); static int diva_bri_stop_adapter(diva_os_xdi_adapter_t * a); static void diva_bri_set_addresses(diva_os_xdi_adapter_t * a) { a->resources.pci.mem_type_id[MEM_TYPE_RAM] = 0; a->resources.pci.mem_type_id[MEM_TYPE_CFG] = 1; a->resources.pci.mem_type_id[MEM_TYPE_ADDRESS] = 2; a->resources.pci.mem_type_id[MEM_TYPE_RESET] = 1; a->resources.pci.mem_type_id[MEM_TYPE_PORT] = 2; a->resources.pci.mem_type_id[MEM_TYPE_CTLREG] = 2; a->xdi_adapter.ram = a->resources.pci.addr[0]; a->xdi_adapter.cfg = a->resources.pci.addr[1]; a->xdi_adapter.Address = a->resources.pci.addr[2]; a->xdi_adapter.reset = a->xdi_adapter.cfg; a->xdi_adapter.port = a->xdi_adapter.Address; a->xdi_adapter.ctlReg = a->xdi_adapter.port + M_PCI_RESET; a->xdi_adapter.reset += 0x4C; /* PLX 9050 !! 
*/ } /* ** BAR0 - MEM Addr - 0x80 - NOT USED ** BAR1 - I/O Addr - 0x80 ** BAR2 - I/O Addr - 0x20 */ int diva_bri_init_card(diva_os_xdi_adapter_t * a) { int bar; dword bar2 = 0, bar2_length = 0xffffffff; word cmd = 0, cmd_org; byte Bus, Slot; void *hdev; byte __iomem *p; /* Set properties */ a->xdi_adapter.Properties = CardProperties[a->CardOrdinal]; DBG_LOG(("Load %s", a->xdi_adapter.Properties.Name)) /* Get resources */ for (bar = 0; bar < 3; bar++) { a->resources.pci.bar[bar] = divasa_get_pci_bar(a->resources.pci.bus, a->resources.pci.func, bar, a->resources.pci.hdev); if (!a->resources.pci.bar[bar]) { DBG_ERR(("A: can't get BAR[%d]", bar)) return (-1); } } a->resources.pci.irq = (byte) divasa_get_pci_irq(a->resources.pci.bus, a->resources.pci.func, a->resources.pci.hdev); if (!a->resources.pci.irq) { DBG_ERR(("A: invalid irq")); return (-1); } /* Get length of I/O bar 2 - it is different by older EEPROM version */ Bus = a->resources.pci.bus; Slot = a->resources.pci.func; hdev = a->resources.pci.hdev; /* Get plain original values of the BAR2 CDM registers */ PCIread(Bus, Slot, 0x18, &bar2, sizeof(bar2), hdev); PCIread(Bus, Slot, 0x04, &cmd_org, sizeof(cmd_org), hdev); /* Disable device and get BAR2 length */ PCIwrite(Bus, Slot, 0x04, &cmd, sizeof(cmd), hdev); PCIwrite(Bus, Slot, 0x18, &bar2_length, sizeof(bar2_length), hdev); PCIread(Bus, Slot, 0x18, &bar2_length, sizeof(bar2_length), hdev); /* Restore BAR2 and CMD registers */ PCIwrite(Bus, Slot, 0x18, &bar2, sizeof(bar2), hdev); PCIwrite(Bus, Slot, 0x04, &cmd_org, sizeof(cmd_org), hdev); /* Calculate BAR2 length */ bar2_length = (~(bar2_length & ~7)) + 1; DBG_LOG(("BAR[2] length=%lx", bar2_length)) /* Map and register resources */ if (!(a->resources.pci.addr[0] = divasa_remap_pci_bar(a, 0, a->resources.pci.bar[0], bri_bar_length[0]))) { DBG_ERR(("A: BRI, can't map BAR[0]")) diva_bri_cleanup_adapter(a); return (-1); } sprintf(&a->port_name[0], "BRI %02x:%02x", a->resources.pci.bus, a->resources.pci.func); if 
(diva_os_register_io_port(a, 1, a->resources.pci.bar[1], bri_bar_length[1], &a->port_name[0], 1)) { DBG_ERR(("A: BRI, can't register BAR[1]")) diva_bri_cleanup_adapter(a); return (-1); } a->resources.pci.addr[1] = (void *) (unsigned long) a->resources.pci.bar[1]; a->resources.pci.length[1] = bri_bar_length[1]; if (diva_os_register_io_port(a, 1, a->resources.pci.bar[2], bar2_length, &a->port_name[0], 2)) { DBG_ERR(("A: BRI, can't register BAR[2]")) diva_bri_cleanup_adapter(a); return (-1); } a->resources.pci.addr[2] = (void *) (unsigned long) a->resources.pci.bar[2]; a->resources.pci.length[2] = bar2_length; /* Set all memory areas */ diva_bri_set_addresses(a); /* Get Serial Number */ a->xdi_adapter.serialNo = diva_bri_get_serial_number(a); /* Register I/O ports with correct name now */ if (diva_bri_reregister_io(a)) { diva_bri_cleanup_adapter(a); return (-1); } /* Initialize OS dependent objects */ if (diva_os_initialize_spin_lock (&a->xdi_adapter.isr_spin_lock, "isr")) { diva_bri_cleanup_adapter(a); return (-1); } if (diva_os_initialize_spin_lock (&a->xdi_adapter.data_spin_lock, "data")) { diva_bri_cleanup_adapter(a); return (-1); } strcpy(a->xdi_adapter.req_soft_isr.dpc_thread_name, "kdivasbrid"); if (diva_os_initialize_soft_isr(&a->xdi_adapter.req_soft_isr, DIDpcRoutine, &a->xdi_adapter)) { diva_bri_cleanup_adapter(a); return (-1); } /* Do not initialize second DPC - only one thread will be created */ a->xdi_adapter.isr_soft_isr.object = a->xdi_adapter.req_soft_isr.object; /* Create entity table */ a->xdi_adapter.Channels = CardProperties[a->CardOrdinal].Channels; a->xdi_adapter.e_max = CardProperties[a->CardOrdinal].E_info; a->xdi_adapter.e_tbl = diva_os_malloc(0, a->xdi_adapter.e_max * sizeof(E_INFO)); if (!a->xdi_adapter.e_tbl) { diva_bri_cleanup_adapter(a); return (-1); } memset(a->xdi_adapter.e_tbl, 0x00, a->xdi_adapter.e_max * sizeof(E_INFO)); /* Set up interface */ a->xdi_adapter.a.io = &a->xdi_adapter; a->xdi_adapter.DIRequest = request; 
a->interface.cleanup_adapter_proc = diva_bri_cleanup_adapter; a->interface.cmd_proc = diva_bri_cmd_card_proc; p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter); outpp(p, 0x41); DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p); prepare_maestra_functions(&a->xdi_adapter); a->dsp_mask = 0x00000003; /* Set IRQ handler */ a->xdi_adapter.irq_info.irq_nr = a->resources.pci.irq; sprintf(a->xdi_adapter.irq_info.irq_name, "DIVA BRI %ld", (long) a->xdi_adapter.serialNo); if (diva_os_register_irq(a, a->xdi_adapter.irq_info.irq_nr, a->xdi_adapter.irq_info.irq_name)) { diva_bri_cleanup_adapter(a); return (-1); } a->xdi_adapter.irq_info.registered = 1; diva_log_info("%s IRQ:%d SerNo:%d", a->xdi_adapter.Properties.Name, a->resources.pci.irq, a->xdi_adapter.serialNo); return (0); } static int diva_bri_cleanup_adapter(diva_os_xdi_adapter_t * a) { int i; if (a->xdi_adapter.Initialized) { diva_bri_stop_adapter(a); } /* Remove ISR Handler */ if (a->xdi_adapter.irq_info.registered) { diva_os_remove_irq(a, a->xdi_adapter.irq_info.irq_nr); } a->xdi_adapter.irq_info.registered = 0; if (a->resources.pci.addr[0] && a->resources.pci.bar[0]) { divasa_unmap_pci_bar(a->resources.pci.addr[0]); a->resources.pci.addr[0] = NULL; a->resources.pci.bar[0] = 0; } for (i = 1; i < 3; i++) { if (a->resources.pci.addr[i] && a->resources.pci.bar[i]) { diva_os_register_io_port(a, 0, a->resources.pci.bar[i], a->resources.pci. 
length[i], &a->port_name[0], i); a->resources.pci.addr[i] = NULL; a->resources.pci.bar[i] = 0; } } /* Free OS objects */ diva_os_cancel_soft_isr(&a->xdi_adapter.req_soft_isr); diva_os_cancel_soft_isr(&a->xdi_adapter.isr_soft_isr); diva_os_remove_soft_isr(&a->xdi_adapter.req_soft_isr); a->xdi_adapter.isr_soft_isr.object = NULL; diva_os_destroy_spin_lock(&a->xdi_adapter.isr_spin_lock, "rm"); diva_os_destroy_spin_lock(&a->xdi_adapter.data_spin_lock, "rm"); /* Free memory */ if (a->xdi_adapter.e_tbl) { diva_os_free(0, a->xdi_adapter.e_tbl); a->xdi_adapter.e_tbl = NULL; } return (0); } void diva_os_prepare_maestra_functions(PISDN_ADAPTER IoAdapter) { } /* ** Get serial number */ static dword diva_bri_get_serial_number(diva_os_xdi_adapter_t * a) { dword serNo = 0; byte __iomem *confIO; word serHi, serLo; word __iomem *confMem; confIO = DIVA_OS_MEM_ATTACH_CFG(&a->xdi_adapter); serHi = (word) (inppw(&confIO[0x22]) & 0x0FFF); serLo = (word) (inppw(&confIO[0x26]) & 0x0FFF); serNo = ((dword) serHi << 16) | (dword) serLo; DIVA_OS_MEM_DETACH_CFG(&a->xdi_adapter, confIO); if ((serNo == 0) || (serNo == 0xFFFFFFFF)) { DBG_FTL(("W: BRI use BAR[0] to get card serial number")) confMem = (word __iomem *)DIVA_OS_MEM_ATTACH_RAM(&a->xdi_adapter); serHi = (word) (READ_WORD(&confMem[0x11]) & 0x0FFF); serLo = (word) (READ_WORD(&confMem[0x13]) & 0x0FFF); serNo = (((dword) serHi) << 16) | ((dword) serLo); DIVA_OS_MEM_DETACH_RAM(&a->xdi_adapter, confMem); } DBG_LOG(("Serial Number=%ld", serNo)) return (serNo); } /* ** Unregister I/O and register it with new name, ** based on Serial Number */ static int diva_bri_reregister_io(diva_os_xdi_adapter_t * a) { int i; for (i = 1; i < 3; i++) { diva_os_register_io_port(a, 0, a->resources.pci.bar[i], a->resources.pci.length[i], &a->port_name[0], i); a->resources.pci.addr[i] = NULL; } sprintf(a->port_name, "DIVA BRI %ld", (long) a->xdi_adapter.serialNo); for (i = 1; i < 3; i++) { if (diva_os_register_io_port(a, 1, a->resources.pci.bar[i], 
a->resources.pci.length[i], &a->port_name[0], i)) { DBG_ERR(("A: failed to reregister BAR[%d]", i)) return (-1); } a->resources.pci.addr[i] = (void *) (unsigned long) a->resources.pci.bar[i]; } return (0); } /* ** Process command from user mode */ static int diva_bri_cmd_card_proc(struct _diva_os_xdi_adapter *a, diva_xdi_um_cfg_cmd_t * cmd, int length) { int ret = -1; if (cmd->adapter != a->controller) { DBG_ERR(("A: pri_cmd, invalid controller=%d != %d", cmd->adapter, a->controller)) return (-1); } switch (cmd->command) { case DIVA_XDI_UM_CMD_GET_CARD_ORDINAL: a->xdi_mbox.data_length = sizeof(dword); a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { *(dword *) a->xdi_mbox.data = (dword) a->CardOrdinal; a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } break; case DIVA_XDI_UM_CMD_GET_SERIAL_NR: a->xdi_mbox.data_length = sizeof(dword); a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { *(dword *) a->xdi_mbox.data = (dword) a->xdi_adapter.serialNo; a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } break; case DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG: a->xdi_mbox.data_length = sizeof(dword) * 9; a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { int i; dword *data = (dword *) a->xdi_mbox.data; for (i = 0; i < 8; i++) { *data++ = a->resources.pci.bar[i]; } *data++ = (dword) a->resources.pci.irq; a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } break; case DIVA_XDI_UM_CMD_GET_CARD_STATE: a->xdi_mbox.data_length = sizeof(dword); a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { dword *data = (dword *) a->xdi_mbox.data; if (!a->xdi_adapter.port) { *data = 3; } else if (a->xdi_adapter.trapped) { *data = 2; } else if (a->xdi_adapter.Initialized) { *data = 1; } else { *data = 0; } a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } break; case DIVA_XDI_UM_CMD_RESET_ADAPTER: ret = diva_bri_reset_adapter(&a->xdi_adapter); break; case 
DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK: ret = diva_bri_write_sdram_block(&a->xdi_adapter, cmd->command_data. write_sdram.offset, (byte *) & cmd[1], cmd->command_data. write_sdram.length); break; case DIVA_XDI_UM_CMD_START_ADAPTER: ret = diva_bri_start_adapter(&a->xdi_adapter, cmd->command_data.start. offset, cmd->command_data.start. features); break; case DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES: a->xdi_adapter.features = cmd->command_data.features.features; a->xdi_adapter.a.protocol_capabilities = a->xdi_adapter.features; DBG_TRC( ("Set raw protocol features (%08x)", a->xdi_adapter.features)) ret = 0; break; case DIVA_XDI_UM_CMD_STOP_ADAPTER: ret = diva_bri_stop_adapter(a); break; case DIVA_XDI_UM_CMD_READ_XLOG_ENTRY: ret = diva_card_read_xlog(a); break; default: DBG_ERR( ("A: A(%d) invalid cmd=%d", a->controller, cmd->command))} return (ret); } static int diva_bri_reset_adapter(PISDN_ADAPTER IoAdapter) { byte __iomem *addrHi, *addrLo, *ioaddr; dword i; byte __iomem *Port; if (!IoAdapter->port) { return (-1); } if (IoAdapter->Initialized) { DBG_ERR(("A: A(%d) can't reset BRI adapter - please stop first", IoAdapter->ANum)) return (-1); } (*(IoAdapter->rstFnc)) (IoAdapter); diva_os_wait(100); Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter); addrHi = Port + ((IoAdapter->Properties.Bus == BUS_PCI) ? 
M_PCI_ADDRH : ADDRH); addrLo = Port + ADDR; ioaddr = Port + DATA; /* recover */ outpp(addrHi, (byte) 0); outppw(addrLo, (word) 0); outppw(ioaddr, (word) 0); /* clear shared memory */ outpp(addrHi, (byte) ( (IoAdapter->MemoryBase + IoAdapter->MemorySize - BRI_SHARED_RAM_SIZE) >> 16)); outppw(addrLo, 0); for (i = 0; i < 0x8000; outppw(ioaddr, 0), ++i); diva_os_wait(100); /* clear signature */ outpp(addrHi, (byte) ( (IoAdapter->MemoryBase + IoAdapter->MemorySize - BRI_SHARED_RAM_SIZE) >> 16)); outppw(addrLo, 0x1e); outpp(ioaddr, 0); outpp(ioaddr, 0); outpp(addrHi, (byte) 0); outppw(addrLo, (word) 0); outppw(ioaddr, (word) 0); DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port); /* Forget all outstanding entities */ IoAdapter->e_count = 0; if (IoAdapter->e_tbl) { memset(IoAdapter->e_tbl, 0x00, IoAdapter->e_max * sizeof(E_INFO)); } IoAdapter->head = 0; IoAdapter->tail = 0; IoAdapter->assign = 0; IoAdapter->trapped = 0; memset(&IoAdapter->a.IdTable[0], 0x00, sizeof(IoAdapter->a.IdTable)); memset(&IoAdapter->a.IdTypeTable[0], 0x00, sizeof(IoAdapter->a.IdTypeTable)); memset(&IoAdapter->a.FlowControlIdTable[0], 0x00, sizeof(IoAdapter->a.FlowControlIdTable)); memset(&IoAdapter->a.FlowControlSkipTable[0], 0x00, sizeof(IoAdapter->a.FlowControlSkipTable)); memset(&IoAdapter->a.misc_flags_table[0], 0x00, sizeof(IoAdapter->a.misc_flags_table)); memset(&IoAdapter->a.rx_stream[0], 0x00, sizeof(IoAdapter->a.rx_stream)); memset(&IoAdapter->a.tx_stream[0], 0x00, sizeof(IoAdapter->a.tx_stream)); memset(&IoAdapter->a.tx_pos[0], 0x00, sizeof(IoAdapter->a.tx_pos)); memset(&IoAdapter->a.rx_pos[0], 0x00, sizeof(IoAdapter->a.rx_pos)); return (0); } static int diva_bri_write_sdram_block(PISDN_ADAPTER IoAdapter, dword address, const byte * data, dword length) { byte __iomem *addrHi, *addrLo, *ioaddr; byte __iomem *Port; if (!IoAdapter->port) { return (-1); } Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter); addrHi = Port + ((IoAdapter->Properties.Bus == BUS_PCI) ? 
M_PCI_ADDRH : ADDRH); addrLo = Port + ADDR; ioaddr = Port + DATA; while (length--) { outpp(addrHi, (word) (address >> 16)); outppw(addrLo, (word) (address & 0x0000ffff)); outpp(ioaddr, *data++); address++; } DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port); return (0); } static int diva_bri_start_adapter(PISDN_ADAPTER IoAdapter, dword start_address, dword features) { byte __iomem *Port; dword i, test; byte __iomem *addrHi, *addrLo, *ioaddr; int started = 0; ADAPTER *a = &IoAdapter->a; if (IoAdapter->Initialized) { DBG_ERR( ("A: A(%d) bri_start_adapter, adapter already running", IoAdapter->ANum)) return (-1); } if (!IoAdapter->port) { DBG_ERR(("A: A(%d) bri_start_adapter, adapter not mapped", IoAdapter->ANum)) return (-1); } sprintf(IoAdapter->Name, "A(%d)", (int) IoAdapter->ANum); DBG_LOG(("A(%d) start BRI", IoAdapter->ANum)) Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter); addrHi = Port + ((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH); addrLo = Port + ADDR; ioaddr = Port + DATA; outpp(addrHi, (byte) ( (IoAdapter->MemoryBase + IoAdapter->MemorySize - BRI_SHARED_RAM_SIZE) >> 16)); outppw(addrLo, 0x1e); outppw(ioaddr, 0x00); DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port); /* start the protocol code */ Port = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); outpp(Port, 0x08); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, Port); Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter); addrHi = Port + ((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH); addrLo = Port + ADDR; ioaddr = Port + DATA; /* wait for signature (max. 
3 seconds) */ for (i = 0; i < 300; ++i) { diva_os_wait(10); outpp(addrHi, (byte) ( (IoAdapter->MemoryBase + IoAdapter->MemorySize - BRI_SHARED_RAM_SIZE) >> 16)); outppw(addrLo, 0x1e); test = (dword) inppw(ioaddr); if (test == 0x4447) { DBG_LOG( ("Protocol startup time %d.%02d seconds", (i / 100), (i % 100))) started = 1; break; } } DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port); if (!started) { DBG_FTL(("A: A(%d) %s: Adapter selftest failed 0x%04X", IoAdapter->ANum, IoAdapter->Properties.Name, test)) (*(IoAdapter->trapFnc)) (IoAdapter); return (-1); } IoAdapter->Initialized = 1; /* Check Interrupt */ IoAdapter->IrqCount = 0; a->ReadyInt = 1; if (IoAdapter->reset) { Port = DIVA_OS_MEM_ATTACH_RESET(IoAdapter); outpp(Port, 0x41); DIVA_OS_MEM_DETACH_RESET(IoAdapter, Port); } a->ram_out(a, &PR_RAM->ReadyInt, 1); for (i = 0; ((!IoAdapter->IrqCount) && (i < 100)); i++) { diva_os_wait(10); } if (!IoAdapter->IrqCount) { DBG_ERR( ("A: A(%d) interrupt test failed", IoAdapter->ANum)) IoAdapter->Initialized = 0; IoAdapter->stop(IoAdapter); return (-1); } IoAdapter->Properties.Features = (word) features; diva_xdi_display_adapter_features(IoAdapter->ANum); DBG_LOG(("A(%d) BRI adapter successfully started", IoAdapter->ANum)) /* Register with DIDD */ diva_xdi_didd_register_adapter(IoAdapter->ANum); return (0); } static void diva_bri_clear_interrupts(diva_os_xdi_adapter_t * a) { PISDN_ADAPTER IoAdapter = &a->xdi_adapter; /* clear any pending interrupt */ IoAdapter->disIrq(IoAdapter); IoAdapter->tst_irq(&IoAdapter->a); IoAdapter->clr_irq(&IoAdapter->a); IoAdapter->tst_irq(&IoAdapter->a); /* kill pending dpcs */ diva_os_cancel_soft_isr(&IoAdapter->req_soft_isr); diva_os_cancel_soft_isr(&IoAdapter->isr_soft_isr); } /* ** Stop card */ static int diva_bri_stop_adapter(diva_os_xdi_adapter_t * a) { PISDN_ADAPTER IoAdapter = &a->xdi_adapter; int i = 100; if (!IoAdapter->port) { return (-1); } if (!IoAdapter->Initialized) { DBG_ERR(("A: A(%d) can't stop BRI adapter - not running", 
IoAdapter->ANum)) return (-1); /* nothing to stop */ } IoAdapter->Initialized = 0; /* Disconnect Adapter from DIDD */ diva_xdi_didd_remove_adapter(IoAdapter->ANum); /* Stop interrupts */ a->clear_interrupts_proc = diva_bri_clear_interrupts; IoAdapter->a.ReadyInt = 1; IoAdapter->a.ram_inc(&IoAdapter->a, &PR_RAM->ReadyInt); do { diva_os_sleep(10); } while (i-- && a->clear_interrupts_proc); if (a->clear_interrupts_proc) { diva_bri_clear_interrupts(a); a->clear_interrupts_proc = NULL; DBG_ERR(("A: A(%d) no final interrupt from BRI adapter", IoAdapter->ANum)) } IoAdapter->a.ReadyInt = 0; /* Stop and reset adapter */ IoAdapter->stop(IoAdapter); return (0); }
gpl-2.0
droidfivex/kernel_lge_msm8974
drivers/mfd/tps65912-core.c
4973
4095
/* * tps65912-core.c -- TI TPS65912x * * Copyright 2011 Texas Instruments Inc. * * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This driver is based on wm8350 implementation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/mfd/core.h> #include <linux/mfd/tps65912.h> static struct mfd_cell tps65912s[] = { { .name = "tps65912-pmic", }, }; int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask) { u8 data; int err; mutex_lock(&tps65912->io_mutex); err = tps65912->read(tps65912, reg, 1, &data); if (err) { dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg); goto out; } data |= mask; err = tps65912->write(tps65912, reg, 1, &data); if (err) dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg); out: mutex_unlock(&tps65912->io_mutex); return err; } EXPORT_SYMBOL_GPL(tps65912_set_bits); int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask) { u8 data; int err; mutex_lock(&tps65912->io_mutex); err = tps65912->read(tps65912, reg, 1, &data); if (err) { dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg); goto out; } data &= ~mask; err = tps65912->write(tps65912, reg, 1, &data); if (err) dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg); out: mutex_unlock(&tps65912->io_mutex); return err; } EXPORT_SYMBOL_GPL(tps65912_clear_bits); static inline int tps65912_read(struct tps65912 *tps65912, u8 reg) { u8 val; int err; err = tps65912->read(tps65912, reg, 1, &val); if (err < 0) return err; return val; } static inline int tps65912_write(struct tps65912 *tps65912, u8 reg, u8 val) { return tps65912->write(tps65912, reg, 1, &val); } int tps65912_reg_read(struct tps65912 *tps65912, 
u8 reg) { int data; mutex_lock(&tps65912->io_mutex); data = tps65912_read(tps65912, reg); if (data < 0) dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg); mutex_unlock(&tps65912->io_mutex); return data; } EXPORT_SYMBOL_GPL(tps65912_reg_read); int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val) { int err; mutex_lock(&tps65912->io_mutex); err = tps65912_write(tps65912, reg, val); if (err < 0) dev_err(tps65912->dev, "Write for reg 0x%x failed\n", reg); mutex_unlock(&tps65912->io_mutex); return err; } EXPORT_SYMBOL_GPL(tps65912_reg_write); int tps65912_device_init(struct tps65912 *tps65912) { struct tps65912_board *pmic_plat_data = tps65912->dev->platform_data; struct tps65912_platform_data *init_data; int ret, dcdc_avs, value; init_data = kzalloc(sizeof(struct tps65912_platform_data), GFP_KERNEL); if (init_data == NULL) return -ENOMEM; mutex_init(&tps65912->io_mutex); dev_set_drvdata(tps65912->dev, tps65912); dcdc_avs = (pmic_plat_data->is_dcdc1_avs << 0 | pmic_plat_data->is_dcdc2_avs << 1 | pmic_plat_data->is_dcdc3_avs << 2 | pmic_plat_data->is_dcdc4_avs << 3); if (dcdc_avs) { tps65912->read(tps65912, TPS65912_I2C_SPI_CFG, 1, &value); dcdc_avs |= value; tps65912->write(tps65912, TPS65912_I2C_SPI_CFG, 1, &dcdc_avs); } ret = mfd_add_devices(tps65912->dev, -1, tps65912s, ARRAY_SIZE(tps65912s), NULL, 0); if (ret < 0) goto err; init_data->irq = pmic_plat_data->irq; init_data->irq_base = pmic_plat_data->irq_base; ret = tps65912_irq_init(tps65912, init_data->irq, init_data); if (ret < 0) goto err; kfree(init_data); return ret; err: kfree(init_data); mfd_remove_devices(tps65912->dev); kfree(tps65912); return ret; } void tps65912_device_exit(struct tps65912 *tps65912) { mfd_remove_devices(tps65912->dev); kfree(tps65912); } MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>"); MODULE_DESCRIPTION("TPS65912x chip family multi-function driver"); MODULE_LICENSE("GPL");
gpl-2.0
VanirRezound/kernel-vigor-aosp-3.4
drivers/media/radio/si470x/radio-si470x-common.c
7789
24616
/* * drivers/media/radio/si470x/radio-si470x-common.c * * Driver for radios with Silicon Labs Si470x FM Radio Receivers * * Copyright (c) 2009 Tobias Lorenz <tobias.lorenz@gmx.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * History: * 2008-01-12 Tobias Lorenz <tobias.lorenz@gmx.net> * Version 1.0.0 * - First working version * 2008-01-13 Tobias Lorenz <tobias.lorenz@gmx.net> * Version 1.0.1 * - Improved error handling, every function now returns errno * - Improved multi user access (start/mute/stop) * - Channel doesn't get lost anymore after start/mute/stop * - RDS support added (polling mode via interrupt EP 1) * - marked default module parameters with *value* * - switched from bit structs to bit masks * - header file cleaned and integrated * 2008-01-14 Tobias Lorenz <tobias.lorenz@gmx.net> * Version 1.0.2 * - hex values are now lower case * - commented USB ID for ADS/Tech moved on todo list * - blacklisted si470x in hid-quirks.c * - rds buffer handling functions integrated into *_work, *_read * - rds_command in si470x_poll exchanged against simple retval * - check for firmware version 15 * - code order and prototypes still remain the same * - spacing and bottom of band codes remain the same * 2008-01-16 Tobias Lorenz <tobias.lorenz@gmx.net> * Version 1.0.3 * - code reordered to avoid function prototypes * - 
switch/case defaults are now more user-friendly * - unified comment style * - applied all checkpatch.pl v1.12 suggestions * except the warning about the too long lines with bit comments * - renamed FMRADIO to RADIO to cut line length (checkpatch.pl) * 2008-01-22 Tobias Lorenz <tobias.lorenz@gmx.net> * Version 1.0.4 * - avoid poss. locking when doing copy_to_user which may sleep * - RDS is automatically activated on read now * - code cleaned of unnecessary rds_commands * - USB Vendor/Product ID for ADS/Tech FM Radio Receiver verified * (thanks to Guillaume RAMOUSSE) * 2008-01-27 Tobias Lorenz <tobias.lorenz@gmx.net> * Version 1.0.5 * - number of seek_retries changed to tune_timeout * - fixed problem with incomplete tune operations by own buffers * - optimization of variables and printf types * - improved error logging * 2008-01-31 Tobias Lorenz <tobias.lorenz@gmx.net> * Oliver Neukum <oliver@neukum.org> * Version 1.0.6 * - fixed coverity checker warnings in *_usb_driver_disconnect * - probe()/open() race by correct ordering in probe() * - DMA coherency rules by separate allocation of all buffers * - use of endianness macros * - abuse of spinlock, replaced by mutex * - racy handling of timer in disconnect, * replaced by delayed_work * - racy interruptible_sleep_on(), * replaced with wait_event_interruptible() * - handle signals in read() * 2008-02-08 Tobias Lorenz <tobias.lorenz@gmx.net> * Oliver Neukum <oliver@neukum.org> * Version 1.0.7 * - usb autosuspend support * - unplugging fixed * 2008-05-07 Tobias Lorenz <tobias.lorenz@gmx.net> * Version 1.0.8 * - hardware frequency seek support * - afc indication * - more safety checks, let si470x_get_freq return errno * - vidioc behavior corrected according to v4l2 spec * 2008-10-20 Alexey Klimov <klimov.linux@gmail.com> * - add support for KWorld USB FM Radio FM700 * - blacklisted KWorld radio in hid-core.c and hid-ids.h * 2008-12-03 Mark Lord <mlord@pobox.com> * - add support for DealExtreme USB Radio * 2009-01-31 Bob 
Ross <pigiron@gmx.com> * - correction of stereo detection/setting * - correction of signal strength indicator scaling * 2009-01-31 Rick Bronson <rick@efn.org> * Tobias Lorenz <tobias.lorenz@gmx.net> * - add LED status output * - get HW/SW version from scratchpad * 2009-06-16 Edouard Lafargue <edouard@lafargue.name> * Version 1.0.10 * - add support for interrupt mode for RDS endpoint, * instead of polling. * Improves RDS reception significantly */ /* kernel includes */ #include "radio-si470x.h" /************************************************************************** * Module Parameters **************************************************************************/ /* Spacing (kHz) */ /* 0: 200 kHz (USA, Australia) */ /* 1: 100 kHz (Europe, Japan) */ /* 2: 50 kHz */ static unsigned short space = 2; module_param(space, ushort, 0444); MODULE_PARM_DESC(space, "Spacing: 0=200kHz 1=100kHz *2=50kHz*"); /* Bottom of Band (MHz) */ /* 0: 87.5 - 108 MHz (USA, Europe)*/ /* 1: 76 - 108 MHz (Japan wide band) */ /* 2: 76 - 90 MHz (Japan) */ static unsigned short band = 1; module_param(band, ushort, 0444); MODULE_PARM_DESC(band, "Band: 0=87.5..108MHz *1=76..108MHz* 2=76..90MHz"); /* De-emphasis */ /* 0: 75 us (USA) */ /* 1: 50 us (Europe, Australia, Japan) */ static unsigned short de = 1; module_param(de, ushort, 0444); MODULE_PARM_DESC(de, "De-emphasis: 0=75us *1=50us*"); /* Tune timeout */ static unsigned int tune_timeout = 3000; module_param(tune_timeout, uint, 0644); MODULE_PARM_DESC(tune_timeout, "Tune timeout: *3000*"); /* Seek timeout */ static unsigned int seek_timeout = 5000; module_param(seek_timeout, uint, 0644); MODULE_PARM_DESC(seek_timeout, "Seek timeout: *5000*"); /************************************************************************** * Generic Functions **************************************************************************/ /* * si470x_set_chan - set the channel */ static int si470x_set_chan(struct si470x_device *radio, unsigned short chan) { int retval; 
unsigned long timeout; bool timed_out = 0; /* start tuning */ radio->registers[CHANNEL] &= ~CHANNEL_CHAN; radio->registers[CHANNEL] |= CHANNEL_TUNE | chan; retval = si470x_set_register(radio, CHANNEL); if (retval < 0) goto done; /* currently I2C driver only uses interrupt way to tune */ if (radio->stci_enabled) { INIT_COMPLETION(radio->completion); /* wait till tune operation has completed */ retval = wait_for_completion_timeout(&radio->completion, msecs_to_jiffies(tune_timeout)); if (!retval) timed_out = true; } else { /* wait till tune operation has completed */ timeout = jiffies + msecs_to_jiffies(tune_timeout); do { retval = si470x_get_register(radio, STATUSRSSI); if (retval < 0) goto stop; timed_out = time_after(jiffies, timeout); } while (((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0) && (!timed_out)); } if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0) dev_warn(&radio->videodev->dev, "tune does not complete\n"); if (timed_out) dev_warn(&radio->videodev->dev, "tune timed out after %u ms\n", tune_timeout); stop: /* stop tuning */ radio->registers[CHANNEL] &= ~CHANNEL_TUNE; retval = si470x_set_register(radio, CHANNEL); done: return retval; } /* * si470x_get_freq - get the frequency */ static int si470x_get_freq(struct si470x_device *radio, unsigned int *freq) { unsigned int spacing, band_bottom; unsigned short chan; int retval; /* Spacing (kHz) */ switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_SPACE) >> 4) { /* 0: 200 kHz (USA, Australia) */ case 0: spacing = 0.200 * FREQ_MUL; break; /* 1: 100 kHz (Europe, Japan) */ case 1: spacing = 0.100 * FREQ_MUL; break; /* 2: 50 kHz */ default: spacing = 0.050 * FREQ_MUL; break; }; /* Bottom of Band (MHz) */ switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) { /* 0: 87.5 - 108 MHz (USA, Europe) */ case 0: band_bottom = 87.5 * FREQ_MUL; break; /* 1: 76 - 108 MHz (Japan wide band) */ default: band_bottom = 76 * FREQ_MUL; break; /* 2: 76 - 90 MHz (Japan) */ case 2: band_bottom = 76 * FREQ_MUL; 
break; }; /* read channel */ retval = si470x_get_register(radio, READCHAN); chan = radio->registers[READCHAN] & READCHAN_READCHAN; /* Frequency (MHz) = Spacing (kHz) x Channel + Bottom of Band (MHz) */ *freq = chan * spacing + band_bottom; return retval; } /* * si470x_set_freq - set the frequency */ int si470x_set_freq(struct si470x_device *radio, unsigned int freq) { unsigned int spacing, band_bottom; unsigned short chan; /* Spacing (kHz) */ switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_SPACE) >> 4) { /* 0: 200 kHz (USA, Australia) */ case 0: spacing = 0.200 * FREQ_MUL; break; /* 1: 100 kHz (Europe, Japan) */ case 1: spacing = 0.100 * FREQ_MUL; break; /* 2: 50 kHz */ default: spacing = 0.050 * FREQ_MUL; break; }; /* Bottom of Band (MHz) */ switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) { /* 0: 87.5 - 108 MHz (USA, Europe) */ case 0: band_bottom = 87.5 * FREQ_MUL; break; /* 1: 76 - 108 MHz (Japan wide band) */ default: band_bottom = 76 * FREQ_MUL; break; /* 2: 76 - 90 MHz (Japan) */ case 2: band_bottom = 76 * FREQ_MUL; break; }; /* Chan = [ Freq (Mhz) - Bottom of Band (MHz) ] / Spacing (kHz) */ chan = (freq - band_bottom) / spacing; return si470x_set_chan(radio, chan); } /* * si470x_set_seek - set seek */ static int si470x_set_seek(struct si470x_device *radio, unsigned int wrap_around, unsigned int seek_upward) { int retval = 0; unsigned long timeout; bool timed_out = 0; /* start seeking */ radio->registers[POWERCFG] |= POWERCFG_SEEK; if (wrap_around == 1) radio->registers[POWERCFG] &= ~POWERCFG_SKMODE; else radio->registers[POWERCFG] |= POWERCFG_SKMODE; if (seek_upward == 1) radio->registers[POWERCFG] |= POWERCFG_SEEKUP; else radio->registers[POWERCFG] &= ~POWERCFG_SEEKUP; retval = si470x_set_register(radio, POWERCFG); if (retval < 0) goto done; /* currently I2C driver only uses interrupt way to seek */ if (radio->stci_enabled) { INIT_COMPLETION(radio->completion); /* wait till seek operation has completed */ retval = 
wait_for_completion_timeout(&radio->completion, msecs_to_jiffies(seek_timeout)); if (!retval) timed_out = true; } else { /* wait till seek operation has completed */ timeout = jiffies + msecs_to_jiffies(seek_timeout); do { retval = si470x_get_register(radio, STATUSRSSI); if (retval < 0) goto stop; timed_out = time_after(jiffies, timeout); } while (((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0) && (!timed_out)); } if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0) dev_warn(&radio->videodev->dev, "seek does not complete\n"); if (radio->registers[STATUSRSSI] & STATUSRSSI_SF) dev_warn(&radio->videodev->dev, "seek failed / band limit reached\n"); if (timed_out) dev_warn(&radio->videodev->dev, "seek timed out after %u ms\n", seek_timeout); stop: /* stop seeking */ radio->registers[POWERCFG] &= ~POWERCFG_SEEK; retval = si470x_set_register(radio, POWERCFG); done: /* try again, if timed out */ if ((retval == 0) && timed_out) retval = -EAGAIN; return retval; } /* * si470x_start - switch on radio */ int si470x_start(struct si470x_device *radio) { int retval; /* powercfg */ radio->registers[POWERCFG] = POWERCFG_DMUTE | POWERCFG_ENABLE | POWERCFG_RDSM; retval = si470x_set_register(radio, POWERCFG); if (retval < 0) goto done; /* sysconfig 1 */ radio->registers[SYSCONFIG1] = (de << 11) & SYSCONFIG1_DE; /* DE*/ retval = si470x_set_register(radio, SYSCONFIG1); if (retval < 0) goto done; /* sysconfig 2 */ radio->registers[SYSCONFIG2] = (0x3f << 8) | /* SEEKTH */ ((band << 6) & SYSCONFIG2_BAND) | /* BAND */ ((space << 4) & SYSCONFIG2_SPACE) | /* SPACE */ 15; /* VOLUME (max) */ retval = si470x_set_register(radio, SYSCONFIG2); if (retval < 0) goto done; /* reset last channel */ retval = si470x_set_chan(radio, radio->registers[CHANNEL] & CHANNEL_CHAN); done: return retval; } /* * si470x_stop - switch off radio */ int si470x_stop(struct si470x_device *radio) { int retval; /* sysconfig 1 */ radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_RDS; retval = si470x_set_register(radio, 
SYSCONFIG1); if (retval < 0) goto done; /* powercfg */ radio->registers[POWERCFG] &= ~POWERCFG_DMUTE; /* POWERCFG_ENABLE has to automatically go low */ radio->registers[POWERCFG] |= POWERCFG_ENABLE | POWERCFG_DISABLE; retval = si470x_set_register(radio, POWERCFG); done: return retval; } /* * si470x_rds_on - switch on rds reception */ static int si470x_rds_on(struct si470x_device *radio) { int retval; /* sysconfig 1 */ radio->registers[SYSCONFIG1] |= SYSCONFIG1_RDS; retval = si470x_set_register(radio, SYSCONFIG1); if (retval < 0) radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_RDS; return retval; } /************************************************************************** * File Operations Interface **************************************************************************/ /* * si470x_fops_read - read RDS data */ static ssize_t si470x_fops_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct si470x_device *radio = video_drvdata(file); int retval = 0; unsigned int block_count = 0; /* switch on rds reception */ mutex_lock(&radio->lock); if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) si470x_rds_on(radio); /* block if no new data available */ while (radio->wr_index == radio->rd_index) { if (file->f_flags & O_NONBLOCK) { retval = -EWOULDBLOCK; goto done; } if (wait_event_interruptible(radio->read_queue, radio->wr_index != radio->rd_index) < 0) { retval = -EINTR; goto done; } } /* calculate block count from byte count */ count /= 3; /* copy RDS block out of internal buffer and to user buffer */ while (block_count < count) { if (radio->rd_index == radio->wr_index) break; /* always transfer rds complete blocks */ if (copy_to_user(buf, &radio->buffer[radio->rd_index], 3)) /* retval = -EFAULT; */ break; /* increment and wrap read pointer */ radio->rd_index += 3; if (radio->rd_index >= radio->buf_size) radio->rd_index = 0; /* increment counters */ block_count++; buf += 3; retval += 3; } done: mutex_unlock(&radio->lock); return retval; } /* * 
si470x_fops_poll - poll RDS data */ static unsigned int si470x_fops_poll(struct file *file, struct poll_table_struct *pts) { struct si470x_device *radio = video_drvdata(file); int retval = 0; /* switch on rds reception */ mutex_lock(&radio->lock); if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) si470x_rds_on(radio); mutex_unlock(&radio->lock); poll_wait(file, &radio->read_queue, pts); if (radio->rd_index != radio->wr_index) retval = POLLIN | POLLRDNORM; return retval; } /* * si470x_fops - file operations interface */ static const struct v4l2_file_operations si470x_fops = { .owner = THIS_MODULE, .read = si470x_fops_read, .poll = si470x_fops_poll, .unlocked_ioctl = video_ioctl2, .open = si470x_fops_open, .release = si470x_fops_release, }; /************************************************************************** * Video4Linux Interface **************************************************************************/ /* * si470x_vidioc_queryctrl - enumerate control items */ static int si470x_vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { struct si470x_device *radio = video_drvdata(file); int retval = -EINVAL; /* abort if qc->id is below V4L2_CID_BASE */ if (qc->id < V4L2_CID_BASE) goto done; /* search video control */ switch (qc->id) { case V4L2_CID_AUDIO_VOLUME: return v4l2_ctrl_query_fill(qc, 0, 15, 1, 15); case V4L2_CID_AUDIO_MUTE: return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1); } /* disable unsupported base controls */ /* to satisfy kradio and such apps */ if ((retval == -EINVAL) && (qc->id < V4L2_CID_LASTP1)) { qc->flags = V4L2_CTRL_FLAG_DISABLED; retval = 0; } done: if (retval < 0) dev_warn(&radio->videodev->dev, "query controls failed with %d\n", retval); return retval; } /* * si470x_vidioc_g_ctrl - get the value of a control */ static int si470x_vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct si470x_device *radio = video_drvdata(file); int retval = 0; mutex_lock(&radio->lock); /* safety checks */ 
retval = si470x_disconnect_check(radio); if (retval) goto done; switch (ctrl->id) { case V4L2_CID_AUDIO_VOLUME: ctrl->value = radio->registers[SYSCONFIG2] & SYSCONFIG2_VOLUME; break; case V4L2_CID_AUDIO_MUTE: ctrl->value = ((radio->registers[POWERCFG] & POWERCFG_DMUTE) == 0) ? 1 : 0; break; default: retval = -EINVAL; } done: if (retval < 0) dev_warn(&radio->videodev->dev, "get control failed with %d\n", retval); mutex_unlock(&radio->lock); return retval; } /* * si470x_vidioc_s_ctrl - set the value of a control */ static int si470x_vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct si470x_device *radio = video_drvdata(file); int retval = 0; mutex_lock(&radio->lock); /* safety checks */ retval = si470x_disconnect_check(radio); if (retval) goto done; switch (ctrl->id) { case V4L2_CID_AUDIO_VOLUME: radio->registers[SYSCONFIG2] &= ~SYSCONFIG2_VOLUME; radio->registers[SYSCONFIG2] |= ctrl->value; retval = si470x_set_register(radio, SYSCONFIG2); break; case V4L2_CID_AUDIO_MUTE: if (ctrl->value == 1) radio->registers[POWERCFG] &= ~POWERCFG_DMUTE; else radio->registers[POWERCFG] |= POWERCFG_DMUTE; retval = si470x_set_register(radio, POWERCFG); break; default: retval = -EINVAL; } done: if (retval < 0) dev_warn(&radio->videodev->dev, "set control failed with %d\n", retval); mutex_unlock(&radio->lock); return retval; } /* * si470x_vidioc_g_audio - get audio attributes */ static int si470x_vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *audio) { /* driver constants */ audio->index = 0; strcpy(audio->name, "Radio"); audio->capability = V4L2_AUDCAP_STEREO; audio->mode = 0; return 0; } /* * si470x_vidioc_g_tuner - get tuner attributes */ static int si470x_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *tuner) { struct si470x_device *radio = video_drvdata(file); int retval = 0; mutex_lock(&radio->lock); /* safety checks */ retval = si470x_disconnect_check(radio); if (retval) goto done; if (tuner->index != 0) { retval = 
-EINVAL; goto done; } retval = si470x_get_register(radio, STATUSRSSI); if (retval < 0) goto done; /* driver constants */ strcpy(tuner->name, "FM"); tuner->type = V4L2_TUNER_RADIO; tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO; /* range limits */ switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) { /* 0: 87.5 - 108 MHz (USA, Europe, default) */ default: tuner->rangelow = 87.5 * FREQ_MUL; tuner->rangehigh = 108 * FREQ_MUL; break; /* 1: 76 - 108 MHz (Japan wide band) */ case 1: tuner->rangelow = 76 * FREQ_MUL; tuner->rangehigh = 108 * FREQ_MUL; break; /* 2: 76 - 90 MHz (Japan) */ case 2: tuner->rangelow = 76 * FREQ_MUL; tuner->rangehigh = 90 * FREQ_MUL; break; }; /* stereo indicator == stereo (instead of mono) */ if ((radio->registers[STATUSRSSI] & STATUSRSSI_ST) == 0) tuner->rxsubchans = V4L2_TUNER_SUB_MONO; else tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; /* If there is a reliable method of detecting an RDS channel, then this code should check for that before setting this RDS subchannel. */ tuner->rxsubchans |= V4L2_TUNER_SUB_RDS; /* mono/stereo selector */ if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 0) tuner->audmode = V4L2_TUNER_MODE_STEREO; else tuner->audmode = V4L2_TUNER_MODE_MONO; /* min is worst, max is best; signal:0..0xffff; rssi: 0..0xff */ /* measured in units of dbµV in 1 db increments (max at ~75 dbµV) */ tuner->signal = (radio->registers[STATUSRSSI] & STATUSRSSI_RSSI); /* the ideal factor is 0xffff/75 = 873,8 */ tuner->signal = (tuner->signal * 873) + (8 * tuner->signal / 10); /* automatic frequency control: -1: freq to low, 1 freq to high */ /* AFCRL does only indicate that freq. differs, not if too low/high */ tuner->afc = (radio->registers[STATUSRSSI] & STATUSRSSI_AFCRL) ? 
1 : 0; done: if (retval < 0) dev_warn(&radio->videodev->dev, "get tuner failed with %d\n", retval); mutex_unlock(&radio->lock); return retval; } /* * si470x_vidioc_s_tuner - set tuner attributes */ static int si470x_vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *tuner) { struct si470x_device *radio = video_drvdata(file); int retval = 0; mutex_lock(&radio->lock); /* safety checks */ retval = si470x_disconnect_check(radio); if (retval) goto done; if (tuner->index != 0) goto done; /* mono/stereo selector */ switch (tuner->audmode) { case V4L2_TUNER_MODE_MONO: radio->registers[POWERCFG] |= POWERCFG_MONO; /* force mono */ break; case V4L2_TUNER_MODE_STEREO: radio->registers[POWERCFG] &= ~POWERCFG_MONO; /* try stereo */ break; default: goto done; } retval = si470x_set_register(radio, POWERCFG); done: if (retval < 0) dev_warn(&radio->videodev->dev, "set tuner failed with %d\n", retval); mutex_unlock(&radio->lock); return retval; } /* * si470x_vidioc_g_frequency - get tuner or modulator radio frequency */ static int si470x_vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *freq) { struct si470x_device *radio = video_drvdata(file); int retval = 0; /* safety checks */ mutex_lock(&radio->lock); retval = si470x_disconnect_check(radio); if (retval) goto done; if (freq->tuner != 0) { retval = -EINVAL; goto done; } freq->type = V4L2_TUNER_RADIO; retval = si470x_get_freq(radio, &freq->frequency); done: if (retval < 0) dev_warn(&radio->videodev->dev, "get frequency failed with %d\n", retval); mutex_unlock(&radio->lock); return retval; } /* * si470x_vidioc_s_frequency - set tuner or modulator radio frequency */ static int si470x_vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *freq) { struct si470x_device *radio = video_drvdata(file); int retval = 0; mutex_lock(&radio->lock); /* safety checks */ retval = si470x_disconnect_check(radio); if (retval) goto done; if (freq->tuner != 0) { retval = -EINVAL; goto done; } retval 
= si470x_set_freq(radio, freq->frequency); done: if (retval < 0) dev_warn(&radio->videodev->dev, "set frequency failed with %d\n", retval); mutex_unlock(&radio->lock); return retval; } /* * si470x_vidioc_s_hw_freq_seek - set hardware frequency seek */ static int si470x_vidioc_s_hw_freq_seek(struct file *file, void *priv, struct v4l2_hw_freq_seek *seek) { struct si470x_device *radio = video_drvdata(file); int retval = 0; mutex_lock(&radio->lock); /* safety checks */ retval = si470x_disconnect_check(radio); if (retval) goto done; if (seek->tuner != 0) { retval = -EINVAL; goto done; } retval = si470x_set_seek(radio, seek->wrap_around, seek->seek_upward); done: if (retval < 0) dev_warn(&radio->videodev->dev, "set hardware frequency seek failed with %d\n", retval); mutex_unlock(&radio->lock); return retval; } /* * si470x_ioctl_ops - video device ioctl operations */ static const struct v4l2_ioctl_ops si470x_ioctl_ops = { .vidioc_querycap = si470x_vidioc_querycap, .vidioc_queryctrl = si470x_vidioc_queryctrl, .vidioc_g_ctrl = si470x_vidioc_g_ctrl, .vidioc_s_ctrl = si470x_vidioc_s_ctrl, .vidioc_g_audio = si470x_vidioc_g_audio, .vidioc_g_tuner = si470x_vidioc_g_tuner, .vidioc_s_tuner = si470x_vidioc_s_tuner, .vidioc_g_frequency = si470x_vidioc_g_frequency, .vidioc_s_frequency = si470x_vidioc_s_frequency, .vidioc_s_hw_freq_seek = si470x_vidioc_s_hw_freq_seek, }; /* * si470x_viddev_template - video device interface */ struct video_device si470x_viddev_template = { .fops = &si470x_fops, .name = DRIVER_NAME, .release = video_device_release, .ioctl_ops = &si470x_ioctl_ops, };
gpl-2.0
aditheking/G7102_MM_SWA_Opensource
drivers/media/dvb/dvb-usb/af9005-remote.c
9837
4317
/* DVB USB compliant Linux driver for the Afatech 9005
 * USB1.1 DVB-T receiver.
 *
 * Standard remote decode function
 *
 * Copyright (C) 2007 Luca Olivetti (luca@ventoso.org)
 *
 * Thanks to Afatech who kindly provided information.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * see Documentation/dvb/README.dvb-usb for more information
 */
#include "af9005.h"

/* debug: module parameter enabling the deb_decode() trace messages */
static int dvb_usb_af9005_remote_debug;
module_param_named(debug, dvb_usb_af9005_remote_debug, int, 0644);
MODULE_PARM_DESC(debug,
		 "enable (1) or disable (0) debug messages." DVB_USB_DEBUG_STATUS);

#define deb_decode(args...) dprintk(dvb_usb_af9005_remote_debug,0x01,args)

/*
 * Scancode-to-keycode map.  Entries store (custom_code << 8) | data
 * and are matched via rc5_custom()/rc5_data() in af9005_rc_decode().
 * Two groups appear below — custom codes 0x01 and 0x00; presumably
 * these correspond to two remote-control variants shipped with the
 * stick (TODO confirm against hardware docs).
 */
struct rc_map_table rc_map_af9005_table[] = {

	{0x01b7, KEY_POWER},
	{0x01a7, KEY_VOLUMEUP},
	{0x0187, KEY_CHANNELUP},
	{0x017f, KEY_MUTE},
	{0x01bf, KEY_VOLUMEDOWN},
	{0x013f, KEY_CHANNELDOWN},
	{0x01df, KEY_1},
	{0x015f, KEY_2},
	{0x019f, KEY_3},
	{0x011f, KEY_4},
	{0x01ef, KEY_5},
	{0x016f, KEY_6},
	{0x01af, KEY_7},
	{0x0127, KEY_8},
	{0x0107, KEY_9},
	{0x01cf, KEY_ZOOM},
	{0x014f, KEY_0},
	{0x018f, KEY_GOTO},	/* marked jump on the remote */

	{0x00bd, KEY_POWER},
	{0x007d, KEY_VOLUMEUP},
	{0x00fd, KEY_CHANNELUP},
	{0x009d, KEY_MUTE},
	{0x005d, KEY_VOLUMEDOWN},
	{0x00dd, KEY_CHANNELDOWN},
	{0x00ad, KEY_1},
	{0x006d, KEY_2},
	{0x00ed, KEY_3},
	{0x008d, KEY_4},
	{0x004d, KEY_5},
	{0x00cd, KEY_6},
	{0x00b5, KEY_7},
	{0x0075, KEY_8},
	{0x00f5, KEY_9},
	{0x0095, KEY_ZOOM},
	{0x0055, KEY_0},
	{0x00d5, KEY_GOTO},	/* marked jump on the remote */
};

int rc_map_af9005_table_size = ARRAY_SIZE(rc_map_af9005_table);

/* Keys that auto-repeat while held; all others ignore repeat frames. */
static int repeatable_keys[] = {
	KEY_VOLUMEUP,
	KEY_VOLUMEDOWN,
	KEY_CHANNELUP,
	KEY_CHANNELDOWN
};

/*
 * af9005_rc_decode - decode one IR capture buffer into a key event
 * @d:     dvb-usb device (d->last_event supplies the key for repeats)
 * @data:  raw capture: consecutive big-endian u16 (mark, space) pairs
 * @len:   number of bytes in @data
 * @event: out: decoded keycode
 * @state: out: REMOTE_KEY_PRESSED / REMOTE_KEY_REPEAT
 *
 * Pulse-distance decoding: the first mark/space pair is the start/repeat
 * code, the next 32 pairs carry one bit each (long space = 1).  A valid
 * frame is 0xfe | custom | data | ~data (NOTE(review): the final byte is
 * checked as the complement of 'data'; looks like an NEC-style frame —
 * confirm against the Afatech datasheet).  Returns 0 always; *event and
 * *state are only written when something was decoded.
 */
int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event,
		     int *state)
{
	u16 mark, space;
	u32 result;
	u8 cust, dat, invdat;
	int i;

	if (len >= 6) {
		/* first pair: start burst (or repeat code) */
		mark = (u16) (data[0] << 8) + data[1];
		space = (u16) (data[2] << 8) + data[3];
		if (space * 3 < mark) {
			/* short gap after the burst => repeat frame */
			for (i = 0; i < ARRAY_SIZE(repeatable_keys); i++) {
				if (d->last_event == repeatable_keys[i]) {
					*state = REMOTE_KEY_REPEAT;
					*event = d->last_event;
					deb_decode("repeat key, event %x\n",
						   *event);
					return 0;
				}
			}
			deb_decode("repeated key ignored (non repeatable)\n");
			return 0;
		} else if (len >= 33 * 4) {	/*32 bits + start code */
			/* shift in 32 data bits, MSB first */
			result = 0;
			for (i = 4; i < 4 + 32 * 4; i += 4) {
				result <<= 1;
				mark = (u16) (data[i] << 8) + data[i + 1];
				mark >>= 1;
				space = (u16) (data[i + 2] << 8) + data[i + 3];
				space >>= 1;
				/* space longer than ~2x mark encodes a 1 */
				if (mark * 2 > space)
					result += 1;
			}
			deb_decode("key pressed, raw value %x\n", result);
			/* frame must begin with the fixed 0xfe header byte */
			if ((result & 0xff000000) != 0xfe000000) {
				deb_decode
				    ("doesn't start with 0xfe, ignored\n");
				return 0;
			}
			cust = (result >> 16) & 0xff;
			dat = (result >> 8) & 0xff;
			invdat = (~result) & 0xff;
			/* integrity check: last byte is the inverted data byte */
			if (dat != invdat) {
				deb_decode("code != inverted code\n");
				return 0;
			}
			/* look up (custom, data) in the scancode table */
			for (i = 0; i < rc_map_af9005_table_size; i++) {
				if (rc5_custom(&rc_map_af9005_table[i]) == cust
				    && rc5_data(&rc_map_af9005_table[i]) == dat) {
					*event = rc_map_af9005_table[i].keycode;
					*state = REMOTE_KEY_PRESSED;
					deb_decode
					    ("key pressed, event %x\n", *event);
					return 0;
				}
			}
			deb_decode("not found in table\n");
		}
	}
	return 0;
}

EXPORT_SYMBOL(rc_map_af9005_table);
EXPORT_SYMBOL(rc_map_af9005_table_size);
EXPORT_SYMBOL(af9005_rc_decode);

MODULE_AUTHOR("Luca Olivetti <luca@ventoso.org>");
MODULE_DESCRIPTION
    ("Standard remote control decoder for Afatech 9005 DVB-T USB1.1 stick");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_samsung_trelte
arch/powerpc/platforms/cell/ras.c
10093
7980
/* * Copyright 2006-2008, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #undef DEBUG #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/reboot.h> #include <linux/kexec.h> #include <linux/crash_dump.h> #include <asm/kexec.h> #include <asm/reg.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/rtas.h> #include <asm/cell-regs.h> #include "ras.h" static void dump_fir(int cpu) { struct cbe_pmd_regs __iomem *pregs = cbe_get_cpu_pmd_regs(cpu); struct cbe_iic_regs __iomem *iregs = cbe_get_cpu_iic_regs(cpu); if (pregs == NULL) return; /* Todo: do some nicer parsing of bits and based on them go down * to other sub-units FIRs and not only IIC */ printk(KERN_ERR "Global Checkstop FIR : 0x%016llx\n", in_be64(&pregs->checkstop_fir)); printk(KERN_ERR "Global Recoverable FIR : 0x%016llx\n", in_be64(&pregs->checkstop_fir)); printk(KERN_ERR "Global MachineCheck FIR : 0x%016llx\n", in_be64(&pregs->spec_att_mchk_fir)); if (iregs == NULL) return; printk(KERN_ERR "IOC FIR : 0x%016llx\n", in_be64(&iregs->ioc_fir)); } void cbe_system_error_exception(struct pt_regs *regs) { int cpu = smp_processor_id(); printk(KERN_ERR "System Error Interrupt on CPU %d !\n", cpu); dump_fir(cpu); dump_stack(); } void cbe_maintenance_exception(struct pt_regs *regs) { int cpu = smp_processor_id(); /* * Nothing implemented for the maintenance interrupt at this point */ printk(KERN_ERR "Unhandled Maintenance interrupt on CPU %d !\n", cpu); dump_stack(); } void cbe_thermal_exception(struct pt_regs *regs) { int cpu = smp_processor_id(); /* * Nothing implemented for the thermal interrupt at this point */ printk(KERN_ERR "Unhandled Thermal interrupt on CPU %d !\n", cpu); dump_stack(); } static int 
cbe_machine_check_handler(struct pt_regs *regs) { int cpu = smp_processor_id(); printk(KERN_ERR "Machine Check Interrupt on CPU %d !\n", cpu); dump_fir(cpu); /* No recovery from this code now, lets continue */ return 0; } struct ptcal_area { struct list_head list; int nid; int order; struct page *pages; }; static LIST_HEAD(ptcal_list); static int ptcal_start_tok, ptcal_stop_tok; static int __init cbe_ptcal_enable_on_node(int nid, int order) { struct ptcal_area *area; int ret = -ENOMEM; unsigned long addr; if (is_kdump_kernel()) rtas_call(ptcal_stop_tok, 1, 1, NULL, nid); area = kmalloc(sizeof(*area), GFP_KERNEL); if (!area) goto out_err; area->nid = nid; area->order = order; area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE, area->order); if (!area->pages) { printk(KERN_WARNING "%s: no page on node %d\n", __func__, area->nid); goto out_free_area; } /* * We move the ptcal area to the middle of the allocated * page, in order to avoid prefetches in memcpy and similar * functions stepping on it. 
*/ addr = __pa(page_address(area->pages)) + (PAGE_SIZE >> 1); printk(KERN_DEBUG "%s: enabling PTCAL on node %d address=0x%016lx\n", __func__, area->nid, addr); ret = -EIO; if (rtas_call(ptcal_start_tok, 3, 1, NULL, area->nid, (unsigned int)(addr >> 32), (unsigned int)(addr & 0xffffffff))) { printk(KERN_ERR "%s: error enabling PTCAL on node %d!\n", __func__, nid); goto out_free_pages; } list_add(&area->list, &ptcal_list); return 0; out_free_pages: __free_pages(area->pages, area->order); out_free_area: kfree(area); out_err: return ret; } static int __init cbe_ptcal_enable(void) { const u32 *size; struct device_node *np; int order, found_mic = 0; np = of_find_node_by_path("/rtas"); if (!np) return -ENODEV; size = of_get_property(np, "ibm,cbe-ptcal-size", NULL); if (!size) { of_node_put(np); return -ENODEV; } pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size); order = get_order(*size); of_node_put(np); /* support for malta device trees, with be@/mic@ nodes */ for_each_node_by_type(np, "mic-tm") { cbe_ptcal_enable_on_node(of_node_to_nid(np), order); found_mic = 1; } if (found_mic) return 0; /* support for older device tree - use cpu nodes */ for_each_node_by_type(np, "cpu") { const u32 *nid = of_get_property(np, "node-id", NULL); if (!nid) { printk(KERN_ERR "%s: node %s is missing node-id?\n", __func__, np->full_name); continue; } cbe_ptcal_enable_on_node(*nid, order); found_mic = 1; } return found_mic ? 
0 : -ENODEV; } static int cbe_ptcal_disable(void) { struct ptcal_area *area, *tmp; int ret = 0; pr_debug("%s: disabling PTCAL\n", __func__); list_for_each_entry_safe(area, tmp, &ptcal_list, list) { /* disable ptcal on this node */ if (rtas_call(ptcal_stop_tok, 1, 1, NULL, area->nid)) { printk(KERN_ERR "%s: error disabling PTCAL " "on node %d!\n", __func__, area->nid); ret = -EIO; continue; } /* ensure we can access the PTCAL area */ memset(page_address(area->pages), 0, 1 << (area->order + PAGE_SHIFT)); /* clean up */ list_del(&area->list); __free_pages(area->pages, area->order); kfree(area); } return ret; } static int cbe_ptcal_notify_reboot(struct notifier_block *nb, unsigned long code, void *data) { return cbe_ptcal_disable(); } static void cbe_ptcal_crash_shutdown(void) { cbe_ptcal_disable(); } static struct notifier_block cbe_ptcal_reboot_notifier = { .notifier_call = cbe_ptcal_notify_reboot }; #ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON static int sysreset_hack; static int __init cbe_sysreset_init(void) { struct cbe_pmd_regs __iomem *regs; sysreset_hack = of_machine_is_compatible("IBM,CBPLUS-1.0"); if (!sysreset_hack) return 0; regs = cbe_get_cpu_pmd_regs(0); if (!regs) return 0; /* Enable JTAG system-reset hack */ out_be32(&regs->fir_mode_reg, in_be32(&regs->fir_mode_reg) | CBE_PMD_FIR_MODE_M8); return 0; } device_initcall(cbe_sysreset_init); int cbe_sysreset_hack(void) { struct cbe_pmd_regs __iomem *regs; /* * The BMC can inject user triggered system reset exceptions, * but cannot set the system reset reason in srr1, * so check an extra register here. 
*/ if (sysreset_hack && (smp_processor_id() == 0)) { regs = cbe_get_cpu_pmd_regs(0); if (!regs) return 0; if (in_be64(&regs->ras_esc_0) & 0x0000ffff) { out_be64(&regs->ras_esc_0, 0); return 0; } } return 1; } #endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */ int __init cbe_ptcal_init(void) { int ret; ptcal_start_tok = rtas_token("ibm,cbe-start-ptcal"); ptcal_stop_tok = rtas_token("ibm,cbe-stop-ptcal"); if (ptcal_start_tok == RTAS_UNKNOWN_SERVICE || ptcal_stop_tok == RTAS_UNKNOWN_SERVICE) return -ENODEV; ret = register_reboot_notifier(&cbe_ptcal_reboot_notifier); if (ret) goto out1; ret = crash_shutdown_register(&cbe_ptcal_crash_shutdown); if (ret) goto out2; return cbe_ptcal_enable(); out2: unregister_reboot_notifier(&cbe_ptcal_reboot_notifier); out1: printk(KERN_ERR "Can't disable PTCAL, so not enabling\n"); return ret; } arch_initcall(cbe_ptcal_init); void __init cbe_ras_init(void) { unsigned long hid0; /* * Enable System Error & thermal interrupts and wakeup conditions */ hid0 = mfspr(SPRN_HID0); hid0 |= HID0_CBE_THERM_INT_EN | HID0_CBE_THERM_WAKEUP | HID0_CBE_SYSERR_INT_EN | HID0_CBE_SYSERR_WAKEUP; mtspr(SPRN_HID0, hid0); mb(); /* * Install machine check handler. Leave setting of precise mode to * what the firmware did for now */ ppc_md.machine_check_exception = cbe_machine_check_handler; mb(); /* * For now, we assume that IOC_FIR is already set to forward some * error conditions to the System Error handler. If that is not true * then it will have to be fixed up here. */ }
gpl-2.0
samarthp/sam-tenderloin-kernel-3.4
lib/crc8.c
10349
2461
/* * Copyright (c) 2011 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/crc8.h> #include <linux/printk.h> /* * crc8_populate_msb - fill crc table for given polynomial in reverse bit order. * * table: table to be filled. * polynomial: polynomial for which table is to be filled. */ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) { int i, j; const u8 msbit = 0x80; u8 t = msbit; table[0] = 0; for (i = 1; i < CRC8_TABLE_SIZE; i *= 2) { t = (t << 1) ^ (t & msbit ? polynomial : 0); for (j = 0; j < i; j++) table[i+j] = table[j] ^ t; } } EXPORT_SYMBOL(crc8_populate_msb); /* * crc8_populate_lsb - fill crc table for given polynomial in regular bit order. * * table: table to be filled. * polynomial: polynomial for which table is to be filled. */ void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) { int i, j; u8 t = 1; table[0] = 0; for (i = (CRC8_TABLE_SIZE >> 1); i; i >>= 1) { t = (t >> 1) ^ (t & 1 ? polynomial : 0); for (j = 0; j < CRC8_TABLE_SIZE; j += 2*i) table[i+j] = table[j] ^ t; } } EXPORT_SYMBOL(crc8_populate_lsb); /* * crc8 - calculate a crc8 over the given input data. * * table: crc table used for calculation. * pdata: pointer to data buffer. 
* nbytes: number of bytes in data buffer. * crc: previous returned crc8 value. */ u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc) { /* loop over the buffer data */ while (nbytes-- > 0) crc = table[(crc ^ *pdata++) & 0xff]; return crc; } EXPORT_SYMBOL(crc8); MODULE_DESCRIPTION("CRC8 (by Williams, Ross N.) function"); MODULE_AUTHOR("Broadcom Corporation"); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
jdkoreclipse/incrediblec_2.6.38
drivers/net/wireless/rtlwifi/core.c
110
28292
/****************************************************************************** * * Copyright(c) 2009-2010 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> *****************************************************************************/ #include "wifi.h" #include "core.h" #include "cam.h" #include "base.h" #include "ps.h" /*mutex for start & stop is must here. 
*/ static int rtl_op_start(struct ieee80211_hw *hw) { int err = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (!is_hal_stop(rtlhal)) return 0; if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) return 0; mutex_lock(&rtlpriv->locks.conf_mutex); err = rtlpriv->intf_ops->adapter_start(hw); if (err) goto out; rtl_watch_dog_timer_callback((unsigned long)hw); out: mutex_unlock(&rtlpriv->locks.conf_mutex); return err; } static void rtl_op_stop(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); if (is_hal_stop(rtlhal)) return; if (unlikely(ppsc->rfpwr_state == ERFOFF)) { rtl_ips_nic_on(hw); mdelay(1); } mutex_lock(&rtlpriv->locks.conf_mutex); mac->link_state = MAC80211_NOLINK; memset(mac->bssid, 0, 6); /*reset sec info */ rtl_cam_reset_sec_info(hw); rtl_deinit_deferred_work(hw); rtlpriv->intf_ops->adapter_stop(hw); mutex_unlock(&rtlpriv->locks.conf_mutex); } static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON)) goto err_free; if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) goto err_free; rtlpriv->intf_ops->adapter_tx(hw, skb); return NETDEV_TX_OK; err_free: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static int rtl_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); int err = 0; if (mac->vif) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("vif has been set!! 
mac->vif = 0x%p\n", mac->vif)); return -EOPNOTSUPP; } rtl_ips_nic_on(hw); mutex_lock(&rtlpriv->locks.conf_mutex); switch (vif->type) { case NL80211_IFTYPE_STATION: if (mac->beacon_enabled == 1) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("NL80211_IFTYPE_STATION\n")); mac->beacon_enabled = 0; rtlpriv->cfg->ops->update_interrupt_mask(hw, 0, rtlpriv->cfg->maps [RTL_IBSS_INT_MASKS]); } break; case NL80211_IFTYPE_ADHOC: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("NL80211_IFTYPE_ADHOC\n")); mac->link_state = MAC80211_LINKED; rtlpriv->cfg->ops->set_bcn_reg(hw); break; case NL80211_IFTYPE_AP: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("NL80211_IFTYPE_AP\n")); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("operation mode %d is not support!\n", vif->type)); err = -EOPNOTSUPP; goto out; } mac->vif = vif; mac->opmode = vif->type; rtlpriv->cfg->ops->set_network_type(hw, vif->type); memcpy(mac->mac_addr, vif->addr, ETH_ALEN); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr); out: mutex_unlock(&rtlpriv->locks.conf_mutex); return err; } static void rtl_op_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); mutex_lock(&rtlpriv->locks.conf_mutex); /* Free beacon resources */ if ((mac->opmode == NL80211_IFTYPE_AP) || (mac->opmode == NL80211_IFTYPE_ADHOC) || (mac->opmode == NL80211_IFTYPE_MESH_POINT)) { if (mac->beacon_enabled == 1) { mac->beacon_enabled = 0; rtlpriv->cfg->ops->update_interrupt_mask(hw, 0, rtlpriv->cfg->maps [RTL_IBSS_INT_MASKS]); } } /* *Note: We assume NL80211_IFTYPE_UNSPECIFIED as *NO LINK for our hardware. 
*/ mac->vif = NULL; mac->link_state = MAC80211_NOLINK; memset(mac->bssid, 0, 6); mac->opmode = NL80211_IFTYPE_UNSPECIFIED; rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); mutex_unlock(&rtlpriv->locks.conf_mutex); } static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct ieee80211_conf *conf = &hw->conf; mutex_lock(&rtlpriv->locks.conf_mutex); if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /*BIT(2)*/ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n")); } /*For IPS */ if (changed & IEEE80211_CONF_CHANGE_IDLE) { if (hw->conf.flags & IEEE80211_CONF_IDLE) rtl_ips_nic_off(hw); else rtl_ips_nic_on(hw); } else { /* *although rfoff may not cause by ips, but we will *check the reason in set_rf_power_state function */ if (unlikely(ppsc->rfpwr_state == ERFOFF)) rtl_ips_nic_on(hw); } /*For LPS */ if (changed & IEEE80211_CONF_CHANGE_PS) { if (conf->flags & IEEE80211_CONF_PS) rtl_lps_enter(hw); else rtl_lps_leave(hw); } if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n", hw->conf.long_frame_max_tx_count)); mac->retry_long = hw->conf.long_frame_max_tx_count; mac->retry_short = hw->conf.long_frame_max_tx_count; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, (u8 *) (&hw->conf. long_frame_max_tx_count)); } if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { struct ieee80211_channel *channel = hw->conf.channel; u8 wide_chan = (u8) channel->hw_value; /* *because we should back channel to *current_network.chan in in scanning, *So if set_chan == current_network.chan *we should set it. 
*because mac80211 tell us wrong bw40 *info for cisco1253 bw20, so we modify *it here based on UPPER & LOWER */ switch (hw->conf.channel_type) { case NL80211_CHAN_HT20: case NL80211_CHAN_NO_HT: /* SC */ mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_DONT_CARE; rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20; mac->bw_40 = false; break; case NL80211_CHAN_HT40MINUS: /* SC */ mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER; rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20_40; mac->bw_40 = true; /*wide channel */ wide_chan -= 2; break; case NL80211_CHAN_HT40PLUS: /* SC */ mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER; rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20_40; mac->bw_40 = true; /*wide channel */ wide_chan += 2; break; default: mac->bw_40 = false; RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case not processed\n")); break; } if (wide_chan <= 0) wide_chan = 1; rtlphy->current_channel = wide_chan; rtlpriv->cfg->ops->set_channel_access(hw); rtlpriv->cfg->ops->switch_channel(hw); rtlpriv->cfg->ops->set_bw_mode(hw, hw->conf.channel_type); } mutex_unlock(&rtlpriv->locks.conf_mutex); return 0; } static void rtl_op_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *new_flags, u64 multicast) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); *new_flags &= RTL_SUPPORTED_FILTERS; if (!changed_flags) return; /*TODO: we disable broadcase now, so enable here */ if (changed_flags & FIF_ALLMULTI) { if (*new_flags & FIF_ALLMULTI) { mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] | rtlpriv->cfg->maps[MAC_RCR_AB]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("Enable receive multicast frame.\n")); } else { mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] | rtlpriv->cfg->maps[MAC_RCR_AB]); RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("Disable receive multicast frame.\n")); } } if (changed_flags & FIF_FCSFAIL) { if (*new_flags & FIF_FCSFAIL) { mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32]; RT_TRACE(rtlpriv, COMP_MAC80211, 
DBG_LOUD, ("Enable receive FCS error frame.\n")); } else { mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("Disable receive FCS error frame.\n")); } } if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { /* *TODO: BIT(5) is probe response BIT(8) is beacon *TODO: Use define for BIT(5) and BIT(8) */ if (*new_flags & FIF_BCN_PRBRESP_PROMISC) mac->rx_mgt_filter |= (BIT(5) | BIT(8)); else mac->rx_mgt_filter &= ~(BIT(5) | BIT(8)); } if (changed_flags & FIF_CONTROL) { if (*new_flags & FIF_CONTROL) { mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF]; mac->rx_ctrl_filter |= RTL_SUPPORTED_CTRL_FILTER; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("Enable receive control frame.\n")); } else { mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF]; mac->rx_ctrl_filter &= ~RTL_SUPPORTED_CTRL_FILTER; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("Disable receive control frame.\n")); } } if (changed_flags & FIF_OTHER_BSS) { if (*new_flags & FIF_OTHER_BSS) { mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("Enable receive other BSS's frame.\n")); } else { mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("Disable receive other BSS's frame.\n")); } } rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER, (u8 *) (&mac->rx_mgt_filter)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER, (u8 *) (&mac->rx_ctrl_filter)); } static int _rtl_get_hal_qnum(u16 queue) { int qnum; switch (queue) { case 0: qnum = AC3_VO; break; case 1: qnum = AC2_VI; break; case 2: qnum = AC0_BE; break; case 3: qnum = AC1_BK; break; default: qnum = AC0_BE; break; } return qnum; } /* *for mac80211 VO=0, VI=1, BE=2, BK=3 *for rtl819x BE=0, BK=1, VI=2, VO=3 */ static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue, const struct ieee80211_tx_queue_params *param) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac 
*mac = rtl_mac(rtl_priv(hw)); int aci; if (queue >= AC_MAX) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("queue number %d is incorrect!\n", queue)); return -EINVAL; } aci = _rtl_get_hal_qnum(queue); mac->ac[aci].aifs = param->aifs; mac->ac[aci].cw_min = param->cw_min; mac->ac[aci].cw_max = param->cw_max; mac->ac[aci].tx_op = param->txop; memcpy(&mac->edca_param[aci], param, sizeof(*param)); rtlpriv->cfg->ops->set_qos(hw, aci); return 0; } static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changed) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); mutex_lock(&rtlpriv->locks.conf_mutex); if ((vif->type == NL80211_IFTYPE_ADHOC) || (vif->type == NL80211_IFTYPE_AP) || (vif->type == NL80211_IFTYPE_MESH_POINT)) { if ((changed & BSS_CHANGED_BEACON) || (changed & BSS_CHANGED_BEACON_ENABLED && bss_conf->enable_beacon)) { if (mac->beacon_enabled == 0) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, ("BSS_CHANGED_BEACON_ENABLED\n")); /*start hw beacon interrupt. 
*/ /*rtlpriv->cfg->ops->set_bcn_reg(hw); */ mac->beacon_enabled = 1; rtlpriv->cfg->ops->update_interrupt_mask(hw, rtlpriv->cfg->maps [RTL_IBSS_INT_MASKS], 0); } } else { if (mac->beacon_enabled == 1) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, ("ADHOC DISABLE BEACON\n")); mac->beacon_enabled = 0; rtlpriv->cfg->ops->update_interrupt_mask(hw, 0, rtlpriv->cfg->maps [RTL_IBSS_INT_MASKS]); } } if (changed & BSS_CHANGED_BEACON_INT) { RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE, ("BSS_CHANGED_BEACON_INT\n")); mac->beacon_interval = bss_conf->beacon_int; rtlpriv->cfg->ops->set_bcn_intv(hw); } } /*TODO: reference to enum ieee80211_bss_change */ if (changed & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { mac->link_state = MAC80211_LINKED; mac->cnt_after_linked = 0; mac->assoc_id = bss_conf->aid; memcpy(mac->bssid, bss_conf->bssid, 6); RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, ("BSS_CHANGED_ASSOC\n")); } else { if (mac->link_state == MAC80211_LINKED) rtl_lps_leave(hw); mac->link_state = MAC80211_NOLINK; memset(mac->bssid, 0, 6); /* reset sec info */ rtl_cam_reset_sec_info(hw); RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, ("BSS_CHANGED_UN_ASSOC\n")); } } if (changed & BSS_CHANGED_ERP_CTS_PROT) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, ("BSS_CHANGED_ERP_CTS_PROT\n")); mac->use_cts_protect = bss_conf->use_cts_prot; } if (changed & BSS_CHANGED_ERP_PREAMBLE) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n", bss_conf->use_short_preamble)); mac->short_preamble = bss_conf->use_short_preamble; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE, (u8 *) (&mac->short_preamble)); } if (changed & BSS_CHANGED_ERP_SLOT) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, ("BSS_CHANGED_ERP_SLOT\n")); if (bss_conf->use_short_slot) mac->slot_time = RTL_SLOT_TIME_9; else mac->slot_time = RTL_SLOT_TIME_20; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, (u8 *) (&mac->slot_time)); } if (changed & BSS_CHANGED_HT) { struct ieee80211_sta *sta = NULL; 
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, ("BSS_CHANGED_HT\n")); sta = ieee80211_find_sta(mac->vif, mac->bssid); if (sta) { if (sta->ht_cap.ampdu_density > mac->current_ampdu_density) mac->current_ampdu_density = sta->ht_cap.ampdu_density; if (sta->ht_cap.ampdu_factor < mac->current_ampdu_factor) mac->current_ampdu_factor = sta->ht_cap.ampdu_factor; } rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY, (u8 *) (&mac->max_mss_density)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR, &mac->current_ampdu_factor); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE, &mac->current_ampdu_density); } if (changed & BSS_CHANGED_BSSID) { struct ieee80211_sta *sta = NULL; u32 basic_rates; u8 i; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID, (u8 *) bss_conf->bssid); RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, (MAC_FMT "\n", MAC_ARG(bss_conf->bssid))); memcpy(mac->bssid, bss_conf->bssid, 6); if (is_valid_ether_addr(bss_conf->bssid)) { switch (vif->type) { case NL80211_IFTYPE_UNSPECIFIED: break; case NL80211_IFTYPE_ADHOC: break; case NL80211_IFTYPE_STATION: break; case NL80211_IFTYPE_AP: break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case not process\n")); break; } rtlpriv->cfg->ops->set_network_type(hw, vif->type); } else rtlpriv->cfg->ops->set_network_type(hw, NL80211_IFTYPE_UNSPECIFIED); memset(mac->mcs, 0, 16); mac->ht_enable = false; mac->sgi_40 = false; mac->sgi_20 = false; if (!bss_conf->use_short_slot) mac->mode = WIRELESS_MODE_B; else mac->mode = WIRELESS_MODE_G; sta = ieee80211_find_sta(mac->vif, mac->bssid); if (sta) { if (sta->ht_cap.ht_supported) { mac->mode = WIRELESS_MODE_N_24G; mac->ht_enable = true; } if (mac->ht_enable) { u16 ht_cap = sta->ht_cap.cap; memcpy(mac->mcs, (u8 *) (&sta->ht_cap.mcs), 16); for (i = 0; i < 16; i++) RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("%x ", mac->mcs[i])); RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n")); if (ht_cap & IEEE80211_HT_CAP_SGI_40) mac->sgi_40 = true; if (ht_cap & 
IEEE80211_HT_CAP_SGI_20) mac->sgi_20 = true; /* * for cisco 1252 bw20 it's wrong * if (ht_cap & * IEEE80211_HT_CAP_SUP_WIDTH_20_40) { * mac->bw_40 = true; * } */ } } /*mac80211 just give us CCK rates any time *So we add G rate in basic rates when not in B mode*/ if (changed & BSS_CHANGED_BASIC_RATES) { if (mac->mode == WIRELESS_MODE_B) basic_rates = bss_conf->basic_rates | 0x00f; else basic_rates = bss_conf->basic_rates | 0xff0; if (!vif) goto out; mac->basic_rates = basic_rates; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, (u8 *) (&basic_rates)); if (rtlpriv->dm.b_useramask) rtlpriv->cfg->ops->update_rate_mask(hw, 0); else rtlpriv->cfg->ops->update_rate_table(hw); } } /* * For FW LPS: * To tell firmware we have connected * to an AP. For 92SE/CE power save v2. */ if (changed & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { if (ppsc->b_fwctrl_lps) { u8 mstatus = RT_MEDIA_CONNECT; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *) (&mstatus)); ppsc->report_linked = true; } } else { if (ppsc->b_fwctrl_lps) { u8 mstatus = RT_MEDIA_DISCONNECT; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus)); ppsc->report_linked = false; } } } out: mutex_unlock(&rtlpriv->locks.conf_mutex); } static u64 rtl_op_get_tsf(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u64 tsf; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&tsf)); return tsf; } static void rtl_op_set_tsf(struct ieee80211_hw *hw, u64 tsf) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 
1 : 0;; mac->tsf = tsf; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss)); } static void rtl_op_reset_tsf(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp = 0; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp)); } static void rtl_op_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { switch (cmd) { case STA_NOTIFY_SLEEP: break; case STA_NOTIFY_AWAKE: break; default: break; } } static int rtl_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, u16 * ssn) { struct rtl_priv *rtlpriv = rtl_priv(hw); switch (action) { case IEEE80211_AMPDU_TX_START: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid)); return rtl_tx_agg_start(hw, sta->addr, tid, ssn); break; case IEEE80211_AMPDU_TX_STOP: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid)); return rtl_tx_agg_stop(hw, sta->addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, ("IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid)); break; case IEEE80211_AMPDU_RX_START: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, ("IEEE80211_AMPDU_RX_START:TID:%d\n", tid)); break; case IEEE80211_AMPDU_RX_STOP: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, ("IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid)); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("IEEE80211_AMPDU_ERR!!!!:\n")); return -EOPNOTSUPP; } return 0; } static void rtl_op_sw_scan_start(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); mac->act_scanning = true; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n")); if (mac->link_state == MAC80211_LINKED) { rtl_lps_leave(hw); mac->link_state = MAC80211_LINKED_SCANNING; } else rtl_ips_nic_on(hw); rtlpriv->cfg->ops->led_control(hw, 
/* Tail of rtl_op_sw_scan_start -- the definition begins before this chunk. */
			LED_CTL_SITE_SURVEY);
	/* Save the registers that scanning clobbers; restored in
	 * rtl_op_sw_scan_complete() below with SCAN_OPT_RESTORE. */
	rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP);
}

/*
 * mac80211 ->sw_scan_complete callback: undo what rtl_op_sw_scan_start did.
 * Restores the backed-up registers, clears the scanning flag and, if we
 * were associated when the scan began, re-applies the network type and
 * rate mask/table so firmware powersave behaves again.
 */
static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));

	RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n"));
	rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
	mac->act_scanning = false;
	if (mac->link_state == MAC80211_LINKED_SCANNING) {
		mac->link_state = MAC80211_LINKED;

		/* fix fwlps issue */
		rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);

		if (rtlpriv->dm.b_useramask)
			rtlpriv->cfg->ops->update_rate_mask(hw, 0);
		else
			rtlpriv->cfg->ops->update_rate_table(hw);
	}
}

/*
 * mac80211 ->set_key callback: program (SET_KEY) or remove (DISABLE_KEY)
 * a hardware encryption key.
 *
 * Returns 0 on success, -ENOSPC when HW crypto is disabled by module
 * parameter (mac80211 then falls back to software crypto), or
 * -EOPNOTSUPP for a pairwise key without a station address.
 *
 * Runs under locks.conf_mutex; sec.being_setkey brackets the whole
 * operation so other paths can tell a key update is in flight.
 */
static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u8 key_type = NO_ENCRYPTION;
	u8 key_idx;
	bool group_key = false;
	bool wep_only = false;
	int err = 0;
	u8 mac_addr[ETH_ALEN];
	u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u8 zero_addr[ETH_ALEN] = { 0 };

	if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 ("not open hw encryption\n"));
		return -ENOSPC;	/*User disabled HW-crypto */
	}
	RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
		 ("%s hardware based encryption for keyidx: %d, mac: %pM\n",
		  cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
		  sta ? sta->addr : bcast_addr));
	rtlpriv->sec.being_setkey = true;
	/* Key programming touches hardware: make sure the NIC is powered. */
	rtl_ips_nic_on(hw);
	mutex_lock(&rtlpriv->locks.conf_mutex);
	/* <1> get encryption alg */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		key_type = WEP40_ENCRYPTION;
		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
		rtlpriv->sec.use_defaultkey = true;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:WEP104\n"));
		key_type = WEP104_ENCRYPTION;
		rtlpriv->sec.use_defaultkey = true;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = TKIP_ENCRYPTION;
		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
		/* In IBSS every peer shares the key slots, so treat the
		 * key as a default key there. */
		if (mac->opmode == NL80211_IFTYPE_ADHOC)
			rtlpriv->sec.use_defaultkey = true;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_type = AESCCMP_ENCRYPTION;
		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
		if (mac->opmode == NL80211_IFTYPE_ADHOC)
			rtlpriv->sec.use_defaultkey = true;
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("alg_err:%x!!!!:\n", key->cipher));
		goto out_unlock;
	}
	/* <2> get key_idx */
	key_idx = (u8) (key->keyidx);
	/* Hardware has four default-key slots; anything else is invalid. */
	if (key_idx > 3)
		goto out_unlock;
	/* <3> if pairwise key enable_hw_sec */
	group_key = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	if ((!group_key) || (mac->opmode == NL80211_IFTYPE_ADHOC) ||
	    rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
		/* WEP-only operation is flagged when no pairwise algorithm
		 * has been configured yet and this key is WEP. */
		if (rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION &&
		    (key_type == WEP40_ENCRYPTION ||
		     key_type == WEP104_ENCRYPTION))
			wep_only = true;
		rtlpriv->sec.pairwise_enc_algorithm = key_type;
		rtlpriv->cfg->ops->enable_hw_sec(hw);
	}
	/* <4> set key based on cmd */
	switch (cmd) {
	case SET_KEY:
		if (wep_only) {
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 ("set WEP(group/pairwise) key\n"));
			/* Pairwise key with an assigned MAC address. */
			rtlpriv->sec.pairwise_enc_algorithm = key_type;
			rtlpriv->sec.group_enc_algorithm = key_type;
			/*set local buf about wep key. */
			memcpy(rtlpriv->sec.key_buf[key_idx],
			       key->key, key->keylen);
			rtlpriv->sec.key_len[key_idx] = key->keylen;
			memcpy(mac_addr, zero_addr, ETH_ALEN);
		} else if (group_key) {	/* group key */
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 ("set group key\n"));
			/* group key */
			rtlpriv->sec.group_enc_algorithm = key_type;
			/*set local buf about group key. */
			memcpy(rtlpriv->sec.key_buf[key_idx],
			       key->key, key->keylen);
			rtlpriv->sec.key_len[key_idx] = key->keylen;
			/* Group keys are installed against the broadcast
			 * address. */
			memcpy(mac_addr, bcast_addr, ETH_ALEN);
		} else {	/* pairwise key */
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 ("set pairwise key\n"));
			if (!sta) {
				RT_ASSERT(false, ("pairwise key withnot"
						  "mac_addr\n"));
				err = -EOPNOTSUPP;
				goto out_unlock;
			}
			/* Pairwise key with an assigned MAC address. */
			rtlpriv->sec.pairwise_enc_algorithm = key_type;
			/*set local buf about pairwise key. */
			memcpy(rtlpriv->sec.key_buf[PAIRWISE_KEYIDX],
			       key->key, key->keylen);
			rtlpriv->sec.key_len[PAIRWISE_KEYIDX] = key->keylen;
			rtlpriv->sec.pairwise_key =
			    rtlpriv->sec.key_buf[PAIRWISE_KEYIDX];
			memcpy(mac_addr, sta->addr, ETH_ALEN);
		}
		rtlpriv->cfg->ops->set_key(hw, key_idx, mac_addr,
					   group_key, key_type, wep_only,
					   false);
		/* <5> tell mac80211 do something: */
		/*must use sw generate IV, or can not work !!!!. */
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		key->hw_key_idx = key_idx;
		if (key_type == TKIP_ENCRYPTION)
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		break;
	case DISABLE_KEY:
		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
			 ("disable key delete one entry\n"));
		/*set local buf about wep key. */
		memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen);
		rtlpriv->sec.key_len[key_idx] = 0;
		memcpy(mac_addr, zero_addr, ETH_ALEN);
		/*
		 *mac80211 will delete entrys one by one,
		 *so don't use rtl_cam_reset_all_entry
		 *or clear all entry here.
		 */
		rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("cmd_err:%x!!!!:\n", cmd));
	}
out_unlock:
	mutex_unlock(&rtlpriv->locks.conf_mutex);
	rtlpriv->sec.being_setkey = false;
	return err;
}

/*
 * mac80211 ->rfkill_poll callback: query the hardware radio switch and,
 * when its state changed since the last poll, report the new blocked
 * state to the wiphy rfkill core.  Does nothing until the interface has
 * been started.
 */
static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	bool radio_state;
	bool blocked;
	u8 valid = 0;

	if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
		return;

	mutex_lock(&rtlpriv->locks.conf_mutex);

	/*if Radio On return true here */
	radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);

	if (valid) {
		if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
			rtlpriv->rfkill.rfkill_state = radio_state;

			RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
				 (KERN_INFO "wireless radio switch turned %s\n",
				  radio_state ? "on" : "off"));

			/* rfkill "blocked" is the inverse of radio-on. */
			blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
			wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
		}
	}

	mutex_unlock(&rtlpriv->locks.conf_mutex);
}

/* mac80211 callback table exported to the per-chip rtlwifi drivers. */
const struct ieee80211_ops rtl_ops = {
	.start = rtl_op_start,
	.stop = rtl_op_stop,
	.tx = rtl_op_tx,
	.add_interface = rtl_op_add_interface,
	.remove_interface = rtl_op_remove_interface,
	.config = rtl_op_config,
	.configure_filter = rtl_op_configure_filter,
	.set_key = rtl_op_set_key,
	.conf_tx = rtl_op_conf_tx,
	.bss_info_changed = rtl_op_bss_info_changed,
	.get_tsf = rtl_op_get_tsf,
	.set_tsf = rtl_op_set_tsf,
	.reset_tsf = rtl_op_reset_tsf,
	.sta_notify = rtl_op_sta_notify,
	.ampdu_action = rtl_op_ampdu_action,
	.sw_scan_start = rtl_op_sw_scan_start,
	.sw_scan_complete = rtl_op_sw_scan_complete,
	.rfkill_poll = rtl_op_rfkill_poll,
};
gpl-2.0
AndroidOpenSourceXperia/android_kernel_sony_u8500
arch/arm/mach-mx5/board-mx53_loco.c
110
7870
/*
 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.

 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* Board support for the Freescale i.MX53 LOCO (Quick Start) board. */

#include <linux/init.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/iomux-mx53.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>

#include "crm_regs.h"
#include "devices-imx53.h"

/* Board GPIOs: power/volume buttons and the FEC PHY reset line. */
#define MX53_LOCO_POWER		IMX_GPIO_NR(1, 8)
#define MX53_LOCO_UI1		IMX_GPIO_NR(2, 14)
#define MX53_LOCO_UI2		IMX_GPIO_NR(2, 15)
#define LOCO_FEC_PHY_RST	IMX_GPIO_NR(7, 6)

/* Complete IOMUX pad configuration for the board, applied once at init. */
static iomux_v3_cfg_t mx53_loco_pads[] = {
	/* FEC */
	MX53_PAD_FEC_MDC__FEC_MDC,
	MX53_PAD_FEC_MDIO__FEC_MDIO,
	MX53_PAD_FEC_REF_CLK__FEC_TX_CLK,
	MX53_PAD_FEC_RX_ER__FEC_RX_ER,
	MX53_PAD_FEC_CRS_DV__FEC_RX_DV,
	MX53_PAD_FEC_RXD1__FEC_RDATA_1,
	MX53_PAD_FEC_RXD0__FEC_RDATA_0,
	MX53_PAD_FEC_TX_EN__FEC_TX_EN,
	MX53_PAD_FEC_TXD1__FEC_TDATA_1,
	MX53_PAD_FEC_TXD0__FEC_TDATA_0,
	/* FEC_nRST */
	MX53_PAD_PATA_DA_0__GPIO7_6,
	/* FEC_nINT */
	MX53_PAD_PATA_DATA4__GPIO2_4,
	/* AUDMUX5 */
	MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC,
	MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD,
	MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS,
	MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD,
	/* I2C2 */
	MX53_PAD_KEY_COL3__I2C2_SCL,
	MX53_PAD_KEY_ROW3__I2C2_SDA,
	/* SD1 */
	MX53_PAD_SD1_CMD__ESDHC1_CMD,
	MX53_PAD_SD1_CLK__ESDHC1_CLK,
	MX53_PAD_SD1_DATA0__ESDHC1_DAT0,
	MX53_PAD_SD1_DATA1__ESDHC1_DAT1,
	MX53_PAD_SD1_DATA2__ESDHC1_DAT2,
	MX53_PAD_SD1_DATA3__ESDHC1_DAT3,
	/* SD3 */
	MX53_PAD_PATA_DATA8__ESDHC3_DAT0,
	MX53_PAD_PATA_DATA9__ESDHC3_DAT1,
	MX53_PAD_PATA_DATA10__ESDHC3_DAT2,
	MX53_PAD_PATA_DATA11__ESDHC3_DAT3,
	MX53_PAD_PATA_DATA0__ESDHC3_DAT4,
	MX53_PAD_PATA_DATA1__ESDHC3_DAT5,
	MX53_PAD_PATA_DATA2__ESDHC3_DAT6,
	MX53_PAD_PATA_DATA3__ESDHC3_DAT7,
	MX53_PAD_PATA_IORDY__ESDHC3_CLK,
	MX53_PAD_PATA_RESET_B__ESDHC3_CMD,
	/* SD3_CD */
	MX53_PAD_EIM_DA11__GPIO3_11,
	/* SD3_WP */
	MX53_PAD_EIM_DA12__GPIO3_12,
	/* VGA */
	MX53_PAD_EIM_OE__IPU_DI1_PIN7,
	MX53_PAD_EIM_RW__IPU_DI1_PIN8,
	/* DISPLB */
	MX53_PAD_EIM_D20__IPU_SER_DISP0_CS,
	MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK,
	MX53_PAD_EIM_D22__IPU_DISPB0_SER_DIN,
	MX53_PAD_EIM_D23__IPU_DI0_D0_CS,
	/* DISP0_POWER_EN */
	MX53_PAD_EIM_D24__GPIO3_24,
	/* DISP0 DET INT */
	MX53_PAD_EIM_D31__GPIO3_31,
	/* LVDS */
	MX53_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3,
	MX53_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK,
	MX53_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2,
	MX53_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1,
	MX53_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0,
	MX53_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3,
	MX53_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2,
	MX53_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK,
	MX53_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1,
	MX53_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0,
	/* I2C1 */
	MX53_PAD_CSI0_DAT8__I2C1_SDA,
	MX53_PAD_CSI0_DAT9__I2C1_SCL,
	/* UART1 */
	MX53_PAD_CSI0_DAT10__UART1_TXD_MUX,
	MX53_PAD_CSI0_DAT11__UART1_RXD_MUX,
	/* CSI0 */
	MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12,
	MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13,
	MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14,
	MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15,
	MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16,
	MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17,
	MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18,
	MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19,
	MX53_PAD_CSI0_VSYNC__IPU_CSI0_VSYNC,
	MX53_PAD_CSI0_MCLK__IPU_CSI0_HSYNC,
	MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK,
	/* DISPLAY */
	MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK,
	MX53_PAD_DI0_PIN15__IPU_DI0_PIN15,
	MX53_PAD_DI0_PIN2__IPU_DI0_PIN2,
	MX53_PAD_DI0_PIN3__IPU_DI0_PIN3,
	MX53_PAD_DISP0_DAT0__IPU_DISP0_DAT_0,
	MX53_PAD_DISP0_DAT1__IPU_DISP0_DAT_1,
	MX53_PAD_DISP0_DAT2__IPU_DISP0_DAT_2,
	MX53_PAD_DISP0_DAT3__IPU_DISP0_DAT_3,
	MX53_PAD_DISP0_DAT4__IPU_DISP0_DAT_4,
	MX53_PAD_DISP0_DAT5__IPU_DISP0_DAT_5,
	MX53_PAD_DISP0_DAT6__IPU_DISP0_DAT_6,
	MX53_PAD_DISP0_DAT7__IPU_DISP0_DAT_7,
	MX53_PAD_DISP0_DAT8__IPU_DISP0_DAT_8,
	MX53_PAD_DISP0_DAT9__IPU_DISP0_DAT_9,
	MX53_PAD_DISP0_DAT10__IPU_DISP0_DAT_10,
	MX53_PAD_DISP0_DAT11__IPU_DISP0_DAT_11,
	MX53_PAD_DISP0_DAT12__IPU_DISP0_DAT_12,
	MX53_PAD_DISP0_DAT13__IPU_DISP0_DAT_13,
	MX53_PAD_DISP0_DAT14__IPU_DISP0_DAT_14,
	MX53_PAD_DISP0_DAT15__IPU_DISP0_DAT_15,
	MX53_PAD_DISP0_DAT16__IPU_DISP0_DAT_16,
	MX53_PAD_DISP0_DAT17__IPU_DISP0_DAT_17,
	MX53_PAD_DISP0_DAT18__IPU_DISP0_DAT_18,
	MX53_PAD_DISP0_DAT19__IPU_DISP0_DAT_19,
	MX53_PAD_DISP0_DAT20__IPU_DISP0_DAT_20,
	MX53_PAD_DISP0_DAT21__IPU_DISP0_DAT_21,
	MX53_PAD_DISP0_DAT22__IPU_DISP0_DAT_22,
	MX53_PAD_DISP0_DAT23__IPU_DISP0_DAT_23,
	/* Audio CLK*/
	MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK,
	/* PWM */
	MX53_PAD_GPIO_1__PWM2_PWMO,
	/* SPDIF */
	MX53_PAD_GPIO_7__SPDIF_PLOCK,
	MX53_PAD_GPIO_17__SPDIF_OUT1,
	/* GPIO */
	MX53_PAD_PATA_DA_1__GPIO7_7,
	MX53_PAD_PATA_DA_2__GPIO7_8,
	MX53_PAD_PATA_DATA5__GPIO2_5,
	MX53_PAD_PATA_DATA6__GPIO2_6,
	MX53_PAD_PATA_DATA14__GPIO2_14,
	MX53_PAD_PATA_DATA15__GPIO2_15,
	MX53_PAD_PATA_INTRQ__GPIO7_2,
	MX53_PAD_EIM_WAIT__GPIO5_0,
	MX53_PAD_NANDF_WP_B__GPIO6_9,
	MX53_PAD_NANDF_RB0__GPIO6_10,
	MX53_PAD_NANDF_CS1__GPIO6_14,
	MX53_PAD_NANDF_CS2__GPIO6_15,
	MX53_PAD_NANDF_CS3__GPIO6_16,
	MX53_PAD_GPIO_5__GPIO1_5,
	MX53_PAD_GPIO_16__GPIO7_11,
	MX53_PAD_GPIO_8__GPIO1_8,
};

/* Convenience initializer for one gpio-keys button entry. */
#define GPIO_BUTTON(gpio_num, ev_code, act_low, descr, wake)	\
{								\
	.gpio		= gpio_num,				\
	.type		= EV_KEY,				\
	.code		= ev_code,				\
	.active_low	= act_low,				\
	.desc		= "btn " descr,				\
	.wakeup		= wake,					\
}

/* Power and volume buttons; all active-low, none are wakeup sources. */
static struct gpio_keys_button loco_buttons[] = {
	GPIO_BUTTON(MX53_LOCO_POWER, KEY_POWER, 1, "power", 0),
	GPIO_BUTTON(MX53_LOCO_UI1, KEY_VOLUMEUP, 1, "volume-up", 0),
	GPIO_BUTTON(MX53_LOCO_UI2, KEY_VOLUMEDOWN, 1, "volume-down", 0),
};

static const struct gpio_keys_platform_data loco_button_data __initconst = {
	.buttons	= loco_buttons,
	.nbuttons	= ARRAY_SIZE(loco_buttons),
};

/*
 * Pulse the FEC PHY reset line low for ~1 ms, then release it.
 * On gpio_request() failure the error is logged and the reset is
 * skipped (best effort); the GPIO is intentionally kept requested.
 */
static inline void mx53_loco_fec_reset(void)
{
	int ret;

	/* reset FEC PHY */
	ret = gpio_request(LOCO_FEC_PHY_RST, "fec-phy-reset");
	if (ret) {
		printk(KERN_ERR"failed to get GPIO_FEC_PHY_RESET: %d\n", ret);
		return;
	}
	gpio_direction_output(LOCO_FEC_PHY_RST, 0);
	msleep(1);
	gpio_set_value(LOCO_FEC_PHY_RST, 1);
}

/* The on-board PHY is wired for RMII. */
static struct fec_platform_data mx53_loco_fec_data = {
	.phy = PHY_INTERFACE_MODE_RMII,
};

/* Both I2C buses run at standard-mode 100 kHz. */
static const struct imxi2c_platform_data mx53_loco_i2c_data __initconst = {
	.bitrate = 100000,
};

/*
 * Board init: apply the IOMUX table, then register UART1, FEC (after a
 * PHY reset), watchdog, both I2C buses, SD1/SD3 and the gpio-keys device.
 */
static void __init mx53_loco_board_init(void)
{
	mxc_iomux_v3_setup_multiple_pads(mx53_loco_pads,
					ARRAY_SIZE(mx53_loco_pads));
	imx53_add_imx_uart(0, NULL);
	mx53_loco_fec_reset();
	imx53_add_fec(&mx53_loco_fec_data);
	imx53_add_imx2_wdt(0, NULL);
	imx53_add_imx_i2c(0, &mx53_loco_i2c_data);
	imx53_add_imx_i2c(1, &mx53_loco_i2c_data);
	imx53_add_sdhci_esdhc_imx(0, NULL);
	imx53_add_sdhci_esdhc_imx(2, NULL);
	imx_add_gpio_keys(&loco_button_data);
}

/* Clock init: 32.768 kHz and 24 MHz oscillators, no CKIH inputs. */
static void __init mx53_loco_timer_init(void)
{
	mx53_clocks_init(32768, 24000000, 0, 0);
}

static struct sys_timer mx53_loco_timer = {
	.init	= mx53_loco_timer_init,
};

static const char *mx53_loco_dt_match[] __initdata = {
	"fsl,mx53-loco",
	NULL
};

MACHINE_START(MX53_LOCO, "Freescale MX53 LOCO Board")
	.map_io = mx53_map_io,
	.init_early = imx53_init_early,
	.init_irq = mx53_init_irq,
	.timer = &mx53_loco_timer,
	.init_machine = mx53_loco_board_init,
	.dt_compat = mx53_loco_dt_match,
MACHINE_END
gpl-2.0
RaspberryPi-CM/android_kernel_raspberry_pi2
drivers/memstick/core/mspro_block.c
1134
39972
/* * Sony MemoryStick Pro storage support * * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Special thanks to Carlos Corbacho for providing various MemoryStick cards * that made this driver possible. * */ #include <linux/blkdev.h> #include <linux/idr.h> #include <linux/hdreg.h> #include <linux/kthread.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/memstick.h> #include <linux/module.h> #define DRIVER_NAME "mspro_block" static int major; module_param(major, int, 0644); #define MSPRO_BLOCK_MAX_SEGS 32 #define MSPRO_BLOCK_MAX_PAGES ((2 << 16) - 1) #define MSPRO_BLOCK_SIGNATURE 0xa5c3 #define MSPRO_BLOCK_MAX_ATTRIBUTES 41 #define MSPRO_BLOCK_PART_SHIFT 3 enum { MSPRO_BLOCK_ID_SYSINFO = 0x10, MSPRO_BLOCK_ID_MODELNAME = 0x15, MSPRO_BLOCK_ID_MBR = 0x20, MSPRO_BLOCK_ID_PBR16 = 0x21, MSPRO_BLOCK_ID_PBR32 = 0x22, MSPRO_BLOCK_ID_SPECFILEVALUES1 = 0x25, MSPRO_BLOCK_ID_SPECFILEVALUES2 = 0x26, MSPRO_BLOCK_ID_DEVINFO = 0x30 }; struct mspro_sys_attr { size_t size; void *data; unsigned char id; char name[32]; struct device_attribute dev_attr; }; struct mspro_attr_entry { __be32 address; __be32 size; unsigned char id; unsigned char reserved[3]; } __attribute__((packed)); struct mspro_attribute { __be16 signature; unsigned short version; unsigned char count; unsigned char reserved[11]; struct mspro_attr_entry entries[]; } __attribute__((packed)); struct mspro_sys_info { unsigned char class; unsigned char reserved0; __be16 block_size; __be16 block_count; __be16 user_block_count; __be16 page_size; unsigned char reserved1[2]; unsigned char assembly_date[8]; __be32 serial_number; unsigned char assembly_maker_code; unsigned char assembly_model_code[3]; __be16 memory_maker_code; __be16 memory_model_code; unsigned char reserved2[4]; unsigned char vcc; 
unsigned char vpp; __be16 controller_number; __be16 controller_function; __be16 start_sector; __be16 unit_size; unsigned char ms_sub_class; unsigned char reserved3[4]; unsigned char interface_type; __be16 controller_code; unsigned char format_type; unsigned char reserved4; unsigned char device_type; unsigned char reserved5[7]; unsigned char mspro_id[16]; unsigned char reserved6[16]; } __attribute__((packed)); struct mspro_mbr { unsigned char boot_partition; unsigned char start_head; unsigned char start_sector; unsigned char start_cylinder; unsigned char partition_type; unsigned char end_head; unsigned char end_sector; unsigned char end_cylinder; unsigned int start_sectors; unsigned int sectors_per_partition; } __attribute__((packed)); struct mspro_specfile { char name[8]; char ext[3]; unsigned char attr; unsigned char reserved[10]; unsigned short time; unsigned short date; unsigned short cluster; unsigned int size; } __attribute__((packed)); struct mspro_devinfo { __be16 cylinders; __be16 heads; __be16 bytes_per_track; __be16 bytes_per_sector; __be16 sectors_per_track; unsigned char reserved[6]; } __attribute__((packed)); struct mspro_block_data { struct memstick_dev *card; unsigned int usage_count; unsigned int caps; struct gendisk *disk; struct request_queue *queue; struct request *block_req; spinlock_t q_lock; unsigned short page_size; unsigned short cylinders; unsigned short heads; unsigned short sectors_per_track; unsigned char system; unsigned char read_only:1, eject:1, has_request:1, data_dir:1, active:1; unsigned char transfer_cmd; int (*mrq_handler)(struct memstick_dev *card, struct memstick_request **mrq); /* Default request setup function for data access method preferred by * this host instance. 
*/ void (*setup_transfer)(struct memstick_dev *card, u64 offset, size_t length); struct attribute_group attr_group; struct scatterlist req_sg[MSPRO_BLOCK_MAX_SEGS]; unsigned int seg_count; unsigned int current_seg; unsigned int current_page; }; static DEFINE_IDR(mspro_block_disk_idr); static DEFINE_MUTEX(mspro_block_disk_lock); static int mspro_block_complete_req(struct memstick_dev *card, int error); /*** Block device ***/ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode) { struct gendisk *disk = bdev->bd_disk; struct mspro_block_data *msb = disk->private_data; int rc = -ENXIO; mutex_lock(&mspro_block_disk_lock); if (msb && msb->card) { msb->usage_count++; if ((mode & FMODE_WRITE) && msb->read_only) rc = -EROFS; else rc = 0; } mutex_unlock(&mspro_block_disk_lock); return rc; } static void mspro_block_disk_release(struct gendisk *disk) { struct mspro_block_data *msb = disk->private_data; int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT; mutex_lock(&mspro_block_disk_lock); if (msb) { if (msb->usage_count) msb->usage_count--; if (!msb->usage_count) { kfree(msb); disk->private_data = NULL; idr_remove(&mspro_block_disk_idr, disk_id); put_disk(disk); } } mutex_unlock(&mspro_block_disk_lock); } static void mspro_block_bd_release(struct gendisk *disk, fmode_t mode) { mspro_block_disk_release(disk); } static int mspro_block_bd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mspro_block_data *msb = bdev->bd_disk->private_data; geo->heads = msb->heads; geo->sectors = msb->sectors_per_track; geo->cylinders = msb->cylinders; return 0; } static const struct block_device_operations ms_block_bdops = { .open = mspro_block_bd_open, .release = mspro_block_bd_release, .getgeo = mspro_block_bd_getgeo, .owner = THIS_MODULE }; /*** Information ***/ static struct mspro_sys_attr *mspro_from_sysfs_attr(struct attribute *attr) { struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr); return 
container_of(dev_attr, struct mspro_sys_attr, dev_attr); } static const char *mspro_block_attr_name(unsigned char tag) { switch (tag) { case MSPRO_BLOCK_ID_SYSINFO: return "attr_sysinfo"; case MSPRO_BLOCK_ID_MODELNAME: return "attr_modelname"; case MSPRO_BLOCK_ID_MBR: return "attr_mbr"; case MSPRO_BLOCK_ID_PBR16: return "attr_pbr16"; case MSPRO_BLOCK_ID_PBR32: return "attr_pbr32"; case MSPRO_BLOCK_ID_SPECFILEVALUES1: return "attr_specfilevalues1"; case MSPRO_BLOCK_ID_SPECFILEVALUES2: return "attr_specfilevalues2"; case MSPRO_BLOCK_ID_DEVINFO: return "attr_devinfo"; default: return NULL; }; } typedef ssize_t (*sysfs_show_t)(struct device *dev, struct device_attribute *attr, char *buffer); static ssize_t mspro_block_attr_show_default(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *s_attr = container_of(attr, struct mspro_sys_attr, dev_attr); ssize_t cnt, rc = 0; for (cnt = 0; cnt < s_attr->size; cnt++) { if (cnt && !(cnt % 16)) { if (PAGE_SIZE - rc) buffer[rc++] = '\n'; } rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "%02x ", ((unsigned char *)s_attr->data)[cnt]); } return rc; } static ssize_t mspro_block_attr_show_sysinfo(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_sys_info *x_sys = x_attr->data; ssize_t rc = 0; int date_tz = 0, date_tz_f = 0; if (x_sys->assembly_date[0] > 0x80U) { date_tz = (~x_sys->assembly_date[0]) + 1; date_tz_f = date_tz & 3; date_tz >>= 2; date_tz = -date_tz; date_tz_f *= 15; } else if (x_sys->assembly_date[0] < 0x80U) { date_tz = x_sys->assembly_date[0]; date_tz_f = date_tz & 3; date_tz >>= 2; date_tz_f *= 15; } rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "class: %x\n", x_sys->class); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "block size: %x\n", be16_to_cpu(x_sys->block_size)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "block count: %x\n", be16_to_cpu(x_sys->block_count)); rc 
+= scnprintf(buffer + rc, PAGE_SIZE - rc, "user block count: %x\n", be16_to_cpu(x_sys->user_block_count)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "page size: %x\n", be16_to_cpu(x_sys->page_size)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly date: " "GMT%+d:%d %04u-%02u-%02u %02u:%02u:%02u\n", date_tz, date_tz_f, be16_to_cpup((__be16 *)&x_sys->assembly_date[1]), x_sys->assembly_date[3], x_sys->assembly_date[4], x_sys->assembly_date[5], x_sys->assembly_date[6], x_sys->assembly_date[7]); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "serial number: %x\n", be32_to_cpu(x_sys->serial_number)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly maker code: %x\n", x_sys->assembly_maker_code); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly model code: " "%02x%02x%02x\n", x_sys->assembly_model_code[0], x_sys->assembly_model_code[1], x_sys->assembly_model_code[2]); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "memory maker code: %x\n", be16_to_cpu(x_sys->memory_maker_code)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "memory model code: %x\n", be16_to_cpu(x_sys->memory_model_code)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "vcc: %x\n", x_sys->vcc); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "vpp: %x\n", x_sys->vpp); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "controller number: %x\n", be16_to_cpu(x_sys->controller_number)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "controller function: %x\n", be16_to_cpu(x_sys->controller_function)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start sector: %x\n", be16_to_cpu(x_sys->start_sector)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "unit size: %x\n", be16_to_cpu(x_sys->unit_size)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "sub class: %x\n", x_sys->ms_sub_class); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "interface type: %x\n", x_sys->interface_type); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "controller code: %x\n", be16_to_cpu(x_sys->controller_code)); rc += scnprintf(buffer + 
rc, PAGE_SIZE - rc, "format type: %x\n", x_sys->format_type); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "device type: %x\n", x_sys->device_type); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "mspro id: %s\n", x_sys->mspro_id); return rc; } static ssize_t mspro_block_attr_show_modelname(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *s_attr = container_of(attr, struct mspro_sys_attr, dev_attr); return scnprintf(buffer, PAGE_SIZE, "%s", (char *)s_attr->data); } static ssize_t mspro_block_attr_show_mbr(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_mbr *x_mbr = x_attr->data; ssize_t rc = 0; rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "boot partition: %x\n", x_mbr->boot_partition); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start head: %x\n", x_mbr->start_head); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start sector: %x\n", x_mbr->start_sector); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start cylinder: %x\n", x_mbr->start_cylinder); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "partition type: %x\n", x_mbr->partition_type); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "end head: %x\n", x_mbr->end_head); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "end sector: %x\n", x_mbr->end_sector); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "end cylinder: %x\n", x_mbr->end_cylinder); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start sectors: %x\n", x_mbr->start_sectors); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "sectors per partition: %x\n", x_mbr->sectors_per_partition); return rc; } static ssize_t mspro_block_attr_show_specfile(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_specfile *x_spfile = x_attr->data; char name[9], ext[4]; ssize_t rc = 0; memcpy(name, x_spfile->name, 8); 
name[8] = 0; memcpy(ext, x_spfile->ext, 3); ext[3] = 0; rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "name: %s\n", name); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "ext: %s\n", ext); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "attribute: %x\n", x_spfile->attr); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "time: %d:%d:%d\n", x_spfile->time >> 11, (x_spfile->time >> 5) & 0x3f, (x_spfile->time & 0x1f) * 2); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "date: %d-%d-%d\n", (x_spfile->date >> 9) + 1980, (x_spfile->date >> 5) & 0xf, x_spfile->date & 0x1f); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start cluster: %x\n", x_spfile->cluster); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "size: %x\n", x_spfile->size); return rc; } static ssize_t mspro_block_attr_show_devinfo(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_devinfo *x_devinfo = x_attr->data; ssize_t rc = 0; rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "cylinders: %x\n", be16_to_cpu(x_devinfo->cylinders)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "heads: %x\n", be16_to_cpu(x_devinfo->heads)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "bytes per track: %x\n", be16_to_cpu(x_devinfo->bytes_per_track)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "bytes per sector: %x\n", be16_to_cpu(x_devinfo->bytes_per_sector)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "sectors per track: %x\n", be16_to_cpu(x_devinfo->sectors_per_track)); return rc; } static sysfs_show_t mspro_block_attr_show(unsigned char tag) { switch (tag) { case MSPRO_BLOCK_ID_SYSINFO: return mspro_block_attr_show_sysinfo; case MSPRO_BLOCK_ID_MODELNAME: return mspro_block_attr_show_modelname; case MSPRO_BLOCK_ID_MBR: return mspro_block_attr_show_mbr; case MSPRO_BLOCK_ID_SPECFILEVALUES1: case MSPRO_BLOCK_ID_SPECFILEVALUES2: return mspro_block_attr_show_specfile; case MSPRO_BLOCK_ID_DEVINFO: return 
mspro_block_attr_show_devinfo; default: return mspro_block_attr_show_default; } } /*** Protocol handlers ***/ /* * Functions prefixed with "h_" are protocol callbacks. They can be called from * interrupt context. Return value of 0 means that request processing is still * ongoing, while special error value of -EAGAIN means that current request is * finished (and request processor should come back some time later). */ static int h_mspro_block_req_init(struct memstick_dev *card, struct memstick_request **mrq) { struct mspro_block_data *msb = memstick_get_drvdata(card); *mrq = &card->current_mrq; card->next_request = msb->mrq_handler; return 0; } static int h_mspro_block_default(struct memstick_dev *card, struct memstick_request **mrq) { return mspro_block_complete_req(card, (*mrq)->error); } static int h_mspro_block_default_bad(struct memstick_dev *card, struct memstick_request **mrq) { return -ENXIO; } static int h_mspro_block_get_ro(struct memstick_dev *card, struct memstick_request **mrq) { struct mspro_block_data *msb = memstick_get_drvdata(card); if (!(*mrq)->error) { if ((*mrq)->data[offsetof(struct ms_status_register, status0)] & MEMSTICK_STATUS0_WP) msb->read_only = 1; else msb->read_only = 0; } return mspro_block_complete_req(card, (*mrq)->error); } static int h_mspro_block_wait_for_ced(struct memstick_dev *card, struct memstick_request **mrq) { dev_dbg(&card->dev, "wait for ced: value %x\n", (*mrq)->data[0]); if (!(*mrq)->error) { if ((*mrq)->data[0] & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)) (*mrq)->error = -EFAULT; else if (!((*mrq)->data[0] & MEMSTICK_INT_CED)) return 0; } return mspro_block_complete_req(card, (*mrq)->error); } static int h_mspro_block_transfer_data(struct memstick_dev *card, struct memstick_request **mrq) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned char t_val = 0; struct scatterlist t_sg = { 0 }; size_t t_offset; if ((*mrq)->error) return mspro_block_complete_req(card, (*mrq)->error); switch ((*mrq)->tpc) { 
case MS_TPC_WRITE_REG: memstick_init_req(*mrq, MS_TPC_SET_CMD, &msb->transfer_cmd, 1); (*mrq)->need_card_int = 1; return 0; case MS_TPC_SET_CMD: t_val = (*mrq)->int_reg; memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); if (msb->caps & MEMSTICK_CAP_AUTO_GET_INT) goto has_int_reg; return 0; case MS_TPC_GET_INT: t_val = (*mrq)->data[0]; has_int_reg: if (t_val & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)) { t_val = MSPRO_CMD_STOP; memstick_init_req(*mrq, MS_TPC_SET_CMD, &t_val, 1); card->next_request = h_mspro_block_default; return 0; } if (msb->current_page == (msb->req_sg[msb->current_seg].length / msb->page_size)) { msb->current_page = 0; msb->current_seg++; if (msb->current_seg == msb->seg_count) { if (t_val & MEMSTICK_INT_CED) { return mspro_block_complete_req(card, 0); } else { card->next_request = h_mspro_block_wait_for_ced; memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); return 0; } } } if (!(t_val & MEMSTICK_INT_BREQ)) { memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); return 0; } t_offset = msb->req_sg[msb->current_seg].offset; t_offset += msb->current_page * msb->page_size; sg_set_page(&t_sg, nth_page(sg_page(&(msb->req_sg[msb->current_seg])), t_offset >> PAGE_SHIFT), msb->page_size, offset_in_page(t_offset)); memstick_init_req_sg(*mrq, msb->data_dir == READ ? MS_TPC_READ_LONG_DATA : MS_TPC_WRITE_LONG_DATA, &t_sg); (*mrq)->need_card_int = 1; return 0; case MS_TPC_READ_LONG_DATA: case MS_TPC_WRITE_LONG_DATA: msb->current_page++; if (msb->caps & MEMSTICK_CAP_AUTO_GET_INT) { t_val = (*mrq)->int_reg; goto has_int_reg; } else { memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); return 0; } default: BUG(); } } /*** Transfer setup functions for different access methods. ***/ /** Setup data transfer request for SET_CMD TPC with arguments in card * registers. * * @card Current media instance * @offset Target data offset in bytes * @length Required transfer length in bytes. 
*/ static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset, size_t length) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct mspro_param_register param = { .system = msb->system, .data_count = cpu_to_be16((uint16_t)(length / msb->page_size)), /* ISO C90 warning precludes direct initialization for now. */ .data_address = 0, .tpc_param = 0 }; do_div(offset, msb->page_size); param.data_address = cpu_to_be32((uint32_t)offset); card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_transfer_data; memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param, sizeof(param)); } /*** Data transfer ***/ static int mspro_block_issue_req(struct memstick_dev *card, int chunk) { struct mspro_block_data *msb = memstick_get_drvdata(card); u64 t_off; unsigned int count; try_again: while (chunk) { msb->current_page = 0; msb->current_seg = 0; msb->seg_count = blk_rq_map_sg(msb->block_req->q, msb->block_req, msb->req_sg); if (!msb->seg_count) { chunk = __blk_end_request_cur(msb->block_req, -ENOMEM); continue; } t_off = blk_rq_pos(msb->block_req); t_off <<= 9; count = blk_rq_bytes(msb->block_req); msb->setup_transfer(card, t_off, count); msb->data_dir = rq_data_dir(msb->block_req); msb->transfer_cmd = msb->data_dir == READ ? MSPRO_CMD_READ_DATA : MSPRO_CMD_WRITE_DATA; memstick_new_req(card->host); return 0; } dev_dbg(&card->dev, "blk_fetch\n"); msb->block_req = blk_fetch_request(msb->queue); if (!msb->block_req) { dev_dbg(&card->dev, "issue end\n"); return -EAGAIN; } dev_dbg(&card->dev, "trying again\n"); chunk = 1; goto try_again; } static int mspro_block_complete_req(struct memstick_dev *card, int error) { struct mspro_block_data *msb = memstick_get_drvdata(card); int chunk, cnt; unsigned int t_len = 0; unsigned long flags; spin_lock_irqsave(&msb->q_lock, flags); dev_dbg(&card->dev, "complete %d, %d\n", msb->has_request ? 
1 : 0, error); if (msb->has_request) { /* Nothing to do - not really an error */ if (error == -EAGAIN) error = 0; if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) { if (msb->data_dir == READ) { for (cnt = 0; cnt < msb->current_seg; cnt++) { t_len += msb->req_sg[cnt].length / msb->page_size; if (msb->current_page) t_len += msb->current_page - 1; t_len *= msb->page_size; } } } else t_len = blk_rq_bytes(msb->block_req); dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error); if (error && !t_len) t_len = blk_rq_cur_bytes(msb->block_req); chunk = __blk_end_request(msb->block_req, error, t_len); error = mspro_block_issue_req(card, chunk); if (!error) goto out; else msb->has_request = 0; } else { if (!error) error = -EAGAIN; } card->next_request = h_mspro_block_default_bad; complete_all(&card->mrq_complete); out: spin_unlock_irqrestore(&msb->q_lock, flags); return error; } static void mspro_block_stop(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); int rc = 0; unsigned long flags; while (1) { spin_lock_irqsave(&msb->q_lock, flags); if (!msb->has_request) { blk_stop_queue(msb->queue); rc = 1; } spin_unlock_irqrestore(&msb->q_lock, flags); if (rc) break; wait_for_completion(&card->mrq_complete); } } static void mspro_block_start(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; spin_lock_irqsave(&msb->q_lock, flags); blk_start_queue(msb->queue); spin_unlock_irqrestore(&msb->q_lock, flags); } static int mspro_block_prepare_req(struct request_queue *q, struct request *req) { if (req->cmd_type != REQ_TYPE_FS && req->cmd_type != REQ_TYPE_BLOCK_PC) { blk_dump_rq_flags(req, "MSPro unsupported request"); return BLKPREP_KILL; } req->cmd_flags |= REQ_DONTPREP; return BLKPREP_OK; } static void mspro_block_submit_req(struct request_queue *q) { struct memstick_dev *card = q->queuedata; struct mspro_block_data *msb = memstick_get_drvdata(card); struct request *req = NULL; if 
(msb->has_request) return; if (msb->eject) { while ((req = blk_fetch_request(q)) != NULL) __blk_end_request_all(req, -ENODEV); return; } msb->has_request = 1; if (mspro_block_issue_req(card, 0)) msb->has_request = 0; } /*** Initialization ***/ static int mspro_block_wait_for_ced(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_wait_for_ced; memstick_init_req(&card->current_mrq, MS_TPC_GET_INT, NULL, 1); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); return card->current_mrq.error; } static int mspro_block_set_interface(struct memstick_dev *card, unsigned char sys_reg) { struct memstick_host *host = card->host; struct mspro_block_data *msb = memstick_get_drvdata(card); struct mspro_param_register param = { .system = sys_reg, .data_count = 0, .data_address = 0, .tpc_param = 0 }; card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_default; memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param, sizeof(param)); memstick_new_req(host); wait_for_completion(&card->mrq_complete); return card->current_mrq.error; } static int mspro_block_switch_interface(struct memstick_dev *card) { struct memstick_host *host = card->host; struct mspro_block_data *msb = memstick_get_drvdata(card); int rc = 0; try_again: if (msb->caps & MEMSTICK_CAP_PAR4) rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR4); else return 0; if (rc) { printk(KERN_WARNING "%s: could not switch to 4-bit mode, error %d\n", dev_name(&card->dev), rc); return 0; } msb->system = MEMSTICK_SYS_PAR4; host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4); printk(KERN_INFO "%s: switching to 4-bit parallel mode\n", dev_name(&card->dev)); if (msb->caps & MEMSTICK_CAP_PAR8) { rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR8); if (!rc) { msb->system = MEMSTICK_SYS_PAR8; host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR8); 
printk(KERN_INFO "%s: switching to 8-bit parallel mode\n", dev_name(&card->dev)); } else printk(KERN_WARNING "%s: could not switch to 8-bit mode, error %d\n", dev_name(&card->dev), rc); } card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_default; memstick_init_req(&card->current_mrq, MS_TPC_GET_INT, NULL, 1); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); rc = card->current_mrq.error; if (rc) { printk(KERN_WARNING "%s: interface error, trying to fall back to serial\n", dev_name(&card->dev)); msb->system = MEMSTICK_SYS_SERIAL; host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); msleep(10); host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON); host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); rc = memstick_set_rw_addr(card); if (!rc) rc = mspro_block_set_interface(card, msb->system); if (!rc) { msleep(150); rc = mspro_block_wait_for_ced(card); if (rc) return rc; if (msb->caps & MEMSTICK_CAP_PAR8) { msb->caps &= ~MEMSTICK_CAP_PAR8; goto try_again; } } } return rc; } /* Memory allocated for attributes by this function should be freed by * mspro_block_data_clear, no matter if the initialization process succeeded * or failed. */ static int mspro_block_read_attributes(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct mspro_attribute *attr = NULL; struct mspro_sys_attr *s_attr = NULL; unsigned char *buffer = NULL; int cnt, rc, attr_count; /* While normally physical device offsets, represented here by * attr_offset and attr_len will be of large numeric types, we can be * sure, that attributes are close enough to the beginning of the * device, to save ourselves some trouble. 
*/ unsigned int addr, attr_offset = 0, attr_len = msb->page_size; attr = kmalloc(msb->page_size, GFP_KERNEL); if (!attr) return -ENOMEM; sg_init_one(&msb->req_sg[0], attr, msb->page_size); msb->seg_count = 1; msb->current_seg = 0; msb->current_page = 0; msb->data_dir = READ; msb->transfer_cmd = MSPRO_CMD_READ_ATRB; msb->setup_transfer(card, attr_offset, attr_len); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) { rc = card->current_mrq.error; goto out_free_attr; } if (be16_to_cpu(attr->signature) != MSPRO_BLOCK_SIGNATURE) { printk(KERN_ERR "%s: unrecognized device signature %x\n", dev_name(&card->dev), be16_to_cpu(attr->signature)); rc = -ENODEV; goto out_free_attr; } if (attr->count > MSPRO_BLOCK_MAX_ATTRIBUTES) { printk(KERN_WARNING "%s: way too many attribute entries\n", dev_name(&card->dev)); attr_count = MSPRO_BLOCK_MAX_ATTRIBUTES; } else attr_count = attr->count; msb->attr_group.attrs = kcalloc(attr_count + 1, sizeof(*msb->attr_group.attrs), GFP_KERNEL); if (!msb->attr_group.attrs) { rc = -ENOMEM; goto out_free_attr; } msb->attr_group.name = "media_attributes"; buffer = kmalloc(attr_len, GFP_KERNEL); if (!buffer) { rc = -ENOMEM; goto out_free_attr; } memcpy(buffer, (char *)attr, attr_len); for (cnt = 0; cnt < attr_count; ++cnt) { s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL); if (!s_attr) { rc = -ENOMEM; goto out_free_buffer; } msb->attr_group.attrs[cnt] = &s_attr->dev_attr.attr; addr = be32_to_cpu(attr->entries[cnt].address); s_attr->size = be32_to_cpu(attr->entries[cnt].size); dev_dbg(&card->dev, "adding attribute %d: id %x, address %x, " "size %zx\n", cnt, attr->entries[cnt].id, addr, s_attr->size); s_attr->id = attr->entries[cnt].id; if (mspro_block_attr_name(s_attr->id)) snprintf(s_attr->name, sizeof(s_attr->name), "%s", mspro_block_attr_name(attr->entries[cnt].id)); else snprintf(s_attr->name, sizeof(s_attr->name), "attr_x%02x", attr->entries[cnt].id); 
sysfs_attr_init(&s_attr->dev_attr.attr); s_attr->dev_attr.attr.name = s_attr->name; s_attr->dev_attr.attr.mode = S_IRUGO; s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id); if (!s_attr->size) continue; s_attr->data = kmalloc(s_attr->size, GFP_KERNEL); if (!s_attr->data) { rc = -ENOMEM; goto out_free_buffer; } if (((addr / msb->page_size) == (attr_offset / msb->page_size)) && (((addr + s_attr->size - 1) / msb->page_size) == (attr_offset / msb->page_size))) { memcpy(s_attr->data, buffer + addr % msb->page_size, s_attr->size); continue; } attr_offset = (addr / msb->page_size) * msb->page_size; if ((attr_offset + attr_len) < (addr + s_attr->size)) { kfree(buffer); attr_len = (((addr + s_attr->size) / msb->page_size) + 1 ) * msb->page_size - attr_offset; buffer = kmalloc(attr_len, GFP_KERNEL); if (!buffer) { rc = -ENOMEM; goto out_free_attr; } } sg_init_one(&msb->req_sg[0], buffer, attr_len); msb->seg_count = 1; msb->current_seg = 0; msb->current_page = 0; msb->data_dir = READ; msb->transfer_cmd = MSPRO_CMD_READ_ATRB; dev_dbg(&card->dev, "reading attribute range %x, %x\n", attr_offset, attr_len); msb->setup_transfer(card, attr_offset, attr_len); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) { rc = card->current_mrq.error; goto out_free_buffer; } memcpy(s_attr->data, buffer + addr % msb->page_size, s_attr->size); } rc = 0; out_free_buffer: kfree(buffer); out_free_attr: kfree(attr); return rc; } static int mspro_block_init_card(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct memstick_host *host = card->host; int rc = 0; msb->system = MEMSTICK_SYS_SERIAL; msb->setup_transfer = h_mspro_block_setup_cmd; card->reg_addr.r_offset = offsetof(struct mspro_register, status); card->reg_addr.r_length = sizeof(struct ms_status_register); card->reg_addr.w_offset = offsetof(struct mspro_register, param); card->reg_addr.w_length = sizeof(struct mspro_param_register); if 
(memstick_set_rw_addr(card)) return -EIO; msb->caps = host->caps; msleep(150); rc = mspro_block_wait_for_ced(card); if (rc) return rc; rc = mspro_block_switch_interface(card); if (rc) return rc; dev_dbg(&card->dev, "card activated\n"); if (msb->system != MEMSTICK_SYS_SERIAL) msb->caps |= MEMSTICK_CAP_AUTO_GET_INT; card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_get_ro; memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL, sizeof(struct ms_status_register)); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) return card->current_mrq.error; dev_dbg(&card->dev, "card r/w status %d\n", msb->read_only ? 0 : 1); msb->page_size = 512; rc = mspro_block_read_attributes(card); if (rc) return rc; dev_dbg(&card->dev, "attributes loaded\n"); return 0; } static int mspro_block_init_disk(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct memstick_host *host = card->host; struct mspro_devinfo *dev_info = NULL; struct mspro_sys_info *sys_info = NULL; struct mspro_sys_attr *s_attr = NULL; int rc, disk_id; u64 limit = BLK_BOUNCE_HIGH; unsigned long capacity; if (host->dev.dma_mask && *(host->dev.dma_mask)) limit = *(host->dev.dma_mask); for (rc = 0; msb->attr_group.attrs[rc]; ++rc) { s_attr = mspro_from_sysfs_attr(msb->attr_group.attrs[rc]); if (s_attr->id == MSPRO_BLOCK_ID_DEVINFO) dev_info = s_attr->data; else if (s_attr->id == MSPRO_BLOCK_ID_SYSINFO) sys_info = s_attr->data; } if (!dev_info || !sys_info) return -ENODEV; msb->cylinders = be16_to_cpu(dev_info->cylinders); msb->heads = be16_to_cpu(dev_info->heads); msb->sectors_per_track = be16_to_cpu(dev_info->sectors_per_track); msb->page_size = be16_to_cpu(sys_info->unit_size); mutex_lock(&mspro_block_disk_lock); disk_id = idr_alloc(&mspro_block_disk_idr, card, 0, 256, GFP_KERNEL); mutex_unlock(&mspro_block_disk_lock); if (disk_id < 0) return disk_id; msb->disk = alloc_disk(1 << 
MSPRO_BLOCK_PART_SHIFT); if (!msb->disk) { rc = -ENOMEM; goto out_release_id; } msb->queue = blk_init_queue(mspro_block_submit_req, &msb->q_lock); if (!msb->queue) { rc = -ENOMEM; goto out_put_disk; } msb->queue->queuedata = card; blk_queue_prep_rq(msb->queue, mspro_block_prepare_req); blk_queue_bounce_limit(msb->queue, limit); blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS); blk_queue_max_segment_size(msb->queue, MSPRO_BLOCK_MAX_PAGES * msb->page_size); msb->disk->major = major; msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT; msb->disk->fops = &ms_block_bdops; msb->usage_count = 1; msb->disk->private_data = msb; msb->disk->queue = msb->queue; msb->disk->driverfs_dev = &card->dev; sprintf(msb->disk->disk_name, "mspblk%d", disk_id); blk_queue_logical_block_size(msb->queue, msb->page_size); capacity = be16_to_cpu(sys_info->user_block_count); capacity *= be16_to_cpu(sys_info->block_size); capacity *= msb->page_size >> 9; set_capacity(msb->disk, capacity); dev_dbg(&card->dev, "capacity set %ld\n", capacity); add_disk(msb->disk); msb->active = 1; return 0; out_put_disk: put_disk(msb->disk); out_release_id: mutex_lock(&mspro_block_disk_lock); idr_remove(&mspro_block_disk_idr, disk_id); mutex_unlock(&mspro_block_disk_lock); return rc; } static void mspro_block_data_clear(struct mspro_block_data *msb) { int cnt; struct mspro_sys_attr *s_attr; if (msb->attr_group.attrs) { for (cnt = 0; msb->attr_group.attrs[cnt]; ++cnt) { s_attr = mspro_from_sysfs_attr(msb->attr_group .attrs[cnt]); kfree(s_attr->data); kfree(s_attr); } kfree(msb->attr_group.attrs); } msb->card = NULL; } static int mspro_block_check_card(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); return (msb->active == 1); } static int mspro_block_probe(struct memstick_dev *card) { struct mspro_block_data *msb; int rc = 0; msb = kzalloc(sizeof(struct mspro_block_data), GFP_KERNEL); if (!msb) return 
-ENOMEM; memstick_set_drvdata(card, msb); msb->card = card; spin_lock_init(&msb->q_lock); rc = mspro_block_init_card(card); if (rc) goto out_free; rc = sysfs_create_group(&card->dev.kobj, &msb->attr_group); if (rc) goto out_free; rc = mspro_block_init_disk(card); if (!rc) { card->check = mspro_block_check_card; card->stop = mspro_block_stop; card->start = mspro_block_start; return 0; } sysfs_remove_group(&card->dev.kobj, &msb->attr_group); out_free: memstick_set_drvdata(card, NULL); mspro_block_data_clear(msb); kfree(msb); return rc; } static void mspro_block_remove(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; spin_lock_irqsave(&msb->q_lock, flags); msb->eject = 1; blk_start_queue(msb->queue); spin_unlock_irqrestore(&msb->q_lock, flags); del_gendisk(msb->disk); dev_dbg(&card->dev, "mspro block remove\n"); blk_cleanup_queue(msb->queue); msb->queue = NULL; sysfs_remove_group(&card->dev.kobj, &msb->attr_group); mutex_lock(&mspro_block_disk_lock); mspro_block_data_clear(msb); mutex_unlock(&mspro_block_disk_lock); mspro_block_disk_release(msb->disk); memstick_set_drvdata(card, NULL); } #ifdef CONFIG_PM static int mspro_block_suspend(struct memstick_dev *card, pm_message_t state) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; spin_lock_irqsave(&msb->q_lock, flags); blk_stop_queue(msb->queue); msb->active = 0; spin_unlock_irqrestore(&msb->q_lock, flags); return 0; } static int mspro_block_resume(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; int rc = 0; #ifdef CONFIG_MEMSTICK_UNSAFE_RESUME struct mspro_block_data *new_msb; struct memstick_host *host = card->host; struct mspro_sys_attr *s_attr, *r_attr; unsigned char cnt; mutex_lock(&host->lock); new_msb = kzalloc(sizeof(struct mspro_block_data), GFP_KERNEL); if (!new_msb) { rc = -ENOMEM; goto out_unlock; } new_msb->card = card; memstick_set_drvdata(card, new_msb); 
if (mspro_block_init_card(card)) goto out_free; for (cnt = 0; new_msb->attr_group.attrs[cnt] && msb->attr_group.attrs[cnt]; ++cnt) { s_attr = mspro_from_sysfs_attr(new_msb->attr_group.attrs[cnt]); r_attr = mspro_from_sysfs_attr(msb->attr_group.attrs[cnt]); if (s_attr->id == MSPRO_BLOCK_ID_SYSINFO && r_attr->id == s_attr->id) { if (memcmp(s_attr->data, r_attr->data, s_attr->size)) break; msb->active = 1; break; } } out_free: memstick_set_drvdata(card, msb); mspro_block_data_clear(new_msb); kfree(new_msb); out_unlock: mutex_unlock(&host->lock); #endif /* CONFIG_MEMSTICK_UNSAFE_RESUME */ spin_lock_irqsave(&msb->q_lock, flags); blk_start_queue(msb->queue); spin_unlock_irqrestore(&msb->q_lock, flags); return rc; } #else #define mspro_block_suspend NULL #define mspro_block_resume NULL #endif /* CONFIG_PM */ static struct memstick_device_id mspro_block_id_tbl[] = { {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_PRO, MEMSTICK_CATEGORY_STORAGE_DUO, MEMSTICK_CLASS_DUO}, {} }; static struct memstick_driver mspro_block_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE }, .id_table = mspro_block_id_tbl, .probe = mspro_block_probe, .remove = mspro_block_remove, .suspend = mspro_block_suspend, .resume = mspro_block_resume }; static int __init mspro_block_init(void) { int rc = -ENOMEM; rc = register_blkdev(major, DRIVER_NAME); if (rc < 0) { printk(KERN_ERR DRIVER_NAME ": failed to register " "major %d, error %d\n", major, rc); return rc; } if (!major) major = rc; rc = memstick_register_driver(&mspro_block_driver); if (rc) unregister_blkdev(major, DRIVER_NAME); return rc; } static void __exit mspro_block_exit(void) { memstick_unregister_driver(&mspro_block_driver); unregister_blkdev(major, DRIVER_NAME); idr_destroy(&mspro_block_disk_idr); } module_init(mspro_block_init); module_exit(mspro_block_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alex Dubov"); MODULE_DESCRIPTION("Sony MemoryStickPro block device driver"); MODULE_DEVICE_TABLE(memstick, mspro_block_id_tbl);
gpl-2.0
RezaSR/android_kernel_asus_tf303cl
drivers/media/rc/winbond-cir.c
2158
34411
/* * winbond-cir.c - Driver for the Consumer IR functionality of Winbond * SuperI/O chips. * * Currently supports the Winbond WPCD376i chip (PNP id WEC1022), but * could probably support others (Winbond WEC102X, NatSemi, etc) * with minor modifications. * * Original Author: David Härdeman <david@hardeman.nu> * Copyright (C) 2012 Sean Young <sean@mess.org> * Copyright (C) 2009 - 2011 David Härdeman <david@hardeman.nu> * * Dedicated to my daughter Matilda, without whose loving attention this * driver would have been finished in half the time and with a fraction * of the bugs. * * Written using: * o Winbond WPCD376I datasheet helpfully provided by Jesse Barnes at Intel * o NatSemi PC87338/PC97338 datasheet (for the serial port stuff) * o DSDT dumps * * Supported features: * o IR Receive * o IR Transmit * o Wake-On-CIR functionality * o Carrier detection * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/pnp.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/leds.h> #include <linux/spinlock.h> #include <linux/pci_ids.h> #include <linux/io.h> #include <linux/bitrev.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/sched.h> #include <media/rc-core.h> #define DRVNAME "winbond-cir" /* CEIR Wake-Up Registers, relative to data->wbase */ #define WBCIR_REG_WCEIR_CTL 0x03 /* CEIR Receiver Control */ #define WBCIR_REG_WCEIR_STS 0x04 /* CEIR Receiver Status */ #define WBCIR_REG_WCEIR_EV_EN 0x05 /* CEIR Receiver Event Enable */ #define WBCIR_REG_WCEIR_CNTL 0x06 /* CEIR Receiver Counter Low */ #define WBCIR_REG_WCEIR_CNTH 0x07 /* CEIR Receiver Counter High */ #define WBCIR_REG_WCEIR_INDEX 0x08 /* CEIR Receiver Index */ #define WBCIR_REG_WCEIR_DATA 0x09 /* CEIR Receiver Data */ #define WBCIR_REG_WCEIR_CSL 0x0A /* CEIR Re. Compare Strlen */ #define WBCIR_REG_WCEIR_CFG1 0x0B /* CEIR Re. Configuration 1 */ #define WBCIR_REG_WCEIR_CFG2 0x0C /* CEIR Re. 
Configuration 2 */ /* CEIR Enhanced Functionality Registers, relative to data->ebase */ #define WBCIR_REG_ECEIR_CTS 0x00 /* Enhanced IR Control Status */ #define WBCIR_REG_ECEIR_CCTL 0x01 /* Infrared Counter Control */ #define WBCIR_REG_ECEIR_CNT_LO 0x02 /* Infrared Counter LSB */ #define WBCIR_REG_ECEIR_CNT_HI 0x03 /* Infrared Counter MSB */ #define WBCIR_REG_ECEIR_IREM 0x04 /* Infrared Emitter Status */ /* SP3 Banked Registers, relative to data->sbase */ #define WBCIR_REG_SP3_BSR 0x03 /* Bank Select, all banks */ /* Bank 0 */ #define WBCIR_REG_SP3_RXDATA 0x00 /* FIFO RX data (r) */ #define WBCIR_REG_SP3_TXDATA 0x00 /* FIFO TX data (w) */ #define WBCIR_REG_SP3_IER 0x01 /* Interrupt Enable */ #define WBCIR_REG_SP3_EIR 0x02 /* Event Identification (r) */ #define WBCIR_REG_SP3_FCR 0x02 /* FIFO Control (w) */ #define WBCIR_REG_SP3_MCR 0x04 /* Mode Control */ #define WBCIR_REG_SP3_LSR 0x05 /* Link Status */ #define WBCIR_REG_SP3_MSR 0x06 /* Modem Status */ #define WBCIR_REG_SP3_ASCR 0x07 /* Aux Status and Control */ /* Bank 2 */ #define WBCIR_REG_SP3_BGDL 0x00 /* Baud Divisor LSB */ #define WBCIR_REG_SP3_BGDH 0x01 /* Baud Divisor MSB */ #define WBCIR_REG_SP3_EXCR1 0x02 /* Extended Control 1 */ #define WBCIR_REG_SP3_EXCR2 0x04 /* Extended Control 2 */ #define WBCIR_REG_SP3_TXFLV 0x06 /* TX FIFO Level */ #define WBCIR_REG_SP3_RXFLV 0x07 /* RX FIFO Level */ /* Bank 3 */ #define WBCIR_REG_SP3_MRID 0x00 /* Module Identification */ #define WBCIR_REG_SP3_SH_LCR 0x01 /* LCR Shadow */ #define WBCIR_REG_SP3_SH_FCR 0x02 /* FCR Shadow */ /* Bank 4 */ #define WBCIR_REG_SP3_IRCR1 0x02 /* Infrared Control 1 */ /* Bank 5 */ #define WBCIR_REG_SP3_IRCR2 0x04 /* Infrared Control 2 */ /* Bank 6 */ #define WBCIR_REG_SP3_IRCR3 0x00 /* Infrared Control 3 */ #define WBCIR_REG_SP3_SIR_PW 0x02 /* SIR Pulse Width */ /* Bank 7 */ #define WBCIR_REG_SP3_IRRXDC 0x00 /* IR RX Demod Control */ #define WBCIR_REG_SP3_IRTXMC 0x01 /* IR TX Mod Control */ #define WBCIR_REG_SP3_RCCFG 0x02 /* CEIR Config */ 
#define WBCIR_REG_SP3_IRCFG1 0x04 /* Infrared Config 1 */ #define WBCIR_REG_SP3_IRCFG4 0x07 /* Infrared Config 4 */ /* * Magic values follow */ /* No interrupts for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */ #define WBCIR_IRQ_NONE 0x00 /* RX data bit for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */ #define WBCIR_IRQ_RX 0x01 /* TX data low bit for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */ #define WBCIR_IRQ_TX_LOW 0x02 /* Over/Under-flow bit for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */ #define WBCIR_IRQ_ERR 0x04 /* TX data empty bit for WBCEIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */ #define WBCIR_IRQ_TX_EMPTY 0x20 /* Led enable/disable bit for WBCIR_REG_ECEIR_CTS */ #define WBCIR_LED_ENABLE 0x80 /* RX data available bit for WBCIR_REG_SP3_LSR */ #define WBCIR_RX_AVAIL 0x01 /* RX data overrun error bit for WBCIR_REG_SP3_LSR */ #define WBCIR_RX_OVERRUN 0x02 /* TX End-Of-Transmission bit for WBCIR_REG_SP3_ASCR */ #define WBCIR_TX_EOT 0x04 /* RX disable bit for WBCIR_REG_SP3_ASCR */ #define WBCIR_RX_DISABLE 0x20 /* TX data underrun error bit for WBCIR_REG_SP3_ASCR */ #define WBCIR_TX_UNDERRUN 0x40 /* Extended mode enable bit for WBCIR_REG_SP3_EXCR1 */ #define WBCIR_EXT_ENABLE 0x01 /* Select compare register in WBCIR_REG_WCEIR_INDEX (bits 5 & 6) */ #define WBCIR_REGSEL_COMPARE 0x10 /* Select mask register in WBCIR_REG_WCEIR_INDEX (bits 5 & 6) */ #define WBCIR_REGSEL_MASK 0x20 /* Starting address of selected register in WBCIR_REG_WCEIR_INDEX */ #define WBCIR_REG_ADDR0 0x00 /* Enable carrier counter */ #define WBCIR_CNTR_EN 0x01 /* Reset carrier counter */ #define WBCIR_CNTR_R 0x02 /* Invert TX */ #define WBCIR_IRTX_INV 0x04 /* Receiver oversampling */ #define WBCIR_RX_T_OV 0x40 /* Valid banks for the SP3 UART */ enum wbcir_bank { WBCIR_BANK_0 = 0x00, WBCIR_BANK_1 = 0x80, WBCIR_BANK_2 = 0xE0, WBCIR_BANK_3 = 0xE4, WBCIR_BANK_4 = 0xE8, WBCIR_BANK_5 = 0xEC, WBCIR_BANK_6 = 0xF0, WBCIR_BANK_7 = 0xF4, }; /* Supported power-on IR Protocols */ enum wbcir_protocol { IR_PROTOCOL_RC5 = 
0x0, IR_PROTOCOL_NEC = 0x1, IR_PROTOCOL_RC6 = 0x2, }; /* Possible states for IR reception */ enum wbcir_rxstate { WBCIR_RXSTATE_INACTIVE = 0, WBCIR_RXSTATE_ACTIVE, WBCIR_RXSTATE_ERROR }; /* Possible states for IR transmission */ enum wbcir_txstate { WBCIR_TXSTATE_INACTIVE = 0, WBCIR_TXSTATE_ACTIVE, WBCIR_TXSTATE_ERROR }; /* Misc */ #define WBCIR_NAME "Winbond CIR" #define WBCIR_ID_FAMILY 0xF1 /* Family ID for the WPCD376I */ #define WBCIR_ID_CHIP 0x04 /* Chip ID for the WPCD376I */ #define INVALID_SCANCODE 0x7FFFFFFF /* Invalid with all protos */ #define WAKEUP_IOMEM_LEN 0x10 /* Wake-Up I/O Reg Len */ #define EHFUNC_IOMEM_LEN 0x10 /* Enhanced Func I/O Reg Len */ #define SP_IOMEM_LEN 0x08 /* Serial Port 3 (IR) Reg Len */ /* Per-device data */ struct wbcir_data { spinlock_t spinlock; struct rc_dev *dev; struct led_classdev led; unsigned long wbase; /* Wake-Up Baseaddr */ unsigned long ebase; /* Enhanced Func. Baseaddr */ unsigned long sbase; /* Serial Port Baseaddr */ unsigned int irq; /* Serial Port IRQ */ u8 irqmask; /* RX state */ enum wbcir_rxstate rxstate; struct led_trigger *rxtrigger; int carrier_report_enabled; u32 pulse_duration; /* TX state */ enum wbcir_txstate txstate; struct led_trigger *txtrigger; u32 txlen; u32 txoff; u32 *txbuf; u8 txmask; u32 txcarrier; }; static enum wbcir_protocol protocol = IR_PROTOCOL_RC6; module_param(protocol, uint, 0444); MODULE_PARM_DESC(protocol, "IR protocol to use for the power-on command " "(0 = RC5, 1 = NEC, 2 = RC6A, default)"); static bool invert; /* default = 0 */ module_param(invert, bool, 0444); MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver"); static bool txandrx; /* default = 0 */ module_param(txandrx, bool, 0444); MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX"); static unsigned int wake_sc = 0x800F040C; module_param(wake_sc, uint, 0644); MODULE_PARM_DESC(wake_sc, "Scancode of the power-on IR command"); static unsigned int wake_rc6mode = 6; module_param(wake_rc6mode, uint, 0644); 
MODULE_PARM_DESC(wake_rc6mode, "RC6 mode for the power-on command " "(0 = 0, 6 = 6A, default)"); /***************************************************************************** * * UTILITY FUNCTIONS * *****************************************************************************/ /* Caller needs to hold wbcir_lock */ static void wbcir_set_bits(unsigned long addr, u8 bits, u8 mask) { u8 val; val = inb(addr); val = ((val & ~mask) | (bits & mask)); outb(val, addr); } /* Selects the register bank for the serial port */ static inline void wbcir_select_bank(struct wbcir_data *data, enum wbcir_bank bank) { outb(bank, data->sbase + WBCIR_REG_SP3_BSR); } static inline void wbcir_set_irqmask(struct wbcir_data *data, u8 irqmask) { if (data->irqmask == irqmask) return; wbcir_select_bank(data, WBCIR_BANK_0); outb(irqmask, data->sbase + WBCIR_REG_SP3_IER); data->irqmask = irqmask; } static enum led_brightness wbcir_led_brightness_get(struct led_classdev *led_cdev) { struct wbcir_data *data = container_of(led_cdev, struct wbcir_data, led); if (inb(data->ebase + WBCIR_REG_ECEIR_CTS) & WBCIR_LED_ENABLE) return LED_FULL; else return LED_OFF; } static void wbcir_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct wbcir_data *data = container_of(led_cdev, struct wbcir_data, led); wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CTS, brightness == LED_OFF ? 
0x00 : WBCIR_LED_ENABLE, WBCIR_LED_ENABLE); } /* Manchester encodes bits to RC6 message cells (see wbcir_shutdown) */ static u8 wbcir_to_rc6cells(u8 val) { u8 coded = 0x00; int i; val &= 0x0F; for (i = 0; i < 4; i++) { if (val & 0x01) coded |= 0x02 << (i * 2); else coded |= 0x01 << (i * 2); val >>= 1; } return coded; } /***************************************************************************** * * INTERRUPT FUNCTIONS * *****************************************************************************/ static void wbcir_carrier_report(struct wbcir_data *data) { unsigned counter = inb(data->ebase + WBCIR_REG_ECEIR_CNT_LO) | inb(data->ebase + WBCIR_REG_ECEIR_CNT_HI) << 8; if (counter > 0 && counter < 0xffff) { DEFINE_IR_RAW_EVENT(ev); ev.carrier_report = 1; ev.carrier = DIV_ROUND_CLOSEST(counter * 1000000u, data->pulse_duration); ir_raw_event_store(data->dev, &ev); } /* reset and restart the counter */ data->pulse_duration = 0; wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_R, WBCIR_CNTR_EN | WBCIR_CNTR_R); wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_EN, WBCIR_CNTR_EN | WBCIR_CNTR_R); } static void wbcir_idle_rx(struct rc_dev *dev, bool idle) { struct wbcir_data *data = dev->priv; if (!idle && data->rxstate == WBCIR_RXSTATE_INACTIVE) { data->rxstate = WBCIR_RXSTATE_ACTIVE; led_trigger_event(data->rxtrigger, LED_FULL); } if (idle && data->rxstate != WBCIR_RXSTATE_INACTIVE) { data->rxstate = WBCIR_RXSTATE_INACTIVE; led_trigger_event(data->rxtrigger, LED_OFF); if (data->carrier_report_enabled) wbcir_carrier_report(data); /* Tell hardware to go idle by setting RXINACTIVE */ outb(WBCIR_RX_DISABLE, data->sbase + WBCIR_REG_SP3_ASCR); } } static void wbcir_irq_rx(struct wbcir_data *data, struct pnp_dev *device) { u8 irdata; DEFINE_IR_RAW_EVENT(rawir); unsigned duration; /* Since RXHDLEV is set, at least 8 bytes are in the FIFO */ while (inb(data->sbase + WBCIR_REG_SP3_LSR) & WBCIR_RX_AVAIL) { irdata = inb(data->sbase + WBCIR_REG_SP3_RXDATA); if 
(data->rxstate == WBCIR_RXSTATE_ERROR) continue; duration = ((irdata & 0x7F) + 1) * (data->carrier_report_enabled ? 2 : 10); rawir.pulse = irdata & 0x80 ? false : true; rawir.duration = US_TO_NS(duration); if (rawir.pulse) data->pulse_duration += duration; ir_raw_event_store_with_filter(data->dev, &rawir); } ir_raw_event_handle(data->dev); } static void wbcir_irq_tx(struct wbcir_data *data) { unsigned int space; unsigned int used; u8 bytes[16]; u8 byte; if (!data->txbuf) return; switch (data->txstate) { case WBCIR_TXSTATE_INACTIVE: /* TX FIFO empty */ space = 16; led_trigger_event(data->txtrigger, LED_FULL); break; case WBCIR_TXSTATE_ACTIVE: /* TX FIFO low (3 bytes or less) */ space = 13; break; case WBCIR_TXSTATE_ERROR: space = 0; break; default: return; } /* * TX data is run-length coded in bytes: YXXXXXXX * Y = space (1) or pulse (0) * X = duration, encoded as (X + 1) * 10us (i.e 10 to 1280 us) */ for (used = 0; used < space && data->txoff != data->txlen; used++) { if (data->txbuf[data->txoff] == 0) { data->txoff++; continue; } byte = min((u32)0x80, data->txbuf[data->txoff]); data->txbuf[data->txoff] -= byte; byte--; byte |= (data->txoff % 2 ? 
0x80 : 0x00); /* pulse/space */ bytes[used] = byte; } while (data->txbuf[data->txoff] == 0 && data->txoff != data->txlen) data->txoff++; if (used == 0) { /* Finished */ if (data->txstate == WBCIR_TXSTATE_ERROR) /* Clear TX underrun bit */ outb(WBCIR_TX_UNDERRUN, data->sbase + WBCIR_REG_SP3_ASCR); wbcir_set_irqmask(data, WBCIR_IRQ_RX | WBCIR_IRQ_ERR); led_trigger_event(data->txtrigger, LED_OFF); kfree(data->txbuf); data->txbuf = NULL; data->txstate = WBCIR_TXSTATE_INACTIVE; } else if (data->txoff == data->txlen) { /* At the end of transmission, tell the hw before last byte */ outsb(data->sbase + WBCIR_REG_SP3_TXDATA, bytes, used - 1); outb(WBCIR_TX_EOT, data->sbase + WBCIR_REG_SP3_ASCR); outb(bytes[used - 1], data->sbase + WBCIR_REG_SP3_TXDATA); wbcir_set_irqmask(data, WBCIR_IRQ_RX | WBCIR_IRQ_ERR | WBCIR_IRQ_TX_EMPTY); } else { /* More data to follow... */ outsb(data->sbase + WBCIR_REG_SP3_RXDATA, bytes, used); if (data->txstate == WBCIR_TXSTATE_INACTIVE) { wbcir_set_irqmask(data, WBCIR_IRQ_RX | WBCIR_IRQ_ERR | WBCIR_IRQ_TX_LOW); data->txstate = WBCIR_TXSTATE_ACTIVE; } } } static irqreturn_t wbcir_irq_handler(int irqno, void *cookie) { struct pnp_dev *device = cookie; struct wbcir_data *data = pnp_get_drvdata(device); unsigned long flags; u8 status; spin_lock_irqsave(&data->spinlock, flags); wbcir_select_bank(data, WBCIR_BANK_0); status = inb(data->sbase + WBCIR_REG_SP3_EIR); status &= data->irqmask; if (!status) { spin_unlock_irqrestore(&data->spinlock, flags); return IRQ_NONE; } if (status & WBCIR_IRQ_ERR) { /* RX overflow? (read clears bit) */ if (inb(data->sbase + WBCIR_REG_SP3_LSR) & WBCIR_RX_OVERRUN) { data->rxstate = WBCIR_RXSTATE_ERROR; ir_raw_event_reset(data->dev); } /* TX underflow? 
*/ if (inb(data->sbase + WBCIR_REG_SP3_ASCR) & WBCIR_TX_UNDERRUN) data->txstate = WBCIR_TXSTATE_ERROR; } if (status & WBCIR_IRQ_RX) wbcir_irq_rx(data, device); if (status & (WBCIR_IRQ_TX_LOW | WBCIR_IRQ_TX_EMPTY)) wbcir_irq_tx(data); spin_unlock_irqrestore(&data->spinlock, flags); return IRQ_HANDLED; } /***************************************************************************** * * RC-CORE INTERFACE FUNCTIONS * *****************************************************************************/ static int wbcir_set_carrier_report(struct rc_dev *dev, int enable) { struct wbcir_data *data = dev->priv; unsigned long flags; spin_lock_irqsave(&data->spinlock, flags); if (data->carrier_report_enabled == enable) { spin_unlock_irqrestore(&data->spinlock, flags); return 0; } data->pulse_duration = 0; wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_R, WBCIR_CNTR_EN | WBCIR_CNTR_R); if (enable && data->dev->idle) wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_EN, WBCIR_CNTR_EN | WBCIR_CNTR_R); /* Set a higher sampling resolution if carrier reports are enabled */ wbcir_select_bank(data, WBCIR_BANK_2); data->dev->rx_resolution = US_TO_NS(enable ? 2 : 10); outb(enable ? 0x03 : 0x0f, data->sbase + WBCIR_REG_SP3_BGDL); outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH); /* Enable oversampling if carrier reports are enabled */ wbcir_select_bank(data, WBCIR_BANK_7); wbcir_set_bits(data->sbase + WBCIR_REG_SP3_RCCFG, enable ? 
WBCIR_RX_T_OV : 0, WBCIR_RX_T_OV); data->carrier_report_enabled = enable; spin_unlock_irqrestore(&data->spinlock, flags); return 0; } static int wbcir_txcarrier(struct rc_dev *dev, u32 carrier) { struct wbcir_data *data = dev->priv; unsigned long flags; u8 val; u32 freq; freq = DIV_ROUND_CLOSEST(carrier, 1000); if (freq < 30 || freq > 60) return -EINVAL; switch (freq) { case 58: case 59: case 60: val = freq - 58; freq *= 1000; break; case 57: val = freq - 27; freq = 56900; break; default: val = freq - 27; freq *= 1000; break; } spin_lock_irqsave(&data->spinlock, flags); if (data->txstate != WBCIR_TXSTATE_INACTIVE) { spin_unlock_irqrestore(&data->spinlock, flags); return -EBUSY; } if (data->txcarrier != freq) { wbcir_select_bank(data, WBCIR_BANK_7); wbcir_set_bits(data->sbase + WBCIR_REG_SP3_IRTXMC, val, 0x1F); data->txcarrier = freq; } spin_unlock_irqrestore(&data->spinlock, flags); return 0; } static int wbcir_txmask(struct rc_dev *dev, u32 mask) { struct wbcir_data *data = dev->priv; unsigned long flags; u8 val; /* Four outputs, only one output can be enabled at a time */ switch (mask) { case 0x1: val = 0x0; break; case 0x2: val = 0x1; break; case 0x4: val = 0x2; break; case 0x8: val = 0x3; break; default: return -EINVAL; } spin_lock_irqsave(&data->spinlock, flags); if (data->txstate != WBCIR_TXSTATE_INACTIVE) { spin_unlock_irqrestore(&data->spinlock, flags); return -EBUSY; } if (data->txmask != mask) { wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CTS, val, 0x0c); data->txmask = mask; } spin_unlock_irqrestore(&data->spinlock, flags); return 0; } static int wbcir_tx(struct rc_dev *dev, unsigned *b, unsigned count) { struct wbcir_data *data = dev->priv; unsigned *buf; unsigned i; unsigned long flags; buf = kmalloc(count * sizeof(*b), GFP_KERNEL); if (!buf) return -ENOMEM; /* Convert values to multiples of 10us */ for (i = 0; i < count; i++) buf[i] = DIV_ROUND_CLOSEST(b[i], 10); /* Not sure if this is possible, but better safe than sorry */ 
spin_lock_irqsave(&data->spinlock, flags); if (data->txstate != WBCIR_TXSTATE_INACTIVE) { spin_unlock_irqrestore(&data->spinlock, flags); kfree(buf); return -EBUSY; } /* Fill the TX fifo once, the irq handler will do the rest */ data->txbuf = buf; data->txlen = count; data->txoff = 0; wbcir_irq_tx(data); /* We're done */ spin_unlock_irqrestore(&data->spinlock, flags); return count; } /***************************************************************************** * * SETUP/INIT/SUSPEND/RESUME FUNCTIONS * *****************************************************************************/ static void wbcir_shutdown(struct pnp_dev *device) { struct device *dev = &device->dev; struct wbcir_data *data = pnp_get_drvdata(device); bool do_wake = true; u8 match[11]; u8 mask[11]; u8 rc6_csl = 0; int i; memset(match, 0, sizeof(match)); memset(mask, 0, sizeof(mask)); if (wake_sc == INVALID_SCANCODE || !device_may_wakeup(dev)) { do_wake = false; goto finish; } switch (protocol) { case IR_PROTOCOL_RC5: if (wake_sc > 0xFFF) { do_wake = false; dev_err(dev, "RC5 - Invalid wake scancode\n"); break; } /* Mask = 13 bits, ex toggle */ mask[0] = 0xFF; mask[1] = 0x17; match[0] = (wake_sc & 0x003F); /* 6 command bits */ match[0] |= (wake_sc & 0x0180) >> 1; /* 2 address bits */ match[1] = (wake_sc & 0x0E00) >> 9; /* 3 address bits */ if (!(wake_sc & 0x0040)) /* 2nd start bit */ match[1] |= 0x10; break; case IR_PROTOCOL_NEC: if (wake_sc > 0xFFFFFF) { do_wake = false; dev_err(dev, "NEC - Invalid wake scancode\n"); break; } mask[0] = mask[1] = mask[2] = mask[3] = 0xFF; match[1] = bitrev8((wake_sc & 0xFF)); match[0] = ~match[1]; match[3] = bitrev8((wake_sc & 0xFF00) >> 8); if (wake_sc > 0xFFFF) match[2] = bitrev8((wake_sc & 0xFF0000) >> 16); else match[2] = ~match[3]; break; case IR_PROTOCOL_RC6: if (wake_rc6mode == 0) { if (wake_sc > 0xFFFF) { do_wake = false; dev_err(dev, "RC6 - Invalid wake scancode\n"); break; } /* Command */ match[0] = wbcir_to_rc6cells(wake_sc >> 0); mask[0] = 0xFF; match[1] = 
wbcir_to_rc6cells(wake_sc >> 4); mask[1] = 0xFF; /* Address */ match[2] = wbcir_to_rc6cells(wake_sc >> 8); mask[2] = 0xFF; match[3] = wbcir_to_rc6cells(wake_sc >> 12); mask[3] = 0xFF; /* Header */ match[4] = 0x50; /* mode1 = mode0 = 0, ignore toggle */ mask[4] = 0xF0; match[5] = 0x09; /* start bit = 1, mode2 = 0 */ mask[5] = 0x0F; rc6_csl = 44; } else if (wake_rc6mode == 6) { i = 0; /* Command */ match[i] = wbcir_to_rc6cells(wake_sc >> 0); mask[i++] = 0xFF; match[i] = wbcir_to_rc6cells(wake_sc >> 4); mask[i++] = 0xFF; /* Address + Toggle */ match[i] = wbcir_to_rc6cells(wake_sc >> 8); mask[i++] = 0xFF; match[i] = wbcir_to_rc6cells(wake_sc >> 12); mask[i++] = 0x3F; /* Customer bits 7 - 0 */ match[i] = wbcir_to_rc6cells(wake_sc >> 16); mask[i++] = 0xFF; match[i] = wbcir_to_rc6cells(wake_sc >> 20); mask[i++] = 0xFF; if (wake_sc & 0x80000000) { /* Customer range bit and bits 15 - 8 */ match[i] = wbcir_to_rc6cells(wake_sc >> 24); mask[i++] = 0xFF; match[i] = wbcir_to_rc6cells(wake_sc >> 28); mask[i++] = 0xFF; rc6_csl = 76; } else if (wake_sc <= 0x007FFFFF) { rc6_csl = 60; } else { do_wake = false; dev_err(dev, "RC6 - Invalid wake scancode\n"); break; } /* Header */ match[i] = 0x93; /* mode1 = mode0 = 1, submode = 0 */ mask[i++] = 0xFF; match[i] = 0x0A; /* start bit = 1, mode2 = 1 */ mask[i++] = 0x0F; } else { do_wake = false; dev_err(dev, "RC6 - Invalid wake mode\n"); } break; default: do_wake = false; break; } finish: if (do_wake) { /* Set compare and compare mask */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_INDEX, WBCIR_REGSEL_COMPARE | WBCIR_REG_ADDR0, 0x3F); outsb(data->wbase + WBCIR_REG_WCEIR_DATA, match, 11); wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_INDEX, WBCIR_REGSEL_MASK | WBCIR_REG_ADDR0, 0x3F); outsb(data->wbase + WBCIR_REG_WCEIR_DATA, mask, 11); /* RC6 Compare String Len */ outb(rc6_csl, data->wbase + WBCIR_REG_WCEIR_CSL); /* Clear status bits NEC_REP, BUFF, MSG_END, MATCH */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_STS, 0x17, 0x17); /* Clear 
BUFF_EN, Clear END_EN, Set MATCH_EN */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x01, 0x07); /* Set CEIR_EN */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, 0x01, 0x01); } else { /* Clear BUFF_EN, Clear END_EN, Clear MATCH_EN */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x00, 0x07); /* Clear CEIR_EN */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, 0x00, 0x01); } /* * ACPI will set the HW disable bit for SP3 which means that the * output signals are left in an undefined state which may cause * spurious interrupts which we need to ignore until the hardware * is reinitialized. */ wbcir_set_irqmask(data, WBCIR_IRQ_NONE); disable_irq(data->irq); /* Disable LED */ led_trigger_event(data->rxtrigger, LED_OFF); led_trigger_event(data->txtrigger, LED_OFF); } static int wbcir_suspend(struct pnp_dev *device, pm_message_t state) { wbcir_shutdown(device); return 0; } static void wbcir_init_hw(struct wbcir_data *data) { u8 tmp; /* Disable interrupts */ wbcir_set_irqmask(data, WBCIR_IRQ_NONE); /* Set PROT_SEL, RX_INV, Clear CEIR_EN (needed for the led) */ tmp = protocol << 4; if (invert) tmp |= 0x08; outb(tmp, data->wbase + WBCIR_REG_WCEIR_CTL); /* Clear status bits NEC_REP, BUFF, MSG_END, MATCH */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_STS, 0x17, 0x17); /* Clear BUFF_EN, Clear END_EN, Clear MATCH_EN */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x00, 0x07); /* Set RC5 cell time to correspond to 36 kHz */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CFG1, 0x4A, 0x7F); /* Set IRTX_INV */ if (invert) outb(WBCIR_IRTX_INV, data->ebase + WBCIR_REG_ECEIR_CCTL); else outb(0x00, data->ebase + WBCIR_REG_ECEIR_CCTL); /* * Clear IR LED, set SP3 clock to 24Mhz, set TX mask to IRTX1, * set SP3_IRRX_SW to binary 01, helpfully not documented */ outb(0x10, data->ebase + WBCIR_REG_ECEIR_CTS); data->txmask = 0x1; /* Enable extended mode */ wbcir_select_bank(data, WBCIR_BANK_2); outb(WBCIR_EXT_ENABLE, data->sbase + WBCIR_REG_SP3_EXCR1); /* * Configure baud 
generator, IR data will be sampled at * a bitrate of: (24Mhz * prescaler) / (divisor * 16). * * The ECIR registers include a flag to change the * 24Mhz clock freq to 48Mhz. * * It's not documented in the specs, but fifo levels * other than 16 seems to be unsupported. */ /* prescaler 1.0, tx/rx fifo lvl 16 */ outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2); /* Set baud divisor to sample every 10 us */ outb(0x0f, data->sbase + WBCIR_REG_SP3_BGDL); outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH); /* Set CEIR mode */ wbcir_select_bank(data, WBCIR_BANK_0); outb(0xC0, data->sbase + WBCIR_REG_SP3_MCR); inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */ inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */ /* Disable RX demod, enable run-length enc/dec, set freq span */ wbcir_select_bank(data, WBCIR_BANK_7); outb(0x90, data->sbase + WBCIR_REG_SP3_RCCFG); /* Disable timer */ wbcir_select_bank(data, WBCIR_BANK_4); outb(0x00, data->sbase + WBCIR_REG_SP3_IRCR1); /* Disable MSR interrupt, clear AUX_IRX, mask RX during TX? */ wbcir_select_bank(data, WBCIR_BANK_5); outb(txandrx ? 
0x03 : 0x02, data->sbase + WBCIR_REG_SP3_IRCR2); /* Disable CRC */ wbcir_select_bank(data, WBCIR_BANK_6); outb(0x20, data->sbase + WBCIR_REG_SP3_IRCR3); /* Set RX demodulation freq, not really used */ wbcir_select_bank(data, WBCIR_BANK_7); outb(0xF2, data->sbase + WBCIR_REG_SP3_IRRXDC); /* Set TX modulation, 36kHz, 7us pulse width */ outb(0x69, data->sbase + WBCIR_REG_SP3_IRTXMC); data->txcarrier = 36000; /* Set invert and pin direction */ if (invert) outb(0x10, data->sbase + WBCIR_REG_SP3_IRCFG4); else outb(0x00, data->sbase + WBCIR_REG_SP3_IRCFG4); /* Set FIFO thresholds (RX = 8, TX = 3), reset RX/TX */ wbcir_select_bank(data, WBCIR_BANK_0); outb(0x97, data->sbase + WBCIR_REG_SP3_FCR); /* Clear AUX status bits */ outb(0xE0, data->sbase + WBCIR_REG_SP3_ASCR); /* Clear RX state */ data->rxstate = WBCIR_RXSTATE_INACTIVE; ir_raw_event_reset(data->dev); ir_raw_event_set_idle(data->dev, true); /* Clear TX state */ if (data->txstate == WBCIR_TXSTATE_ACTIVE) { kfree(data->txbuf); data->txbuf = NULL; data->txstate = WBCIR_TXSTATE_INACTIVE; } /* Enable interrupts */ wbcir_set_irqmask(data, WBCIR_IRQ_RX | WBCIR_IRQ_ERR); } static int wbcir_resume(struct pnp_dev *device) { struct wbcir_data *data = pnp_get_drvdata(device); wbcir_init_hw(data); enable_irq(data->irq); return 0; } static int wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) { struct device *dev = &device->dev; struct wbcir_data *data; int err; if (!(pnp_port_len(device, 0) == EHFUNC_IOMEM_LEN && pnp_port_len(device, 1) == WAKEUP_IOMEM_LEN && pnp_port_len(device, 2) == SP_IOMEM_LEN)) { dev_err(dev, "Invalid resources\n"); return -ENODEV; } data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } pnp_set_drvdata(device, data); spin_lock_init(&data->spinlock); data->ebase = pnp_port_start(device, 0); data->wbase = pnp_port_start(device, 1); data->sbase = pnp_port_start(device, 2); data->irq = pnp_irq(device, 0); if (data->wbase == 0 || data->ebase == 0 || 
data->sbase == 0 || data->irq == 0) { err = -ENODEV; dev_err(dev, "Invalid resources\n"); goto exit_free_data; } dev_dbg(&device->dev, "Found device " "(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n", data->wbase, data->ebase, data->sbase, data->irq); led_trigger_register_simple("cir-tx", &data->txtrigger); if (!data->txtrigger) { err = -ENOMEM; goto exit_free_data; } led_trigger_register_simple("cir-rx", &data->rxtrigger); if (!data->rxtrigger) { err = -ENOMEM; goto exit_unregister_txtrigger; } data->led.name = "cir::activity"; data->led.default_trigger = "cir-rx"; data->led.brightness_set = wbcir_led_brightness_set; data->led.brightness_get = wbcir_led_brightness_get; err = led_classdev_register(&device->dev, &data->led); if (err) goto exit_unregister_rxtrigger; data->dev = rc_allocate_device(); if (!data->dev) { err = -ENOMEM; goto exit_unregister_led; } data->dev->driver_type = RC_DRIVER_IR_RAW; data->dev->driver_name = DRVNAME; data->dev->input_name = WBCIR_NAME; data->dev->input_phys = "wbcir/cir0"; data->dev->input_id.bustype = BUS_HOST; data->dev->input_id.vendor = PCI_VENDOR_ID_WINBOND; data->dev->input_id.product = WBCIR_ID_FAMILY; data->dev->input_id.version = WBCIR_ID_CHIP; data->dev->map_name = RC_MAP_RC6_MCE; data->dev->s_idle = wbcir_idle_rx; data->dev->s_carrier_report = wbcir_set_carrier_report; data->dev->s_tx_mask = wbcir_txmask; data->dev->s_tx_carrier = wbcir_txcarrier; data->dev->tx_ir = wbcir_tx; data->dev->priv = data; data->dev->dev.parent = &device->dev; data->dev->timeout = MS_TO_NS(100); data->dev->rx_resolution = US_TO_NS(2); data->dev->allowed_protos = RC_BIT_ALL; err = rc_register_device(data->dev); if (err) goto exit_free_rc; if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) { dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1); err = -EBUSY; goto exit_unregister_device; } if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) { dev_err(dev, "Region 0x%lx-0x%lx already in 
use!\n", data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1); err = -EBUSY; goto exit_release_wbase; } if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) { dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", data->sbase, data->sbase + SP_IOMEM_LEN - 1); err = -EBUSY; goto exit_release_ebase; } err = request_irq(data->irq, wbcir_irq_handler, IRQF_DISABLED, DRVNAME, device); if (err) { dev_err(dev, "Failed to claim IRQ %u\n", data->irq); err = -EBUSY; goto exit_release_sbase; } device_init_wakeup(&device->dev, 1); wbcir_init_hw(data); return 0; exit_release_sbase: release_region(data->sbase, SP_IOMEM_LEN); exit_release_ebase: release_region(data->ebase, EHFUNC_IOMEM_LEN); exit_release_wbase: release_region(data->wbase, WAKEUP_IOMEM_LEN); exit_unregister_device: rc_unregister_device(data->dev); data->dev = NULL; exit_free_rc: rc_free_device(data->dev); exit_unregister_led: led_classdev_unregister(&data->led); exit_unregister_rxtrigger: led_trigger_unregister_simple(data->rxtrigger); exit_unregister_txtrigger: led_trigger_unregister_simple(data->txtrigger); exit_free_data: kfree(data); pnp_set_drvdata(device, NULL); exit: return err; } static void wbcir_remove(struct pnp_dev *device) { struct wbcir_data *data = pnp_get_drvdata(device); /* Disable interrupts */ wbcir_set_irqmask(data, WBCIR_IRQ_NONE); free_irq(data->irq, device); /* Clear status bits NEC_REP, BUFF, MSG_END, MATCH */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_STS, 0x17, 0x17); /* Clear CEIR_EN */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, 0x00, 0x01); /* Clear BUFF_EN, END_EN, MATCH_EN */ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x00, 0x07); rc_unregister_device(data->dev); led_trigger_unregister_simple(data->rxtrigger); led_trigger_unregister_simple(data->txtrigger); led_classdev_unregister(&data->led); /* This is ok since &data->led isn't actually used */ wbcir_led_brightness_set(&data->led, LED_OFF); release_region(data->wbase, WAKEUP_IOMEM_LEN); release_region(data->ebase, 
EHFUNC_IOMEM_LEN); release_region(data->sbase, SP_IOMEM_LEN); kfree(data); pnp_set_drvdata(device, NULL); } static const struct pnp_device_id wbcir_ids[] = { { "WEC1022", 0 }, { "", 0 } }; MODULE_DEVICE_TABLE(pnp, wbcir_ids); static struct pnp_driver wbcir_driver = { .name = WBCIR_NAME, .id_table = wbcir_ids, .probe = wbcir_probe, .remove = wbcir_remove, .suspend = wbcir_suspend, .resume = wbcir_resume, .shutdown = wbcir_shutdown }; static int __init wbcir_init(void) { int ret; switch (protocol) { case IR_PROTOCOL_RC5: case IR_PROTOCOL_NEC: case IR_PROTOCOL_RC6: break; default: pr_err("Invalid power-on protocol\n"); } ret = pnp_register_driver(&wbcir_driver); if (ret) pr_err("Unable to register driver\n"); return ret; } static void __exit wbcir_exit(void) { pnp_unregister_driver(&wbcir_driver); } module_init(wbcir_init); module_exit(wbcir_exit); MODULE_AUTHOR("David Härdeman <david@hardeman.nu>"); MODULE_DESCRIPTION("Winbond SuperI/O Consumer IR Driver"); MODULE_LICENSE("GPL");
gpl-2.0
jeboo/kernel-msm
drivers/staging/usbip/userspace/libsrc/vhci_driver.c
2158
11346
/* * Copyright (C) 2005-2007 Takahiro Hirofuchi */ #include "usbip_common.h" #include "vhci_driver.h" #undef PROGNAME #define PROGNAME "libusbip" struct usbip_vhci_driver *vhci_driver; static struct usbip_imported_device * imported_device_init(struct usbip_imported_device *idev, char *busid) { struct sysfs_device *sudev; sudev = sysfs_open_device("usb", busid); if (!sudev) { dbg("sysfs_open_device failed: %s", busid); goto err; } read_usb_device(sudev, &idev->udev); sysfs_close_device(sudev); /* add class devices of this imported device */ struct usbip_class_device *cdev; dlist_for_each_data(vhci_driver->cdev_list, cdev, struct usbip_class_device) { if (!strncmp(cdev->dev_path, idev->udev.path, strlen(idev->udev.path))) { struct usbip_class_device *new_cdev; /* * alloc and copy because dlist is linked * from only one list */ new_cdev = calloc(1, sizeof(*new_cdev)); if (!new_cdev) goto err; memcpy(new_cdev, cdev, sizeof(*new_cdev)); dlist_unshift(idev->cdev_list, (void *) new_cdev); } } return idev; err: return NULL; } static int parse_status(char *value) { int ret = 0; char *c; for (int i = 0; i < vhci_driver->nports; i++) memset(&vhci_driver->idev[i], 0, sizeof(vhci_driver->idev[i])); /* skip a header line */ c = strchr(value, '\n'); if (!c) return -1; c++; while (*c != '\0') { int port, status, speed, devid; unsigned long socket; char lbusid[SYSFS_BUS_ID_SIZE]; ret = sscanf(c, "%d %d %d %x %lx %s\n", &port, &status, &speed, &devid, &socket, lbusid); if (ret < 5) { dbg("sscanf failed: %d", ret); BUG(); } dbg("port %d status %d speed %d devid %x", port, status, speed, devid); dbg("socket %lx lbusid %s", socket, lbusid); /* if a device is connected, look at it */ { struct usbip_imported_device *idev = &vhci_driver->idev[port]; idev->port = port; idev->status = status; idev->devid = devid; idev->busnum = (devid >> 16); idev->devnum = (devid & 0x0000ffff); idev->cdev_list = dlist_new(sizeof(struct usbip_class_device)); if (!idev->cdev_list) { dbg("dlist_new failed"); 
return -1; } if (idev->status != VDEV_ST_NULL && idev->status != VDEV_ST_NOTASSIGNED) { idev = imported_device_init(idev, lbusid); if (!idev) { dbg("imported_device_init failed"); return -1; } } } /* go to the next line */ c = strchr(c, '\n'); if (!c) break; c++; } dbg("exit"); return 0; } static int check_usbip_device(struct sysfs_class_device *cdev) { /* /sys/class/video4linux/video0/device */ char class_path[SYSFS_PATH_MAX]; /* /sys/devices/platform/vhci_hcd/usb6/6-1:1.1 */ char dev_path[SYSFS_PATH_MAX]; int ret; struct usbip_class_device *usbip_cdev; snprintf(class_path, sizeof(class_path), "%s/device", cdev->path); ret = sysfs_get_link(class_path, dev_path, sizeof(dev_path)); if (ret == 0) { if (!strncmp(dev_path, vhci_driver->hc_device->path, strlen(vhci_driver->hc_device->path))) { /* found usbip device */ usbip_cdev = calloc(1, sizeof(*usbip_cdev)); if (!usbip_cdev) { dbg("calloc failed"); return -1; } dlist_unshift(vhci_driver->cdev_list, usbip_cdev); strncpy(usbip_cdev->class_path, class_path, sizeof(usbip_cdev->class_path)); strncpy(usbip_cdev->dev_path, dev_path, sizeof(usbip_cdev->dev_path)); dbg("found: %s %s", class_path, dev_path); } } return 0; } static int search_class_for_usbip_device(char *cname) { struct sysfs_class *class; struct dlist *cdev_list; struct sysfs_class_device *cdev; int ret = 0; class = sysfs_open_class(cname); if (!class) { dbg("sysfs_open_class failed"); return -1; } dbg("class: %s", class->name); cdev_list = sysfs_get_class_devices(class); if (!cdev_list) /* nothing */ goto out; dlist_for_each_data(cdev_list, cdev, struct sysfs_class_device) { dbg("cdev: %s", cdev->name); ret = check_usbip_device(cdev); if (ret < 0) goto out; } out: sysfs_close_class(class); return ret; } static int refresh_class_device_list(void) { int ret; struct dlist *cname_list; char *cname; char sysfs_mntpath[SYSFS_PATH_MAX]; char class_path[SYSFS_PATH_MAX]; ret = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX); if (ret < 0) { dbg("sysfs_get_mnt_path 
failed"); return -1; } snprintf(class_path, sizeof(class_path), "%s/%s", sysfs_mntpath, SYSFS_CLASS_NAME); /* search under /sys/class */ cname_list = sysfs_open_directory_list(class_path); if (!cname_list) { dbg("sysfs_open_directory failed"); return -1; } dlist_for_each_data(cname_list, cname, char) { ret = search_class_for_usbip_device(cname); if (ret < 0) { sysfs_close_list(cname_list); return -1; } } sysfs_close_list(cname_list); /* seach under /sys/block */ ret = search_class_for_usbip_device(SYSFS_BLOCK_NAME); if (ret < 0) return -1; return 0; } static int refresh_imported_device_list(void) { struct sysfs_attribute *attr_status; attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status"); if (!attr_status) { dbg("sysfs_get_device_attr(\"status\") failed: %s", vhci_driver->hc_device->name); return -1; } dbg("name: %s path: %s len: %d method: %d value: %s", attr_status->name, attr_status->path, attr_status->len, attr_status->method, attr_status->value); return parse_status(attr_status->value); } static int get_nports(void) { char *c; int nports = 0; struct sysfs_attribute *attr_status; attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status"); if (!attr_status) { dbg("sysfs_get_device_attr(\"status\") failed: %s", vhci_driver->hc_device->name); return -1; } dbg("name: %s path: %s len: %d method: %d value: %s", attr_status->name, attr_status->path, attr_status->len, attr_status->method, attr_status->value); /* skip a header line */ c = strchr(attr_status->value, '\n'); if (!c) return 0; c++; while (*c != '\0') { /* go to the next line */ c = strchr(c, '\n'); if (!c) return nports; c++; nports += 1; } return nports; } static int get_hc_busid(char *sysfs_mntpath, char *hc_busid) { struct sysfs_driver *sdriver; char sdriver_path[SYSFS_PATH_MAX]; struct sysfs_device *hc_dev; struct dlist *hc_devs; int found = 0; snprintf(sdriver_path, SYSFS_PATH_MAX, "%s/%s/%s/%s/%s", sysfs_mntpath, SYSFS_BUS_NAME, USBIP_VHCI_BUS_TYPE, SYSFS_DRIVERS_NAME, 
USBIP_VHCI_DRV_NAME); sdriver = sysfs_open_driver_path(sdriver_path); if (!sdriver) { dbg("sysfs_open_driver_path failed: %s", sdriver_path); dbg("make sure " USBIP_CORE_MOD_NAME ".ko and " USBIP_VHCI_DRV_NAME ".ko are loaded!"); return -1; } hc_devs = sysfs_get_driver_devices(sdriver); if (!hc_devs) { dbg("sysfs_get_driver failed"); goto err; } /* assume only one vhci_hcd */ dlist_for_each_data(hc_devs, hc_dev, struct sysfs_device) { strncpy(hc_busid, hc_dev->bus_id, SYSFS_BUS_ID_SIZE); found = 1; } err: sysfs_close_driver(sdriver); if (found) return 0; dbg("%s not found", hc_busid); return -1; } /* ---------------------------------------------------------------------- */ int usbip_vhci_driver_open(void) { int ret; char hc_busid[SYSFS_BUS_ID_SIZE]; vhci_driver = (struct usbip_vhci_driver *) calloc(1, sizeof(*vhci_driver)); if (!vhci_driver) { dbg("calloc failed"); return -1; } ret = sysfs_get_mnt_path(vhci_driver->sysfs_mntpath, SYSFS_PATH_MAX); if (ret < 0) { dbg("sysfs_get_mnt_path failed"); goto err; } ret = get_hc_busid(vhci_driver->sysfs_mntpath, hc_busid); if (ret < 0) goto err; /* will be freed in usbip_driver_close() */ vhci_driver->hc_device = sysfs_open_device(USBIP_VHCI_BUS_TYPE, hc_busid); if (!vhci_driver->hc_device) { dbg("sysfs_open_device failed"); goto err; } vhci_driver->nports = get_nports(); dbg("available ports: %d", vhci_driver->nports); vhci_driver->cdev_list = dlist_new(sizeof(struct usbip_class_device)); if (!vhci_driver->cdev_list) goto err; if (refresh_class_device_list()) goto err; if (refresh_imported_device_list()) goto err; return 0; err: if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); if (vhci_driver->hc_device) sysfs_close_device(vhci_driver->hc_device); if (vhci_driver) free(vhci_driver); vhci_driver = NULL; return -1; } void usbip_vhci_driver_close() { if (!vhci_driver) return; if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); for (int i = 0; i < vhci_driver->nports; i++) { if 
(vhci_driver->idev[i].cdev_list) dlist_destroy(vhci_driver->idev[i].cdev_list); } if (vhci_driver->hc_device) sysfs_close_device(vhci_driver->hc_device); free(vhci_driver); vhci_driver = NULL; } int usbip_vhci_refresh_device_list(void) { if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].cdev_list) dlist_destroy(vhci_driver->idev[i].cdev_list); } vhci_driver->cdev_list = dlist_new(sizeof(struct usbip_class_device)); if (!vhci_driver->cdev_list) goto err; if (refresh_class_device_list()) goto err; if (refresh_imported_device_list()) goto err; return 0; err: if (vhci_driver->cdev_list) dlist_destroy(vhci_driver->cdev_list); for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].cdev_list) dlist_destroy(vhci_driver->idev[i].cdev_list); } dbg("failed to refresh device list"); return -1; } int usbip_vhci_get_free_port(void) { for (int i = 0; i < vhci_driver->nports; i++) { if (vhci_driver->idev[i].status == VDEV_ST_NULL) return i; } return -1; } int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid, uint32_t speed) { struct sysfs_attribute *attr_attach; char buff[200]; /* what size should be ? 
*/ int ret; attr_attach = sysfs_get_device_attr(vhci_driver->hc_device, "attach"); if (!attr_attach) { dbg("sysfs_get_device_attr(\"attach\") failed: %s", vhci_driver->hc_device->name); return -1; } snprintf(buff, sizeof(buff), "%u %u %u %u", port, sockfd, devid, speed); dbg("writing: %s", buff); ret = sysfs_write_attribute(attr_attach, buff, strlen(buff)); if (ret < 0) { dbg("sysfs_write_attribute failed"); return -1; } dbg("attached port: %d", port); return 0; } static unsigned long get_devid(uint8_t busnum, uint8_t devnum) { return (busnum << 16) | devnum; } /* will be removed */ int usbip_vhci_attach_device(uint8_t port, int sockfd, uint8_t busnum, uint8_t devnum, uint32_t speed) { int devid = get_devid(busnum, devnum); return usbip_vhci_attach_device2(port, sockfd, devid, speed); } int usbip_vhci_detach_device(uint8_t port) { struct sysfs_attribute *attr_detach; char buff[200]; /* what size should be ? */ int ret; attr_detach = sysfs_get_device_attr(vhci_driver->hc_device, "detach"); if (!attr_detach) { dbg("sysfs_get_device_attr(\"detach\") failed: %s", vhci_driver->hc_device->name); return -1; } snprintf(buff, sizeof(buff), "%u", port); dbg("writing: %s", buff); ret = sysfs_write_attribute(attr_detach, buff, strlen(buff)); if (ret < 0) { dbg("sysfs_write_attribute failed"); return -1; } dbg("detached port: %d", port); return 0; }
gpl-2.0
sebirdman/kernel-msm
drivers/iio/adc/ad7923.c
2158
9672
/* * AD7904/AD7914/AD7923/AD7924 SPI ADC driver * * Copyright 2011 Analog Devices Inc (from AD7923 Driver) * Copyright 2012 CS Systemes d'Information * * Licensed under the GPL-2. */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/spi/spi.h> #include <linux/regulator/consumer.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/buffer.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> #define AD7923_WRITE_CR (1 << 11) /* write control register */ #define AD7923_RANGE (1 << 1) /* range to REFin */ #define AD7923_CODING (1 << 0) /* coding is straight binary */ #define AD7923_PM_MODE_AS (1) /* auto shutdown */ #define AD7923_PM_MODE_FS (2) /* full shutdown */ #define AD7923_PM_MODE_OPS (3) /* normal operation */ #define AD7923_CHANNEL_0 (0) /* analog input 0 */ #define AD7923_CHANNEL_1 (1) /* analog input 1 */ #define AD7923_CHANNEL_2 (2) /* analog input 2 */ #define AD7923_CHANNEL_3 (3) /* analog input 3 */ #define AD7923_SEQUENCE_OFF (0) /* no sequence fonction */ #define AD7923_SEQUENCE_PROTECT (2) /* no interrupt write cycle */ #define AD7923_SEQUENCE_ON (3) /* continuous sequence */ #define AD7923_MAX_CHAN 4 #define AD7923_PM_MODE_WRITE(mode) (mode << 4) /* write mode */ #define AD7923_CHANNEL_WRITE(channel) (channel << 6) /* write channel */ #define AD7923_SEQUENCE_WRITE(sequence) (((sequence & 1) << 3) \ + ((sequence & 2) << 9)) /* write sequence fonction */ /* left shift for CR : bit 11 transmit in first */ #define AD7923_SHIFT_REGISTER 4 /* val = value, dec = left shift, bits = number of bits of the mask */ #define EXTRACT(val, dec, bits) ((val >> dec) & ((1 << bits) - 1)) struct ad7923_state { struct spi_device *spi; struct spi_transfer ring_xfer[5]; struct spi_transfer scan_single_xfer[2]; struct spi_message ring_msg; struct 
spi_message scan_single_msg; struct regulator *reg; unsigned int settings; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. */ __be16 rx_buf[4] ____cacheline_aligned; __be16 tx_buf[4]; }; struct ad7923_chip_info { const struct iio_chan_spec *channels; unsigned int num_channels; }; enum ad7923_id { AD7904, AD7914, AD7924, }; #define AD7923_V_CHAN(index, bits) \ { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = index, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ .address = index, \ .scan_index = index, \ .scan_type = { \ .sign = 'u', \ .realbits = (bits), \ .storagebits = 16, \ .endianness = IIO_BE, \ }, \ } #define DECLARE_AD7923_CHANNELS(name, bits) \ const struct iio_chan_spec name ## _channels[] = { \ AD7923_V_CHAN(0, bits), \ AD7923_V_CHAN(1, bits), \ AD7923_V_CHAN(2, bits), \ AD7923_V_CHAN(3, bits), \ IIO_CHAN_SOFT_TIMESTAMP(4), \ } static DECLARE_AD7923_CHANNELS(ad7904, 8); static DECLARE_AD7923_CHANNELS(ad7914, 10); static DECLARE_AD7923_CHANNELS(ad7924, 12); static const struct ad7923_chip_info ad7923_chip_info[] = { [AD7904] = { .channels = ad7904_channels, .num_channels = ARRAY_SIZE(ad7904_channels), }, [AD7914] = { .channels = ad7914_channels, .num_channels = ARRAY_SIZE(ad7914_channels), }, [AD7924] = { .channels = ad7924_channels, .num_channels = ARRAY_SIZE(ad7924_channels), }, }; /** * ad7923_update_scan_mode() setup the spi transfer buffer for the new scan mask **/ static int ad7923_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *active_scan_mask) { struct ad7923_state *st = iio_priv(indio_dev); int i, cmd, len; len = 0; for_each_set_bit(i, active_scan_mask, AD7923_MAX_CHAN) { cmd = AD7923_WRITE_CR | AD7923_CHANNEL_WRITE(i) | AD7923_SEQUENCE_WRITE(AD7923_SEQUENCE_OFF) | st->settings; cmd <<= AD7923_SHIFT_REGISTER; st->tx_buf[len++] = cpu_to_be16(cmd); } /* build spi ring message */ st->ring_xfer[0].tx_buf = 
&st->tx_buf[0]; st->ring_xfer[0].len = len; st->ring_xfer[0].cs_change = 1; spi_message_init(&st->ring_msg); spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg); for (i = 0; i < len; i++) { st->ring_xfer[i + 1].rx_buf = &st->rx_buf[i]; st->ring_xfer[i + 1].len = 2; st->ring_xfer[i + 1].cs_change = 1; spi_message_add_tail(&st->ring_xfer[i + 1], &st->ring_msg); } /* make sure last transfer cs_change is not set */ st->ring_xfer[i + 1].cs_change = 0; return 0; } /** * ad7923_trigger_handler() bh of trigger launched polling to ring buffer * * Currently there is no option in this driver to disable the saving of * timestamps within the ring. **/ static irqreturn_t ad7923_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ad7923_state *st = iio_priv(indio_dev); s64 time_ns = 0; int b_sent; b_sent = spi_sync(st->spi, &st->ring_msg); if (b_sent) goto done; if (indio_dev->scan_timestamp) { time_ns = iio_get_time_ns(); memcpy((u8 *)st->rx_buf + indio_dev->scan_bytes - sizeof(s64), &time_ns, sizeof(time_ns)); } iio_push_to_buffers(indio_dev, (u8 *)st->rx_buf); done: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } static int ad7923_scan_direct(struct ad7923_state *st, unsigned ch) { int ret, cmd; cmd = AD7923_WRITE_CR | AD7923_CHANNEL_WRITE(ch) | AD7923_SEQUENCE_WRITE(AD7923_SEQUENCE_OFF) | st->settings; cmd <<= AD7923_SHIFT_REGISTER; st->tx_buf[0] = cpu_to_be16(cmd); ret = spi_sync(st->spi, &st->scan_single_msg); if (ret) return ret; return be16_to_cpu(st->rx_buf[0]); } static int ad7923_get_range(struct ad7923_state *st) { int vref; vref = regulator_get_voltage(st->reg); if (vref < 0) return vref; vref /= 1000; if (!(st->settings & AD7923_RANGE)) vref *= 2; return vref; } static int ad7923_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long m) { int ret; struct ad7923_state *st = iio_priv(indio_dev); switch (m) { case IIO_CHAN_INFO_RAW: 
mutex_lock(&indio_dev->mlock); if (iio_buffer_enabled(indio_dev)) ret = -EBUSY; else ret = ad7923_scan_direct(st, chan->address); mutex_unlock(&indio_dev->mlock); if (ret < 0) return ret; if (chan->address == EXTRACT(ret, 12, 4)) *val = EXTRACT(ret, 0, 12); else return -EIO; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: ret = ad7923_get_range(st); if (ret < 0) return ret; *val = ret; *val2 = chan->scan_type.realbits; return IIO_VAL_FRACTIONAL_LOG2; } return -EINVAL; } static const struct iio_info ad7923_info = { .read_raw = &ad7923_read_raw, .update_scan_mode = ad7923_update_scan_mode, .driver_module = THIS_MODULE, }; static int ad7923_probe(struct spi_device *spi) { struct ad7923_state *st; struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st)); const struct ad7923_chip_info *info; int ret; if (indio_dev == NULL) return -ENOMEM; st = iio_priv(indio_dev); spi_set_drvdata(spi, indio_dev); st->spi = spi; st->settings = AD7923_CODING | AD7923_RANGE | AD7923_PM_MODE_WRITE(AD7923_PM_MODE_OPS); info = &ad7923_chip_info[spi_get_device_id(spi)->driver_data]; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->dev.parent = &spi->dev; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = info->channels; indio_dev->num_channels = info->num_channels; indio_dev->info = &ad7923_info; /* Setup default message */ st->scan_single_xfer[0].tx_buf = &st->tx_buf[0]; st->scan_single_xfer[0].len = 2; st->scan_single_xfer[0].cs_change = 1; st->scan_single_xfer[1].rx_buf = &st->rx_buf[0]; st->scan_single_xfer[1].len = 2; spi_message_init(&st->scan_single_msg); spi_message_add_tail(&st->scan_single_xfer[0], &st->scan_single_msg); spi_message_add_tail(&st->scan_single_xfer[1], &st->scan_single_msg); st->reg = regulator_get(&spi->dev, "refin"); if (IS_ERR(st->reg)) { ret = PTR_ERR(st->reg); goto error_free; } ret = regulator_enable(st->reg); if (ret) goto error_put_reg; ret = iio_triggered_buffer_setup(indio_dev, NULL, &ad7923_trigger_handler, NULL); if (ret) goto 
error_disable_reg; ret = iio_device_register(indio_dev); if (ret) goto error_cleanup_ring; return 0; error_cleanup_ring: iio_triggered_buffer_cleanup(indio_dev); error_disable_reg: regulator_disable(st->reg); error_put_reg: regulator_put(st->reg); error_free: iio_device_free(indio_dev); return ret; } static int ad7923_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad7923_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); iio_triggered_buffer_cleanup(indio_dev); regulator_disable(st->reg); regulator_put(st->reg); iio_device_free(indio_dev); return 0; } static const struct spi_device_id ad7923_id[] = { {"ad7904", AD7904}, {"ad7914", AD7914}, {"ad7923", AD7924}, {"ad7924", AD7924}, {} }; MODULE_DEVICE_TABLE(spi, ad7923_id); static struct spi_driver ad7923_driver = { .driver = { .name = "ad7923", .owner = THIS_MODULE, }, .probe = ad7923_probe, .remove = ad7923_remove, .id_table = ad7923_id, }; module_spi_driver(ad7923_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_AUTHOR("Patrick Vasseur <patrick.vasseur@c-s.fr>"); MODULE_DESCRIPTION("Analog Devices AD7904/AD7914/AD7923/AD7924 ADC"); MODULE_LICENSE("GPL v2");
gpl-2.0
spleef/android_kernel_samsung_trlte
drivers/staging/usbip/stub_dev.c
2158
14072
/* * Copyright (C) 2003-2008 Takahiro Hirofuchi * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #include <linux/device.h> #include <linux/file.h> #include <linux/kthread.h> #include <linux/module.h> #include "usbip_common.h" #include "stub.h" /* * Define device IDs here if you want to explicitly limit exportable devices. * In most cases, wildcard matching will be okay because driver binding can be * changed dynamically by a userland program. 
 */
/*
 * Match table for this driver. The real matching policy lives in the
 * match_busid table consulted from stub_probe(); this table only exists
 * so the driver binds broadly (wildcard) unless explicit IDs are enabled.
 */
static struct usb_device_id stub_table[] = {
#if 0	/* just an example */
	{ USB_DEVICE(0x05ac, 0x0301) },	/* Mac 1 button mouse */
	{ USB_DEVICE(0x0430, 0x0009) },	/* Plat Home Keyboard */
	{ USB_DEVICE(0x059b, 0x0001) },	/* Iomega USB Zip 100 */
	{ USB_DEVICE(0x04b3, 0x4427) },	/* IBM USB CD-ROM */
	{ USB_DEVICE(0x05a9, 0xa511) },	/* LifeView USB cam */
	{ USB_DEVICE(0x55aa, 0x0201) },	/* Imation card reader */
	{ USB_DEVICE(0x046d, 0x0870) },	/* Qcam Express(QV-30) */
	{ USB_DEVICE(0x04bb, 0x0101) },	/* IO-DATA HD 120GB */
	{ USB_DEVICE(0x04bb, 0x0904) },	/* IO-DATA USB-ET/TX */
	{ USB_DEVICE(0x04bb, 0x0201) },	/* IO-DATA USB-ET/TX */
	{ USB_DEVICE(0x08bb, 0x2702) },	/* ONKYO USB Speaker */
	{ USB_DEVICE(0x046d, 0x08b2) },	/* Logicool Qcam 4000 Pro */
#endif
	/* magic for wild card */
	{ .driver_info = 1 },
	{ 0, }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, stub_table);

/*
 * usbip_status shows the status of usbip-host as long as this driver is bound
 * to the target device.
 *
 * Returns the numeric ud.status value (one of the SDEV_ST_* states) or
 * -ENODEV if no stub_device is attached to this interface.
 */
static ssize_t show_status(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct stub_device *sdev = dev_get_drvdata(dev);
	int status;

	if (!sdev) {
		dev_err(dev, "sdev is null\n");
		return -ENODEV;
	}

	/* ud.status is written under ud.lock elsewhere; take it for a
	 * consistent snapshot. */
	spin_lock_irq(&sdev->ud.lock);
	status = sdev->ud.status;
	spin_unlock_irq(&sdev->ud.lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", status);
}
static DEVICE_ATTR(usbip_status, S_IRUGO, show_status, NULL);

/*
 * usbip_sockfd gets a socket descriptor of an established TCP connection that
 * is used to transfer usbip requests by kernel threads. -1 is a magic number
 * by which usbip connection is finished.
*/ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct stub_device *sdev = dev_get_drvdata(dev); int sockfd = 0; struct socket *socket; ssize_t err = -EINVAL; if (!sdev) { dev_err(dev, "sdev is null\n"); return -ENODEV; } sscanf(buf, "%d", &sockfd); if (sockfd != -1) { dev_info(dev, "stub up\n"); spin_lock_irq(&sdev->ud.lock); if (sdev->ud.status != SDEV_ST_AVAILABLE) { dev_err(dev, "not ready\n"); goto err; } socket = sockfd_to_socket(sockfd); if (!socket) goto err; sdev->ud.tcp_socket = socket; spin_unlock_irq(&sdev->ud.lock); sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud, "stub_rx"); sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud, "stub_tx"); spin_lock_irq(&sdev->ud.lock); sdev->ud.status = SDEV_ST_USED; spin_unlock_irq(&sdev->ud.lock); } else { dev_info(dev, "stub down\n"); spin_lock_irq(&sdev->ud.lock); if (sdev->ud.status != SDEV_ST_USED) goto err; spin_unlock_irq(&sdev->ud.lock); usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN); } return count; err: spin_unlock_irq(&sdev->ud.lock); return err; } static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd); static int stub_add_files(struct device *dev) { int err = 0; err = device_create_file(dev, &dev_attr_usbip_status); if (err) goto err_status; err = device_create_file(dev, &dev_attr_usbip_sockfd); if (err) goto err_sockfd; err = device_create_file(dev, &dev_attr_usbip_debug); if (err) goto err_debug; return 0; err_debug: device_remove_file(dev, &dev_attr_usbip_sockfd); err_sockfd: device_remove_file(dev, &dev_attr_usbip_status); err_status: return err; } static void stub_remove_files(struct device *dev) { device_remove_file(dev, &dev_attr_usbip_status); device_remove_file(dev, &dev_attr_usbip_sockfd); device_remove_file(dev, &dev_attr_usbip_debug); } static void stub_shutdown_connection(struct usbip_device *ud) { struct stub_device *sdev = container_of(ud, struct stub_device, ud); /* * When removing an exported device, 
kernel panic sometimes occurred * and then EIP was sk_wait_data of stub_rx thread. Is this because * sk_wait_data returned though stub_rx thread was already finished by * step 1? */ if (ud->tcp_socket) { dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n", ud->tcp_socket); kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); } /* 1. stop threads */ if (ud->tcp_rx) { kthread_stop_put(ud->tcp_rx); ud->tcp_rx = NULL; } if (ud->tcp_tx) { kthread_stop_put(ud->tcp_tx); ud->tcp_tx = NULL; } /* * 2. close the socket * * tcp_socket is freed after threads are killed so that usbip_xmit does * not touch NULL socket. */ if (ud->tcp_socket) { fput(ud->tcp_socket->file); ud->tcp_socket = NULL; } /* 3. free used data */ stub_device_cleanup_urbs(sdev); /* 4. free stub_unlink */ { unsigned long flags; struct stub_unlink *unlink, *tmp; spin_lock_irqsave(&sdev->priv_lock, flags); list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) { list_del(&unlink->list); kfree(unlink); } list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) { list_del(&unlink->list); kfree(unlink); } spin_unlock_irqrestore(&sdev->priv_lock, flags); } } static void stub_device_reset(struct usbip_device *ud) { struct stub_device *sdev = container_of(ud, struct stub_device, ud); struct usb_device *udev = sdev->udev; int ret; dev_dbg(&udev->dev, "device reset"); ret = usb_lock_device_for_reset(udev, sdev->interface); if (ret < 0) { dev_err(&udev->dev, "lock for reset\n"); spin_lock_irq(&ud->lock); ud->status = SDEV_ST_ERROR; spin_unlock_irq(&ud->lock); return; } /* try to reset the device */ ret = usb_reset_device(udev); usb_unlock_device(udev); spin_lock_irq(&ud->lock); if (ret) { dev_err(&udev->dev, "device reset\n"); ud->status = SDEV_ST_ERROR; } else { dev_info(&udev->dev, "device reset\n"); ud->status = SDEV_ST_AVAILABLE; } spin_unlock_irq(&ud->lock); } static void stub_device_unusable(struct usbip_device *ud) { spin_lock_irq(&ud->lock); ud->status = SDEV_ST_ERROR; spin_unlock_irq(&ud->lock); } 
/**
 * stub_device_alloc - allocate a new stub_device struct
 * @udev: usb_device to be exported
 * @interface: usb_interface of a new device
 *
 * Allocates and initializes a new stub_device struct, takes references on
 * both @interface and @udev, and starts the usbip event handler thread.
 * Returns the new stub_device, or NULL on allocation failure.
 */
static struct stub_device *stub_device_alloc(struct usb_device *udev,
					     struct usb_interface *interface)
{
	struct stub_device *sdev;
	int busnum = interface_to_busnum(interface);
	int devnum = interface_to_devnum(interface);

	dev_dbg(&interface->dev, "allocating stub device");

	/* yes, it's a new device */
	sdev = kzalloc(sizeof(struct stub_device), GFP_KERNEL);
	if (!sdev)
		return NULL;

	/* both references are dropped again on disconnect */
	sdev->interface = usb_get_intf(interface);
	sdev->udev = usb_get_dev(udev);

	/*
	 * devid is defined with devnum when this driver is first allocated.
	 * devnum may change later if a device is reset. However, devid never
	 * changes during a usbip connection.
	 */
	sdev->devid		= (busnum << 16) | devnum;
	sdev->ud.side		= USBIP_STUB;
	sdev->ud.status		= SDEV_ST_AVAILABLE;
	spin_lock_init(&sdev->ud.lock);
	sdev->ud.tcp_socket	= NULL;

	INIT_LIST_HEAD(&sdev->priv_init);
	INIT_LIST_HEAD(&sdev->priv_tx);
	INIT_LIST_HEAD(&sdev->priv_free);
	INIT_LIST_HEAD(&sdev->unlink_free);
	INIT_LIST_HEAD(&sdev->unlink_tx);
	spin_lock_init(&sdev->priv_lock);

	init_waitqueue_head(&sdev->tx_waitq);

	sdev->ud.eh_ops.shutdown = stub_shutdown_connection;
	sdev->ud.eh_ops.reset    = stub_device_reset;
	sdev->ud.eh_ops.unusable = stub_device_unusable;

	usbip_start_eh(&sdev->ud);

	dev_dbg(&interface->dev, "register new interface\n");

	return sdev;
}

/* Counterpart of stub_device_alloc(); references on udev/interface are
 * released by the caller (stub_disconnect), not here. */
static void stub_device_free(struct stub_device *sdev)
{
	kfree(sdev);
}

/*
 * If a usb device has multiple active interfaces, this driver is bound to all
 * the active interfaces. However, usbip exports *a* usb device (i.e., not *an*
 * active interface). Currently, a userland program must ensure that it
 * looks at the usbip's sysfs entries of only the first active interface.
 *
 * TODO: use "struct usb_device_driver" to bind a usb device.
 * However, it seems it is not fully supported in mainline kernel yet
 * (2.6.19.2).
*/ static int stub_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct stub_device *sdev = NULL; const char *udev_busid = dev_name(interface->dev.parent); int err = 0; struct bus_id_priv *busid_priv; dev_dbg(&interface->dev, "Enter\n"); /* check we should claim or not by busid_table */ busid_priv = get_busid_priv(udev_busid); if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) || (busid_priv->status == STUB_BUSID_OTHER)) { dev_info(&interface->dev, "%s is not in match_busid table... " "skip!\n", udev_busid); /* * Return value should be ENODEV or ENOXIO to continue trying * other matched drivers by the driver core. * See driver_probe_device() in driver/base/dd.c */ return -ENODEV; } if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) { dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n", udev_busid); return -ENODEV; } if (!strcmp(udev->bus->bus_name, "vhci_hcd")) { dev_dbg(&udev->dev, "%s is attached on vhci_hcd... 
skip!\n", udev_busid); return -ENODEV; } if (busid_priv->status == STUB_BUSID_ALLOC) { sdev = busid_priv->sdev; if (!sdev) return -ENODEV; busid_priv->interf_count++; dev_info(&interface->dev, "usbip-host: register new interface " "(bus %u dev %u ifn %u)\n", udev->bus->busnum, udev->devnum, interface->cur_altsetting->desc.bInterfaceNumber); /* set private data to usb_interface */ usb_set_intfdata(interface, sdev); err = stub_add_files(&interface->dev); if (err) { dev_err(&interface->dev, "stub_add_files for %s\n", udev_busid); usb_set_intfdata(interface, NULL); busid_priv->interf_count--; return err; } usb_get_intf(interface); return 0; } /* ok, this is my device */ sdev = stub_device_alloc(udev, interface); if (!sdev) return -ENOMEM; dev_info(&interface->dev, "usbip-host: register new device " "(bus %u dev %u ifn %u)\n", udev->bus->busnum, udev->devnum, interface->cur_altsetting->desc.bInterfaceNumber); busid_priv->interf_count = 0; busid_priv->shutdown_busid = 0; /* set private data to usb_interface */ usb_set_intfdata(interface, sdev); busid_priv->interf_count++; busid_priv->sdev = sdev; err = stub_add_files(&interface->dev); if (err) { dev_err(&interface->dev, "stub_add_files for %s\n", udev_busid); usb_set_intfdata(interface, NULL); usb_put_intf(interface); usb_put_dev(udev); kthread_stop_put(sdev->ud.eh); busid_priv->interf_count = 0; busid_priv->sdev = NULL; stub_device_free(sdev); return err; } busid_priv->status = STUB_BUSID_ALLOC; return 0; } static void shutdown_busid(struct bus_id_priv *busid_priv) { if (busid_priv->sdev && !busid_priv->shutdown_busid) { busid_priv->shutdown_busid = 1; usbip_event_add(&busid_priv->sdev->ud, SDEV_EVENT_REMOVED); /* wait for the stop of the event handler */ usbip_stop_eh(&busid_priv->sdev->ud); } } /* * called in usb_disconnect() or usb_deregister() * but only if actconfig(active configuration) exists */ static void stub_disconnect(struct usb_interface *interface) { struct stub_device *sdev; const char *udev_busid = 
dev_name(interface->dev.parent); struct bus_id_priv *busid_priv; dev_dbg(&interface->dev, "Enter\n"); busid_priv = get_busid_priv(udev_busid); if (!busid_priv) { BUG(); return; } sdev = usb_get_intfdata(interface); /* get stub_device */ if (!sdev) { dev_err(&interface->dev, "could not get device"); return; } usb_set_intfdata(interface, NULL); /* * NOTE: rx/tx threads are invoked for each usb_device. */ stub_remove_files(&interface->dev); /* If usb reset is called from event handler */ if (busid_priv->sdev->ud.eh == current) { busid_priv->interf_count--; return; } if (busid_priv->interf_count > 1) { busid_priv->interf_count--; shutdown_busid(busid_priv); usb_put_intf(interface); return; } busid_priv->interf_count = 0; /* shutdown the current connection */ shutdown_busid(busid_priv); usb_put_dev(sdev->udev); usb_put_intf(interface); /* free sdev */ busid_priv->sdev = NULL; stub_device_free(sdev); if (busid_priv->status == STUB_BUSID_ALLOC) { busid_priv->status = STUB_BUSID_ADDED; } else { busid_priv->status = STUB_BUSID_OTHER; del_match_busid((char *)udev_busid); } } /* * Presence of pre_reset and post_reset prevents the driver from being unbound * when the device is being reset */ static int stub_pre_reset(struct usb_interface *interface) { dev_dbg(&interface->dev, "pre_reset\n"); return 0; } static int stub_post_reset(struct usb_interface *interface) { dev_dbg(&interface->dev, "post_reset\n"); return 0; } struct usb_driver stub_driver = { .name = "usbip-host", .probe = stub_probe, .disconnect = stub_disconnect, .id_table = stub_table, .pre_reset = stub_pre_reset, .post_reset = stub_post_reset, };
gpl-2.0
bigzz/ZenKernel_Shamu
arch/sparc/kernel/leon_pci.c
2414
4637
/*
 * leon_pci.c: LEON Host PCI support
 *
 * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
 *
 * Code is partially derived from pcic.c
 */

#include <linux/of_device.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/leon.h>
#include <asm/leon_pci.h>

/* The LEON architecture does not rely on a BIOS or bootloader to setup
 * PCI for us. The Linux generic routines are used to setup resources,
 * reset values of configuration-space register settings are preserved.
 *
 * PCI Memory and Prefetchable Memory is direct-mapped. However I/O Space is
 * accessed through a Window which is translated to low 64KB in PCI space, the
 * first 4KB is not used so 60KB is available.
 */

/*
 * Register the host controller's resources and scan the root bus.
 * On success, IRQs are fixed up via the controller's custom map_irq
 * routine and unassigned BARs are assigned; on scan failure the
 * resource list is freed again.
 */
void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
{
	LIST_HEAD(resources);
	struct pci_bus *root_bus;

	/* I/O window is offset: first 4KB of the 64KB window is unused */
	pci_add_resource_offset(&resources, &info->io_space,
				info->io_space.start - 0x1000);
	pci_add_resource(&resources, &info->mem_space);
	info->busn.flags = IORESOURCE_BUS;
	pci_add_resource(&resources, &info->busn);

	root_bus = pci_scan_root_bus(&ofdev->dev, 0, info->ops, info,
				     &resources);
	if (root_bus) {
		/* Setup IRQs of all devices using custom routines */
		pci_fixup_irqs(pci_common_swizzle, info->map_irq);

		/* Assign devices with resources */
		pci_assign_unassigned_resources();
	} else {
		pci_free_resource_list(&resources);
	}
}

/*
 * Enable I/O and/or memory decoding on each device that actually has a
 * BAR of that type, since no bootloader has done this for us.
 */
void pcibios_fixup_bus(struct pci_bus *pbus)
{
	struct pci_dev *dev;
	int i, has_io, has_mem;
	u16 cmd;

	list_for_each_entry(dev, &pbus->devices, bus_list) {
		/*
		 * We can not rely on that the bootloader has enabled I/O
		 * or memory access to PCI devices. Instead we enable it here
		 * if the device has BARs of respective type.
		 */
		has_io = has_mem = 0;
		for (i = 0; i < PCI_ROM_RESOURCE; i++) {
			unsigned long f = dev->resource[i].flags;
			if (f & IORESOURCE_IO)
				has_io = 1;
			else if (f & IORESOURCE_MEM)
				has_mem = 1;
		}
		/* ROM BARs are mapped into 32-bit memory space */
		if (dev->resource[PCI_ROM_RESOURCE].end != 0) {
			dev->resource[PCI_ROM_RESOURCE].flags |=
							IORESOURCE_ROM_ENABLE;
			has_mem = 1;
		}
		pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd);
		if (has_io && !(cmd & PCI_COMMAND_IO)) {
#ifdef CONFIG_PCI_DEBUG
			printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n",
					 pci_name(dev));
#endif
			cmd |= PCI_COMMAND_IO;
			pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
									cmd);
		}
		if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
#ifdef CONFIG_PCI_DEBUG
			/* fixed: adjacent literals lacked a separating space,
			 * which printed "...for devNAME" */
			printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev %s\n",
					 pci_name(dev));
#endif
			cmd |= PCI_COMMAND_MEMORY;
			pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
									cmd);
		}
	}
}

/* LEON has no special alignment constraints; keep the resource start. */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	return res->start;
}

/* Standard enable path: just turn on the decoded resources in @mask. */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}

/* in/out routines taken from pcic.c
 *
 * This probably belongs here rather than ioport.c because
 * we do not want this crud linked into SBus kernels.
 * Also, think for a moment about likes of floppy.c that
 * include architecture specific parts. They may want to redefine ins/outs.
 *
 * We do not use horrible macros here because we want to
 * advance pointer by sizeof(size).
*/ void outsb(unsigned long addr, const void *src, unsigned long count) { while (count) { count -= 1; outb(*(const char *)src, addr); src += 1; /* addr += 1; */ } } EXPORT_SYMBOL(outsb); void outsw(unsigned long addr, const void *src, unsigned long count) { while (count) { count -= 2; outw(*(const short *)src, addr); src += 2; /* addr += 2; */ } } EXPORT_SYMBOL(outsw); void outsl(unsigned long addr, const void *src, unsigned long count) { while (count) { count -= 4; outl(*(const long *)src, addr); src += 4; /* addr += 4; */ } } EXPORT_SYMBOL(outsl); void insb(unsigned long addr, void *dst, unsigned long count) { while (count) { count -= 1; *(unsigned char *)dst = inb(addr); dst += 1; /* addr += 1; */ } } EXPORT_SYMBOL(insb); void insw(unsigned long addr, void *dst, unsigned long count) { while (count) { count -= 2; *(unsigned short *)dst = inw(addr); dst += 2; /* addr += 2; */ } } EXPORT_SYMBOL(insw); void insl(unsigned long addr, void *dst, unsigned long count) { while (count) { count -= 4; /* * XXX I am sure we are in for an unaligned trap here. */ *(unsigned long *)dst = inl(addr); dst += 4; /* addr += 4; */ } } EXPORT_SYMBOL(insl);
gpl-2.0
blacktigersoftware/hummingboard-cuboxi-kernel
arch/arm/mach-spear6xx/spear6xx.c
2926
4252
/* * arch/arm/mach-spear6xx/spear6xx.c * * SPEAr6XX machines common source file * * Copyright (C) 2009 ST Microelectronics * Rajeev Kumar<rajeev-dlh.kumar@st.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/types.h> #include <linux/amba/pl061.h> #include <linux/ptrace.h> #include <linux/io.h> #include <asm/hardware/vic.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <mach/generic.h> #include <mach/hardware.h> #include <mach/irqs.h> /* Add spear6xx machines common devices here */ /* uart device registration */ struct amba_device uart_device[] = { { .dev = { .init_name = "uart0", }, .res = { .start = SPEAR6XX_ICM1_UART0_BASE, .end = SPEAR6XX_ICM1_UART0_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, .irq = {IRQ_UART_0, NO_IRQ}, }, { .dev = { .init_name = "uart1", }, .res = { .start = SPEAR6XX_ICM1_UART1_BASE, .end = SPEAR6XX_ICM1_UART1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, .irq = {IRQ_UART_1, NO_IRQ}, } }; /* gpio device registration */ static struct pl061_platform_data gpio_plat_data[] = { { .gpio_base = 0, .irq_base = SPEAR_GPIO0_INT_BASE, }, { .gpio_base = 8, .irq_base = SPEAR_GPIO1_INT_BASE, }, { .gpio_base = 16, .irq_base = SPEAR_GPIO2_INT_BASE, }, }; struct amba_device gpio_device[] = { { .dev = { .init_name = "gpio0", .platform_data = &gpio_plat_data[0], }, .res = { .start = SPEAR6XX_CPU_GPIO_BASE, .end = SPEAR6XX_CPU_GPIO_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, .irq = {IRQ_LOCAL_GPIO, NO_IRQ}, }, { .dev = { .init_name = "gpio1", .platform_data = &gpio_plat_data[1], }, .res = { .start = SPEAR6XX_ICM3_GPIO_BASE, .end = SPEAR6XX_ICM3_GPIO_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, .irq = {IRQ_BASIC_GPIO, NO_IRQ}, }, { .dev = { .init_name = "gpio2", .platform_data = &gpio_plat_data[2], }, .res = { .start = SPEAR6XX_ICM2_GPIO_BASE, .end = SPEAR6XX_ICM2_GPIO_BASE + SZ_4K - 1, 
.flags = IORESOURCE_MEM, }, .irq = {IRQ_APPL_GPIO, NO_IRQ}, } }; /* This will add devices, and do machine specific tasks */ void __init spear6xx_init(void) { /* nothing to do for now */ } /* This will initialize vic */ void __init spear6xx_init_irq(void) { vic_init((void __iomem *)VA_SPEAR6XX_CPU_VIC_PRI_BASE, 0, ~0, 0); vic_init((void __iomem *)VA_SPEAR6XX_CPU_VIC_SEC_BASE, 32, ~0, 0); } /* Following will create static virtual/physical mappings */ static struct map_desc spear6xx_io_desc[] __initdata = { { .virtual = VA_SPEAR6XX_ICM1_UART0_BASE, .pfn = __phys_to_pfn(SPEAR6XX_ICM1_UART0_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = VA_SPEAR6XX_CPU_VIC_PRI_BASE, .pfn = __phys_to_pfn(SPEAR6XX_CPU_VIC_PRI_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = VA_SPEAR6XX_CPU_VIC_SEC_BASE, .pfn = __phys_to_pfn(SPEAR6XX_CPU_VIC_SEC_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = VA_SPEAR6XX_ICM3_SYS_CTRL_BASE, .pfn = __phys_to_pfn(SPEAR6XX_ICM3_SYS_CTRL_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = VA_SPEAR6XX_ICM3_MISC_REG_BASE, .pfn = __phys_to_pfn(SPEAR6XX_ICM3_MISC_REG_BASE), .length = SZ_4K, .type = MT_DEVICE }, }; /* This will create static memory mapping for selected devices */ void __init spear6xx_map_io(void) { iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc)); /* This will initialize clock framework */ spear6xx_clk_init(); } static void __init spear6xx_timer_init(void) { char pclk_name[] = "pll3_48m_clk"; struct clk *gpt_clk, *pclk; /* get the system timer clock */ gpt_clk = clk_get_sys("gpt0", NULL); if (IS_ERR(gpt_clk)) { pr_err("%s:couldn't get clk for gpt\n", __func__); BUG(); } /* get the suitable parent clock for timer*/ pclk = clk_get(NULL, pclk_name); if (IS_ERR(pclk)) { pr_err("%s:couldn't get %s as parent for gpt\n", __func__, pclk_name); BUG(); } clk_set_parent(gpt_clk, pclk); clk_put(gpt_clk); clk_put(pclk); spear_setup_timer(); } struct sys_timer spear6xx_timer = { .init = spear6xx_timer_init, };
gpl-2.0
talnoah/m8_sense
drivers/md/persistent-data/dm-btree-remove.c
3438
14893
/* * Copyright (C) 2011 Red Hat, Inc. * * This file is released under the GPL. */ #include "dm-btree.h" #include "dm-btree-internal.h" #include "dm-transaction-manager.h" #include <linux/export.h> /* * Removing an entry from a btree * ============================== * * A very important constraint for our btree is that no node, except the * root, may have fewer than a certain number of entries. * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES). * * Ensuring this is complicated by the way we want to only ever hold the * locks on 2 nodes concurrently, and only change nodes in a top to bottom * fashion. * * Each node may have a left or right sibling. When decending the spine, * if a node contains only MIN_ENTRIES then we try and increase this to at * least MIN_ENTRIES + 1. We do this in the following ways: * * [A] No siblings => this can only happen if the node is the root, in which * case we copy the childs contents over the root. * * [B] No left sibling * ==> rebalance(node, right sibling) * * [C] No right sibling * ==> rebalance(left sibling, node) * * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD * ==> delete node adding it's contents to left and right * * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD * ==> rebalance(left, node, right) * * After these operations it's possible that the our original node no * longer contains the desired sub tree. For this reason this rebalancing * is performed on the children of the current node. This also avoids * having a special case for the root. * * Once this rebalancing has occurred we can then step into the child node * for internal nodes. Or delete the entry for leaf nodes. */ /* * Some little utilities for moving node data around. 
 */
/*
 * Shift all entries of node @n by @shift slots within the node.
 * shift < 0 moves entries towards index 0 (discarding the first -shift
 * entries); shift > 0 opens a gap of @shift slots at the front.
 * Keys and values are moved with memmove() since the regions overlap.
 * Note: header.nr_entries is NOT updated here; callers adjust it.
 */
static void node_shift(struct node *n, int shift)
{
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
	uint32_t value_size = le32_to_cpu(n->header.value_size);

	if (shift < 0) {
		shift = -shift;
		BUG_ON(shift > nr_entries);
		/* the key region must not run into the value region */
		BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift));
		memmove(key_ptr(n, 0),
			key_ptr(n, shift),
			(nr_entries - shift) * sizeof(__le64));
		memmove(value_ptr(n, 0),
			value_ptr(n, shift),
			(nr_entries - shift) * value_size);
	} else {
		BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
		memmove(key_ptr(n, shift),
			key_ptr(n, 0),
			nr_entries * sizeof(__le64));
		memmove(value_ptr(n, shift),
			value_ptr(n, 0),
			nr_entries * value_size);
	}
}

/*
 * Copy @shift entries between sibling nodes @left and @right.
 * shift < 0 appends the first -shift entries of @right after @left's
 * entries; shift > 0 copies @left's last @shift entries to the front of
 * @right (callers make room first via node_shift()).
 * As with node_shift(), nr_entries bookkeeping is left to the caller.
 */
static void node_copy(struct node *left, struct node *right, int shift)
{
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t value_size = le32_to_cpu(left->header.value_size);
	BUG_ON(value_size != le32_to_cpu(right->header.value_size));

	if (shift < 0) {
		shift = -shift;
		BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
		memcpy(key_ptr(left, nr_left),
		       key_ptr(right, 0),
		       shift * sizeof(__le64));
		memcpy(value_ptr(left, nr_left),
		       value_ptr(right, 0),
		       shift * value_size);
	} else {
		BUG_ON(shift > le32_to_cpu(right->header.max_entries));
		memcpy(key_ptr(right, 0),
		       key_ptr(left, nr_left - shift),
		       shift * sizeof(__le64));
		memcpy(value_ptr(right, 0),
		       value_ptr(left, nr_left - shift),
		       shift * value_size);
	}
}

/*
 * Delete a specific entry from a leaf node.
*/

/*
 * Removes the entry at @index from node @n, shuffling the subsequent
 * keys/values down one slot to keep the node dense, then drops the
 * entry count.  Keys are __le64; value width comes from the node header.
 */
static void delete_at(struct node *n, unsigned index)
{
	unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
	unsigned nr_to_copy = nr_entries - (index + 1);
	uint32_t value_size = le32_to_cpu(n->header.value_size);
	BUG_ON(index >= nr_entries);

	if (nr_to_copy) {
		/* memmove: source and destination overlap by one slot */
		memmove(key_ptr(n, index),
			key_ptr(n, index + 1),
			nr_to_copy * sizeof(__le64));

		memmove(value_ptr(n, index),
			value_ptr(n, index + 1),
			nr_to_copy * value_size);
	}

	n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}

/*
 * A node at or below a third of its capacity is considered for
 * merging/rebalancing by the callers below.
 */
static unsigned merge_threshold(struct node *n)
{
	return le32_to_cpu(n->header.max_entries) / 3;
}

/*
 * A child node opened for rebalancing: its slot in the parent, the
 * shadowed block, and the mapped node data.
 */
struct child {
	unsigned index;
	struct dm_block *block;
	struct node *n;
};

/*
 * Value type for internal (non-leaf) levels, whose values are just
 * __le64 block pointers with no inc/dec/equal hooks.
 */
static struct dm_btree_value_type le64_type = {
	.context = NULL,
	.size = sizeof(__le64),
	.inc = NULL,
	.dec = NULL,
	.equal = NULL
};

/*
 * Opens (shadows) the child at parent[@index] into @result and patches
 * the parent's value to point at the shadow's new location.  If the
 * shadow operation reported a copy (inc), child references are bumped.
 * Returns 0 or a negative error from dm_tm_shadow_block().
 */
static int init_child(struct dm_btree_info *info, struct node *parent,
		      unsigned index, struct child *result)
{
	int r, inc;
	dm_block_t root;

	result->index = index;
	root = value64(parent, index);

	r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
			       &result->block, &inc);
	if (r)
		return r;

	result->n = dm_block_data(result->block);

	if (inc)
		inc_children(info->tm, result->n, &le64_type);

	/* re-point the parent slot at the (possibly moved) shadow */
	*((__le64 *) value_ptr(parent, index)) =
		cpu_to_le64(dm_block_location(result->block));

	return 0;
}

/* Releases the lock taken by init_child(). */
static int exit_child(struct dm_btree_info *info, struct child *c)
{
	return dm_tm_unlock(info->tm, c->block);
}

/*
 * Moves |count| entries between two adjacent siblings and fixes up both
 * entry counts: count > 0 moves entries from @left to the front of
 * @right; count < 0 moves entries from @right onto the end of @left.
 */
static void shift(struct node *left, struct node *right, int count)
{
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);

	BUG_ON(max_entries != r_max_entries);
	BUG_ON(nr_left - count > max_entries);
	BUG_ON(nr_right + count > max_entries);

	if (!count)
		return;

	if (count > 0) {
		/* make room at the front of right, then copy from left */
		node_shift(right, count);
		node_copy(left, right, count);
	} else {
		/* copy from right into left, then close the gap in right */
		node_copy(left, right, count);
		node_shift(right, count);
	}

	left->header.nr_entries = cpu_to_le32(nr_left - count);
	right->header.nr_entries = cpu_to_le32(nr_right + count);
}

/*
 * Two-way rebalance: if the combined population is below the merge
 * threshold the right node is folded into the left and its parent entry
 * removed; otherwise entries are shared evenly and the parent's
 * separator key for the right child is refreshed.
 */
static void __rebalance2(struct dm_btree_info *info, struct node *parent,
			 struct child *l, struct child *r)
{
	struct node *left = l->n;
	struct node *right = r->n;
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	unsigned threshold = 2 * merge_threshold(left) + 1;

	if (nr_left + nr_right < threshold) {
		/*
		 * Merge
		 */
		node_copy(left, right, -nr_right);
		left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
		delete_at(parent, r->index);

		/*
		 * We need to decrement the right block, but not its
		 * children, since they're still referenced by left.
		 */
		dm_tm_dec(info->tm, dm_block_location(r->block));
	} else {
		/*
		 * Rebalance.
		 */
		unsigned target_left = (nr_left + nr_right) / 2;
		shift(left, right, nr_left - target_left);
		*key_ptr(parent, r->index) = right->keys[0];
	}
}

/*
 * Opens the children at @left_index and @left_index + 1, rebalances
 * them, and releases both.  On any failure the already-opened child is
 * released before returning the error.
 */
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
		      unsigned left_index)
{
	int r;
	struct node *parent;
	struct child left, right;

	parent = dm_block_data(shadow_current(s));

	r = init_child(info, parent, left_index, &left);
	if (r)
		return r;

	r = init_child(info, parent, left_index + 1, &right);
	if (r) {
		exit_child(info, &left);
		return r;
	}

	__rebalance2(info, parent, &left, &right);

	r = exit_child(info, &left);
	if (r) {
		exit_child(info, &right);
		return r;
	}

	return exit_child(info, &right);
}

/*
 * We dump as many entries from center as possible into left, then the rest
 * in right, then rebalance2.  This wastes some cpu, but I want something
 * simple atm.
 */
static void delete_center_node(struct dm_btree_info *info, struct node *parent,
			       struct child *l, struct child *c, struct child *r,
			       struct node *left, struct node *center, struct node *right,
			       uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	/* NOTE(review): local 'shift' shadows the shift() helper above */
	unsigned shift = min(max_entries - nr_left, nr_center);

	BUG_ON(nr_left + shift > max_entries);
	node_copy(left, center, -shift);
	left->header.nr_entries = cpu_to_le32(nr_left + shift);

	if (shift != nr_center) {
		/* left is full; push the remainder of center into right */
		shift = nr_center - shift;
		BUG_ON((nr_right + shift) > max_entries);
		node_shift(right, shift);
		node_copy(center, right, shift);
		right->header.nr_entries = cpu_to_le32(nr_right + shift);
	}
	*key_ptr(parent, r->index) = right->keys[0];

	/* center is now empty: drop it from the parent ... */
	delete_at(parent, c->index);
	r->index--;

	/* ... and release its block, then rebalance the survivors */
	dm_tm_dec(info->tm, dm_block_location(c->block));
	__rebalance2(info, parent, l, r);
}

/*
 * Redistributes entries among 3 sibling nodes.
 */
static void redistribute3(struct dm_btree_info *info, struct node *parent,
			  struct child *l, struct child *c, struct child *r,
			  struct node *left, struct node *center, struct node *right,
			  uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
	int s;
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	unsigned target = (nr_left + nr_center + nr_right) / 3;
	BUG_ON(target > max_entries);

	if (nr_left < nr_right) {
		s = nr_left - target;

		if (s < 0 && nr_center < -s) {
			/* not enough in central node */
			shift(left, center, nr_center);
			s = nr_center - target;
			shift(left, right, s);
			nr_right += s;
		} else
			shift(left, center, s);

		shift(center, right, target - nr_right);

	} else {
		s = target - nr_right;
		if (s > 0 && nr_center < s) {
			/* not enough in central node */
			shift(center, right, nr_center);
			s = target - nr_center;
			shift(left, right, s);
			nr_left -= s;
		} else
			shift(center, right, s);

		shift(left, center, nr_left - target);
	}

	/* refresh the parent's separator keys for center and right */
	*key_ptr(parent, c->index) = center->keys[0];
	*key_ptr(parent, r->index) = right->keys[0];
}

/*
 * Three-way rebalance: merge away the center node when the total
 * population is low enough, otherwise spread entries evenly across the
 * three siblings.
 */
static void __rebalance3(struct dm_btree_info *info, struct node *parent,
			 struct child *l, struct child *c, struct child *r)
{
	struct node *left = l->n;
	struct node *center = c->n;
	struct node *right = r->n;

	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);

	unsigned threshold = merge_threshold(left) * 4 + 1;

	BUG_ON(left->header.max_entries != center->header.max_entries);
	BUG_ON(center->header.max_entries != right->header.max_entries);

	if ((nr_left + nr_center + nr_right) < threshold)
		delete_center_node(info, parent, l, c, r, left, center, right,
				   nr_left, nr_center, nr_right);
	else
		redistribute3(info, parent, l, c, r, left, center, right,
			      nr_left, nr_center, nr_right);
}

/*
 * Opens three consecutive children starting at @left_index, rebalances
 * them, and releases all three; partial failures release whatever was
 * already opened.
 */
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
		      unsigned left_index)
{
	int r;
	struct node *parent = dm_block_data(shadow_current(s));
	struct child left, center, right;

	/*
	 * FIXME: fill out an array?
	 */
	r = init_child(info, parent, left_index, &left);
	if (r)
		return r;

	r = init_child(info, parent, left_index + 1, &center);
	if (r) {
		exit_child(info, &left);
		return r;
	}

	r = init_child(info, parent, left_index + 2, &right);
	if (r) {
		exit_child(info, &left);
		exit_child(info, &center);
		return r;
	}

	__rebalance3(info, parent, &left, &center, &right);

	r = exit_child(info, &left);
	if (r) {
		exit_child(info, &center);
		exit_child(info, &right);
		return r;
	}

	r = exit_child(info, &center);
	if (r) {
		exit_child(info, &right);
		return r;
	}

	r = exit_child(info, &right);
	if (r)
		return r;

	return 0;
}

/*
 * Reads block @b under a read lock just long enough to report its entry
 * count via @result.
 */
static int get_nr_entries(struct dm_transaction_manager *tm,
			  dm_block_t b, uint32_t *result)
{
	int r;
	struct dm_block *block;
	struct node *n;

	r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
	if (r)
		return r;

	n = dm_block_data(block);
	*result = le32_to_cpu(n->header.nr_entries);

	return dm_tm_unlock(tm, block);
}

/*
 * Ensures the child of the current spine node that covers @key will not
 * underflow after a removal.  A node with a single child is collapsed
 * into its parent; otherwise the child and one or two siblings are
 * rebalanced.  Returns -ENODATA when @key falls before every entry.
 */
static int rebalance_children(struct shadow_spine *s,
			      struct dm_btree_info *info, uint64_t key)
{
	int i, r, has_left_sibling, has_right_sibling;
	uint32_t child_entries;
	struct node *n;

	n = dm_block_data(shadow_current(s));

	if (le32_to_cpu(n->header.nr_entries) == 1) {
		/* only child: copy it up over this node and drop a level */
		struct dm_block *child;
		dm_block_t b = value64(n, 0);

		r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
		if (r)
			return r;

		memcpy(n, dm_block_data(child),
		       dm_bm_block_size(dm_tm_get_bm(info->tm)));
		r = dm_tm_unlock(info->tm, child);
		if (r)
			return r;

		dm_tm_dec(info->tm, dm_block_location(child));
		return 0;
	}

	i = lower_bound(n, key);
	if (i < 0)
		return -ENODATA;

	r = get_nr_entries(info->tm, value64(n, i), &child_entries);
	if (r)
		return r;

	has_left_sibling = i > 0;
	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);

	if (!has_left_sibling)
		r = rebalance2(s, info, i);

	else if (!has_right_sibling)
		r = rebalance2(s, info, i - 1);

	else
		r = rebalance3(s, info, i - 1);

	return r;
}

/*
 * Locates @key in leaf @n.  On success stores the slot in @index and
 * returns 0; returns -ENODATA when the key is absent.
 */
static int do_leaf(struct node *n, uint64_t key, unsigned *index)
{
	int i = lower_bound(n, key);

	if ((i < 0) ||
	    (i >= le32_to_cpu(n->header.nr_entries)) ||
	    (le64_to_cpu(n->keys[i]) != key))
		return -ENODATA;

	*index = i;

	return 0;
}

/*
 * Prepares for removal from one level of the hierarchy.  The caller must
 * call delete_at() to remove the entry at index.  Walks from @root to
 * the leaf holding @key, shadowing and pre-emptively rebalancing each
 * node on the way down.
 */
static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, dm_block_t root,
		      uint64_t key, unsigned *index)
{
	int i = *index, r;
	struct node *n;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			break;

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s)) {
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
			       &location, sizeof(__le64));
		}

		n = dm_block_data(shadow_current(s));

		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		r = rebalance_children(s, info, key);
		if (r)
			break;

		/* rebalancing may have rewritten this node; re-read it */
		n = dm_block_data(shadow_current(s));
		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		i = lower_bound(n, key);

		/*
		 * We know the key is present, or else
		 * rebalance_children would have returned
		 * -ENODATA
		 */
		root = value64(n, i);
	}

	return r;
}

/*
 * Removes the entry addressed by @keys (one key per btree level) from
 * the tree at @root, releasing the value via the value type's dec hook
 * if one is set.  The new (shadowed) root is returned in @new_root.
 * Returns 0, -ENODATA if the key is absent, or another negative error.
 */
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, dm_block_t *new_root)
{
	unsigned level, last_level = info->levels - 1;
	int index = 0, r = 0;
	struct shadow_spine spine;
	struct node *n;

	init_shadow_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		/* NOTE(review): int* cast to unsigned* for remove_raw() */
		r = remove_raw(&spine, info,
			       (level == last_level ?
				&info->value_type : &le64_type),
			       root, keys[level], (unsigned *)&index);
		if (r < 0)
			break;

		n = dm_block_data(shadow_current(&spine));
		if (level != last_level) {
			/* inner levels hold the root of the next level */
			root = value64(n, index);
			continue;
		}

		BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));

		if (info->value_type.dec)
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));

		delete_at(n, index);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove);
gpl-2.0
zanezam/boeffla-kernel-oos-bacon
net/bridge/br_multicast.c
3438
39246
/* * Bridge multicast support. * * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <linux/err.h> #include <linux/if_ether.h> #include <linux/igmp.h> #include <linux/jhash.h> #include <linux/kernel.h> #include <linux/log2.h> #include <linux/netdevice.h> #include <linux/netfilter_bridge.h> #include <linux/random.h> #include <linux/rculist.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/timer.h> #include <net/ip.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> #include <net/mld.h> #include <net/addrconf.h> #include <net/ip6_checksum.h> #endif #include "br_private.h" #define mlock_dereference(X, br) \ rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) #if IS_ENABLED(CONFIG_IPV6) static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) { if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr)) return 1; return 0; } #endif static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) { if (a->proto != b->proto) return 0; switch (a->proto) { case htons(ETH_P_IP): return a->u.ip4 == b->u.ip4; #if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); #endif } return 0; } static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) { return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); } #if IS_ENABLED(CONFIG_IPV6) static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, const struct in6_addr *ip) { return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1); } #endif static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, struct br_ip *ip) { switch (ip->proto) { case htons(ETH_P_IP): return __br_ip4_hash(mdb, 
ip->u.ip4); #if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): return __br_ip6_hash(mdb, &ip->u.ip6); #endif } return 0; } static struct net_bridge_mdb_entry *__br_mdb_ip_get( struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) { struct net_bridge_mdb_entry *mp; struct hlist_node *p; hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { if (br_ip_equal(&mp->addr, dst)) return mp; } return NULL; } static struct net_bridge_mdb_entry *br_mdb_ip_get( struct net_bridge_mdb_htable *mdb, struct br_ip *dst) { if (!mdb) return NULL; return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); } static struct net_bridge_mdb_entry *br_mdb_ip4_get( struct net_bridge_mdb_htable *mdb, __be32 dst) { struct br_ip br_dst; br_dst.u.ip4 = dst; br_dst.proto = htons(ETH_P_IP); return br_mdb_ip_get(mdb, &br_dst); } #if IS_ENABLED(CONFIG_IPV6) static struct net_bridge_mdb_entry *br_mdb_ip6_get( struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst) { struct br_ip br_dst; br_dst.u.ip6 = *dst; br_dst.proto = htons(ETH_P_IPV6); return br_mdb_ip_get(mdb, &br_dst); } #endif struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, struct sk_buff *skb) { struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); struct br_ip ip; if (br->multicast_disabled) return NULL; if (BR_INPUT_SKB_CB(skb)->igmp) return NULL; ip.proto = skb->protocol; switch (skb->protocol) { case htons(ETH_P_IP): ip.u.ip4 = ip_hdr(skb)->daddr; break; #if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): ip.u.ip6 = ipv6_hdr(skb)->daddr; break; #endif default: return NULL; } return br_mdb_ip_get(mdb, &ip); } static void br_mdb_free(struct rcu_head *head) { struct net_bridge_mdb_htable *mdb = container_of(head, struct net_bridge_mdb_htable, rcu); struct net_bridge_mdb_htable *old = mdb->old; mdb->old = NULL; kfree(old->mhash); kfree(old); } static int br_mdb_copy(struct net_bridge_mdb_htable *new, struct net_bridge_mdb_htable *old, int elasticity) { struct net_bridge_mdb_entry *mp; 
struct hlist_node *p; int maxlen; int len; int i; for (i = 0; i < old->max; i++) hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) hlist_add_head(&mp->hlist[new->ver], &new->mhash[br_ip_hash(new, &mp->addr)]); if (!elasticity) return 0; maxlen = 0; for (i = 0; i < new->max; i++) { len = 0; hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver]) len++; if (len > maxlen) maxlen = len; } return maxlen > elasticity ? -EINVAL : 0; } static void br_multicast_free_pg(struct rcu_head *head) { struct net_bridge_port_group *p = container_of(head, struct net_bridge_port_group, rcu); kfree(p); } static void br_multicast_free_group(struct rcu_head *head) { struct net_bridge_mdb_entry *mp = container_of(head, struct net_bridge_mdb_entry, rcu); kfree(mp); } static void br_multicast_group_expired(unsigned long data) { struct net_bridge_mdb_entry *mp = (void *)data; struct net_bridge *br = mp->br; struct net_bridge_mdb_htable *mdb; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || timer_pending(&mp->timer)) goto out; mp->mglist = false; if (mp->ports) goto out; mdb = mlock_dereference(br->mdb, br); hlist_del_rcu(&mp->hlist[mdb->ver]); mdb->size--; call_rcu_bh(&mp->rcu, br_multicast_free_group); out: spin_unlock(&br->multicast_lock); } static void br_multicast_del_pg(struct net_bridge *br, struct net_bridge_port_group *pg) { struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; mdb = mlock_dereference(br->mdb, br); mp = br_mdb_ip_get(mdb, &pg->addr); if (WARN_ON(!mp)) return; for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (p != pg) continue; rcu_assign_pointer(*pp, p->next); hlist_del_init(&p->mglist); del_timer(&p->timer); call_rcu_bh(&p->rcu, br_multicast_free_pg); if (!mp->ports && !mp->mglist && netif_running(br->dev)) mod_timer(&mp->timer, jiffies); return; } WARN_ON(1); } static void br_multicast_port_group_expired(unsigned 
long data) { struct net_bridge_port_group *pg = (void *)data; struct net_bridge *br = pg->port->br; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || timer_pending(&pg->timer) || hlist_unhashed(&pg->mglist)) goto out; br_multicast_del_pg(br, pg); out: spin_unlock(&br->multicast_lock); } static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max, int elasticity) { struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1); struct net_bridge_mdb_htable *mdb; int err; mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC); if (!mdb) return -ENOMEM; mdb->max = max; mdb->old = old; mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC); if (!mdb->mhash) { kfree(mdb); return -ENOMEM; } mdb->size = old ? old->size : 0; mdb->ver = old ? old->ver ^ 1 : 0; if (!old || elasticity) get_random_bytes(&mdb->secret, sizeof(mdb->secret)); else mdb->secret = old->secret; if (!old) goto out; err = br_mdb_copy(mdb, old, elasticity); if (err) { kfree(mdb->mhash); kfree(mdb); return err; } call_rcu_bh(&mdb->rcu, br_mdb_free); out: rcu_assign_pointer(*mdbp, mdb); return 0; } static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, __be32 group) { struct sk_buff *skb; struct igmphdr *ih; struct ethhdr *eth; struct iphdr *iph; skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) + sizeof(*ih) + 4); if (!skb) goto out; skb->protocol = htons(ETH_P_IP); skb_reset_mac_header(skb); eth = eth_hdr(skb); memcpy(eth->h_source, br->dev->dev_addr, 6); eth->h_dest[0] = 1; eth->h_dest[1] = 0; eth->h_dest[2] = 0x5e; eth->h_dest[3] = 0; eth->h_dest[4] = 0; eth->h_dest[5] = 1; eth->h_proto = htons(ETH_P_IP); skb_put(skb, sizeof(*eth)); skb_set_network_header(skb, skb->len); iph = ip_hdr(skb); iph->version = 4; iph->ihl = 6; iph->tos = 0xc0; iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4); iph->id = 0; iph->frag_off = htons(IP_DF); iph->ttl = 1; iph->protocol = IPPROTO_IGMP; iph->saddr = 0; iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); 
((u8 *)&iph[1])[0] = IPOPT_RA; ((u8 *)&iph[1])[1] = 4; ((u8 *)&iph[1])[2] = 0; ((u8 *)&iph[1])[3] = 0; ip_send_check(iph); skb_put(skb, 24); skb_set_transport_header(skb, skb->len); ih = igmp_hdr(skb); ih->type = IGMP_HOST_MEMBERSHIP_QUERY; ih->code = (group ? br->multicast_last_member_interval : br->multicast_query_response_interval) / (HZ / IGMP_TIMER_SCALE); ih->group = group; ih->csum = 0; ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); skb_put(skb, sizeof(*ih)); __skb_pull(skb, sizeof(*eth)); out: return skb; } #if IS_ENABLED(CONFIG_IPV6) static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, const struct in6_addr *group) { struct sk_buff *skb; struct ipv6hdr *ip6h; struct mld_msg *mldq; struct ethhdr *eth; u8 *hopopt; unsigned long interval; skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + 8 + sizeof(*mldq)); if (!skb) goto out; skb->protocol = htons(ETH_P_IPV6); /* Ethernet header */ skb_reset_mac_header(skb); eth = eth_hdr(skb); memcpy(eth->h_source, br->dev->dev_addr, 6); eth->h_proto = htons(ETH_P_IPV6); skb_put(skb, sizeof(*eth)); /* IPv6 header + HbH option */ skb_set_network_header(skb, skb->len); ip6h = ipv6_hdr(skb); *(__force __be32 *)ip6h = htonl(0x60000000); ip6h->payload_len = htons(8 + sizeof(*mldq)); ip6h->nexthdr = IPPROTO_HOPOPTS; ip6h->hop_limit = 1; ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, &ip6h->saddr)) { kfree_skb(skb); return NULL; } ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); hopopt = (u8 *)(ip6h + 1); hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ hopopt[1] = 0; /* length of HbH */ hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ hopopt[3] = 2; /* Length of RA Option */ hopopt[4] = 0; /* Type = 0x0000 (MLD) */ hopopt[5] = 0; hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */ hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */ skb_put(skb, sizeof(*ip6h) + 8); /* ICMPv6 */ skb_set_transport_header(skb, skb->len); mldq 
= (struct mld_msg *) icmp6_hdr(skb); interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : br->multicast_query_response_interval; mldq->mld_type = ICMPV6_MGM_QUERY; mldq->mld_code = 0; mldq->mld_cksum = 0; mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); mldq->mld_reserved = 0; mldq->mld_mca = *group; /* checksum */ mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, sizeof(*mldq), IPPROTO_ICMPV6, csum_partial(mldq, sizeof(*mldq), 0)); skb_put(skb, sizeof(*mldq)); __skb_pull(skb, sizeof(*eth)); out: return skb; } #endif static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, struct br_ip *addr) { switch (addr->proto) { case htons(ETH_P_IP): return br_ip4_multicast_alloc_query(br, addr->u.ip4); #if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): return br_ip6_multicast_alloc_query(br, &addr->u.ip6); #endif } return NULL; } static struct net_bridge_mdb_entry *br_multicast_get_group( struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group, int hash) { struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; struct hlist_node *p; unsigned count = 0; unsigned max; int elasticity; int err; mdb = rcu_dereference_protected(br->mdb, 1); hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { count++; if (unlikely(br_ip_equal(group, &mp->addr))) return mp; } elasticity = 0; max = mdb->max; if (unlikely(count > br->hash_elasticity && count)) { if (net_ratelimit()) br_info(br, "Multicast hash table " "chain limit reached: %s\n", port ? port->dev->name : br->dev->name); elasticity = br->hash_elasticity; } if (mdb->size >= max) { max *= 2; if (unlikely(max >= br->hash_max)) { br_warn(br, "Multicast hash table maximum " "reached, disabling snooping: %s, %d\n", port ? 
port->dev->name : br->dev->name, max); err = -E2BIG; disable: br->multicast_disabled = 1; goto err; } } if (max > mdb->max || elasticity) { if (mdb->old) { if (net_ratelimit()) br_info(br, "Multicast hash table " "on fire: %s\n", port ? port->dev->name : br->dev->name); err = -EEXIST; goto err; } err = br_mdb_rehash(&br->mdb, max, elasticity); if (err) { br_warn(br, "Cannot rehash multicast " "hash table, disabling snooping: %s, %d, %d\n", port ? port->dev->name : br->dev->name, mdb->size, err); goto disable; } err = -EAGAIN; goto err; } return NULL; err: mp = ERR_PTR(err); return mp; } static struct net_bridge_mdb_entry *br_multicast_new_group( struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group) { struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; int hash; int err; mdb = rcu_dereference_protected(br->mdb, 1); if (!mdb) { err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0); if (err) return ERR_PTR(err); goto rehash; } hash = br_ip_hash(mdb, group); mp = br_multicast_get_group(br, port, group, hash); switch (PTR_ERR(mp)) { case 0: break; case -EAGAIN: rehash: mdb = rcu_dereference_protected(br->mdb, 1); hash = br_ip_hash(mdb, group); break; default: goto out; } mp = kzalloc(sizeof(*mp), GFP_ATOMIC); if (unlikely(!mp)) return ERR_PTR(-ENOMEM); mp->br = br; mp->addr = *group; setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp); hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); mdb->size++; out: return mp; } static int br_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group) { struct net_bridge_mdb_entry *mp; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; unsigned long now = jiffies; int err; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || (port && port->state == BR_STATE_DISABLED)) goto out; mp = br_multicast_new_group(br, port, group); err = PTR_ERR(mp); if (IS_ERR(mp)) goto err; if (!port) { mp->mglist = true; 
mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; } for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (p->port == port) goto found; if ((unsigned long)p->port < (unsigned long)port) break; } p = kzalloc(sizeof(*p), GFP_ATOMIC); err = -ENOMEM; if (unlikely(!p)) goto err; p->addr = *group; p->port = port; p->next = *pp; hlist_add_head(&p->mglist, &port->mglist); setup_timer(&p->timer, br_multicast_port_group_expired, (unsigned long)p); rcu_assign_pointer(*pp, p); found: mod_timer(&p->timer, now + br->multicast_membership_interval); out: err = 0; err: spin_unlock(&br->multicast_lock); return err; } static int br_ip4_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, __be32 group) { struct br_ip br_group; if (ipv4_is_local_multicast(group)) return 0; br_group.u.ip4 = group; br_group.proto = htons(ETH_P_IP); return br_multicast_add_group(br, port, &br_group); } #if IS_ENABLED(CONFIG_IPV6) static int br_ip6_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group) { struct br_ip br_group; if (!ipv6_is_transient_multicast(group)) return 0; br_group.u.ip6 = *group; br_group.proto = htons(ETH_P_IPV6); return br_multicast_add_group(br, port, &br_group); } #endif static void br_multicast_router_expired(unsigned long data) { struct net_bridge_port *port = (void *)data; struct net_bridge *br = port->br; spin_lock(&br->multicast_lock); if (port->multicast_router != 1 || timer_pending(&port->multicast_router_timer) || hlist_unhashed(&port->rlist)) goto out; hlist_del_init_rcu(&port->rlist); out: spin_unlock(&br->multicast_lock); } static void br_multicast_local_router_expired(unsigned long data) { } static void __br_multicast_send_query(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *ip) { struct sk_buff *skb; skb = br_multicast_alloc_query(br, ip); if (!skb) return; if (port) { __skb_push(skb, sizeof(struct ethhdr)); skb->dev = 
port->dev; NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, dev_queue_xmit); } else netif_rx(skb); } static void br_multicast_send_query(struct net_bridge *br, struct net_bridge_port *port, u32 sent) { unsigned long time; struct br_ip br_group; if (!netif_running(br->dev) || br->multicast_disabled || timer_pending(&br->multicast_querier_timer)) return; memset(&br_group.u, 0, sizeof(br_group.u)); br_group.proto = htons(ETH_P_IP); __br_multicast_send_query(br, port, &br_group); #if IS_ENABLED(CONFIG_IPV6) br_group.proto = htons(ETH_P_IPV6); __br_multicast_send_query(br, port, &br_group); #endif time = jiffies; time += sent < br->multicast_startup_query_count ? br->multicast_startup_query_interval : br->multicast_query_interval; mod_timer(port ? &port->multicast_query_timer : &br->multicast_query_timer, time); } static void br_multicast_port_query_expired(unsigned long data) { struct net_bridge_port *port = (void *)data; struct net_bridge *br = port->br; spin_lock(&br->multicast_lock); if (port->state == BR_STATE_DISABLED || port->state == BR_STATE_BLOCKING) goto out; if (port->multicast_startup_queries_sent < br->multicast_startup_query_count) port->multicast_startup_queries_sent++; br_multicast_send_query(port->br, port, port->multicast_startup_queries_sent); out: spin_unlock(&br->multicast_lock); } void br_multicast_add_port(struct net_bridge_port *port) { port->multicast_router = 1; setup_timer(&port->multicast_router_timer, br_multicast_router_expired, (unsigned long)port); setup_timer(&port->multicast_query_timer, br_multicast_port_query_expired, (unsigned long)port); } void br_multicast_del_port(struct net_bridge_port *port) { del_timer_sync(&port->multicast_router_timer); } static void __br_multicast_enable_port(struct net_bridge_port *port) { port->multicast_startup_queries_sent = 0; if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 || del_timer(&port->multicast_query_timer)) mod_timer(&port->multicast_query_timer, jiffies); } void 
br_multicast_enable_port(struct net_bridge_port *port) { struct net_bridge *br = port->br; spin_lock(&br->multicast_lock); if (br->multicast_disabled || !netif_running(br->dev)) goto out; __br_multicast_enable_port(port); out: spin_unlock(&br->multicast_lock); } void br_multicast_disable_port(struct net_bridge_port *port) { struct net_bridge *br = port->br; struct net_bridge_port_group *pg; struct hlist_node *p, *n; spin_lock(&br->multicast_lock); hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist) br_multicast_del_pg(br, pg); if (!hlist_unhashed(&port->rlist)) hlist_del_init_rcu(&port->rlist); del_timer(&port->multicast_router_timer); del_timer(&port->multicast_query_timer); spin_unlock(&br->multicast_lock); } static int br_ip4_multicast_igmp3_report(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct igmpv3_report *ih; struct igmpv3_grec *grec; int i; int len; int num; int type; int err = 0; __be32 group; if (!pskb_may_pull(skb, sizeof(*ih))) return -EINVAL; ih = igmpv3_report_hdr(skb); num = ntohs(ih->ngrec); len = sizeof(*ih); for (i = 0; i < num; i++) { len += sizeof(*grec); if (!pskb_may_pull(skb, len)) return -EINVAL; grec = (void *)(skb->data + len - sizeof(*grec)); group = grec->grec_mca; type = grec->grec_type; len += ntohs(grec->grec_nsrcs) * 4; if (!pskb_may_pull(skb, len)) return -EINVAL; /* We treat this as an IGMPv2 report for now. 
*/ switch (type) { case IGMPV3_MODE_IS_INCLUDE: case IGMPV3_MODE_IS_EXCLUDE: case IGMPV3_CHANGE_TO_INCLUDE: case IGMPV3_CHANGE_TO_EXCLUDE: case IGMPV3_ALLOW_NEW_SOURCES: case IGMPV3_BLOCK_OLD_SOURCES: break; default: continue; } err = br_ip4_multicast_add_group(br, port, group); if (err) break; } return err; } #if IS_ENABLED(CONFIG_IPV6) static int br_ip6_multicast_mld2_report(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct icmp6hdr *icmp6h; struct mld2_grec *grec; int i; int len; int num; int err = 0; if (!pskb_may_pull(skb, sizeof(*icmp6h))) return -EINVAL; icmp6h = icmp6_hdr(skb); num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); len = sizeof(*icmp6h); for (i = 0; i < num; i++) { __be16 *nsrcs, _nsrcs; nsrcs = skb_header_pointer(skb, len + offsetof(struct mld2_grec, grec_nsrcs), sizeof(_nsrcs), &_nsrcs); if (!nsrcs) return -EINVAL; if (!pskb_may_pull(skb, len + sizeof(*grec) + sizeof(struct in6_addr) * ntohs(*nsrcs))) return -EINVAL; grec = (struct mld2_grec *)(skb->data + len); len += sizeof(*grec) + sizeof(struct in6_addr) * ntohs(*nsrcs); /* We treat these as MLDv1 reports for now. 
*/ switch (grec->grec_type) { case MLD2_MODE_IS_INCLUDE: case MLD2_MODE_IS_EXCLUDE: case MLD2_CHANGE_TO_INCLUDE: case MLD2_CHANGE_TO_EXCLUDE: case MLD2_ALLOW_NEW_SOURCES: case MLD2_BLOCK_OLD_SOURCES: break; default: continue; } err = br_ip6_multicast_add_group(br, port, &grec->grec_mca); if (!err) break; } return err; } #endif /* * Add port to rotuer_list * list is maintained ordered by pointer value * and locked by br->multicast_lock and RCU */ static void br_multicast_add_router(struct net_bridge *br, struct net_bridge_port *port) { struct net_bridge_port *p; struct hlist_node *n, *slot = NULL; hlist_for_each_entry(p, n, &br->router_list, rlist) { if ((unsigned long) port >= (unsigned long) p) break; slot = n; } if (slot) hlist_add_after_rcu(slot, &port->rlist); else hlist_add_head_rcu(&port->rlist, &br->router_list); } static void br_multicast_mark_router(struct net_bridge *br, struct net_bridge_port *port) { unsigned long now = jiffies; if (!port) { if (br->multicast_router == 1) mod_timer(&br->multicast_router_timer, now + br->multicast_querier_interval); return; } if (port->multicast_router != 1) return; if (!hlist_unhashed(&port->rlist)) goto timer; br_multicast_add_router(br, port); timer: mod_timer(&port->multicast_router_timer, now + br->multicast_querier_interval); } static void br_multicast_query_received(struct net_bridge *br, struct net_bridge_port *port, int saddr) { if (saddr) mod_timer(&br->multicast_querier_timer, jiffies + br->multicast_querier_interval); else if (timer_pending(&br->multicast_querier_timer)) return; br_multicast_mark_router(br, port); } static int br_ip4_multicast_query(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); struct igmphdr *ih = igmp_hdr(skb); struct net_bridge_mdb_entry *mp; struct igmpv3_query *ih3; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; unsigned long max_delay; unsigned long now = jiffies; __be32 group; int err = 0; 
spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || (port && port->state == BR_STATE_DISABLED)) goto out; br_multicast_query_received(br, port, !!iph->saddr); group = ih->group; if (skb->len == sizeof(*ih)) { max_delay = ih->code * (HZ / IGMP_TIMER_SCALE); if (!max_delay) { max_delay = 10 * HZ; group = 0; } } else { if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) { err = -EINVAL; goto out; } ih3 = igmpv3_query_hdr(skb); if (ih3->nsrcs) goto out; max_delay = ih3->code ? IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; } if (!group) goto out; mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group); if (!mp) goto out; max_delay *= br->multicast_last_member_count; if (mp->mglist && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, now + max_delay) : try_to_del_timer_sync(&mp->timer) >= 0)) mod_timer(&mp->timer, now + max_delay); for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : try_to_del_timer_sync(&p->timer) >= 0) mod_timer(&p->timer, now + max_delay); } out: spin_unlock(&br->multicast_lock); return err; } #if IS_ENABLED(CONFIG_IPV6) static int br_ip6_multicast_query(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); struct net_bridge_mdb_entry *mp; struct mld2_query *mld2q; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; unsigned long max_delay; unsigned long now = jiffies; const struct in6_addr *group = NULL; int err = 0; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || (port && port->state == BR_STATE_DISABLED)) goto out; br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr)); if (skb->len == sizeof(*mld)) { if (!pskb_may_pull(skb, sizeof(*mld))) { err = -EINVAL; goto out; } mld = (struct mld_msg *) icmp6_hdr(skb); max_delay = 
msecs_to_jiffies(htons(mld->mld_maxdelay)); if (max_delay) group = &mld->mld_mca; } else if (skb->len >= sizeof(*mld2q)) { if (!pskb_may_pull(skb, sizeof(*mld2q))) { err = -EINVAL; goto out; } mld2q = (struct mld2_query *)icmp6_hdr(skb); if (!mld2q->mld2q_nsrcs) group = &mld2q->mld2q_mca; max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1; } if (!group) goto out; mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group); if (!mp) goto out; max_delay *= br->multicast_last_member_count; if (mp->mglist && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, now + max_delay) : try_to_del_timer_sync(&mp->timer) >= 0)) mod_timer(&mp->timer, now + max_delay); for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : try_to_del_timer_sync(&p->timer) >= 0) mod_timer(&p->timer, now + max_delay); } out: spin_unlock(&br->multicast_lock); return err; } #endif static void br_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group) { struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; struct net_bridge_port_group *p; unsigned long now; unsigned long time; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || (port && port->state == BR_STATE_DISABLED) || timer_pending(&br->multicast_querier_timer)) goto out; mdb = mlock_dereference(br->mdb, br); mp = br_mdb_ip_get(mdb, group); if (!mp) goto out; now = jiffies; time = now + br->multicast_last_member_count * br->multicast_last_member_interval; if (!port) { if (mp->mglist && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, time) : try_to_del_timer_sync(&mp->timer) >= 0)) { mod_timer(&mp->timer, time); } goto out; } for (p = mlock_dereference(mp->ports, br); p != NULL; p = mlock_dereference(p->next, br)) { if (p->port != port) continue; if (!hlist_unhashed(&p->mglist) && (timer_pending(&p->timer) ? 
time_after(p->timer.expires, time) : try_to_del_timer_sync(&p->timer) >= 0)) { mod_timer(&p->timer, time); } break; } out: spin_unlock(&br->multicast_lock); } static void br_ip4_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, __be32 group) { struct br_ip br_group; if (ipv4_is_local_multicast(group)) return; br_group.u.ip4 = group; br_group.proto = htons(ETH_P_IP); br_multicast_leave_group(br, port, &br_group); } #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group) { struct br_ip br_group; if (!ipv6_is_transient_multicast(group)) return; br_group.u.ip6 = *group; br_group.proto = htons(ETH_P_IPV6); br_multicast_leave_group(br, port, &br_group); } #endif static int br_multicast_ipv4_rcv(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct sk_buff *skb2 = skb; const struct iphdr *iph; struct igmphdr *ih; unsigned len; unsigned offset; int err; /* We treat OOM as packet loss for now. 
*/ if (!pskb_may_pull(skb, sizeof(*iph))) return -EINVAL; iph = ip_hdr(skb); if (iph->ihl < 5 || iph->version != 4) return -EINVAL; if (!pskb_may_pull(skb, ip_hdrlen(skb))) return -EINVAL; iph = ip_hdr(skb); if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) return -EINVAL; if (iph->protocol != IPPROTO_IGMP) { if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP) BR_INPUT_SKB_CB(skb)->mrouters_only = 1; return 0; } len = ntohs(iph->tot_len); if (skb->len < len || len < ip_hdrlen(skb)) return -EINVAL; if (skb->len > len) { skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) return -ENOMEM; err = pskb_trim_rcsum(skb2, len); if (err) goto err_out; } len -= ip_hdrlen(skb2); offset = skb_network_offset(skb2) + ip_hdrlen(skb2); __skb_pull(skb2, offset); skb_reset_transport_header(skb2); err = -EINVAL; if (!pskb_may_pull(skb2, sizeof(*ih))) goto out; switch (skb2->ip_summed) { case CHECKSUM_COMPLETE: if (!csum_fold(skb2->csum)) break; /* fall through */ case CHECKSUM_NONE: skb2->csum = 0; if (skb_checksum_complete(skb2)) goto out; } err = 0; BR_INPUT_SKB_CB(skb)->igmp = 1; ih = igmp_hdr(skb2); switch (ih->type) { case IGMP_HOST_MEMBERSHIP_REPORT: case IGMPV2_HOST_MEMBERSHIP_REPORT: BR_INPUT_SKB_CB(skb)->mrouters_only = 1; err = br_ip4_multicast_add_group(br, port, ih->group); break; case IGMPV3_HOST_MEMBERSHIP_REPORT: err = br_ip4_multicast_igmp3_report(br, port, skb2); break; case IGMP_HOST_MEMBERSHIP_QUERY: err = br_ip4_multicast_query(br, port, skb2); break; case IGMP_HOST_LEAVE_MESSAGE: br_ip4_multicast_leave_group(br, port, ih->group); break; } out: __skb_push(skb2, offset); err_out: if (skb2 != skb) kfree_skb(skb2); return err; } #if IS_ENABLED(CONFIG_IPV6) static int br_multicast_ipv6_rcv(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct sk_buff *skb2; const struct ipv6hdr *ip6h; u8 icmp6_type; u8 nexthdr; __be16 frag_off; unsigned len; int offset; int err; if (!pskb_may_pull(skb, sizeof(*ip6h))) return -EINVAL; ip6h = 
ipv6_hdr(skb); /* * We're interested in MLD messages only. * - Version is 6 * - MLD has always Router Alert hop-by-hop option * - But we do not support jumbrograms. */ if (ip6h->version != 6 || ip6h->nexthdr != IPPROTO_HOPOPTS || ip6h->payload_len == 0) return 0; len = ntohs(ip6h->payload_len) + sizeof(*ip6h); if (skb->len < len) return -EINVAL; nexthdr = ip6h->nexthdr; offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off); if (offset < 0 || nexthdr != IPPROTO_ICMPV6) return 0; /* Okay, we found ICMPv6 header */ skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) return -ENOMEM; err = -EINVAL; if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr))) goto out; len -= offset - skb_network_offset(skb2); __skb_pull(skb2, offset); skb_reset_transport_header(skb2); skb_postpull_rcsum(skb2, skb_network_header(skb2), skb_network_header_len(skb2)); icmp6_type = icmp6_hdr(skb2)->icmp6_type; switch (icmp6_type) { case ICMPV6_MGM_QUERY: case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: case ICMPV6_MLD2_REPORT: break; default: err = 0; goto out; } /* Okay, we found MLD message. Check further. 
*/ if (skb2->len > len) { err = pskb_trim_rcsum(skb2, len); if (err) goto out; err = -EINVAL; } ip6h = ipv6_hdr(skb2); switch (skb2->ip_summed) { case CHECKSUM_COMPLETE: if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len, IPPROTO_ICMPV6, skb2->csum)) break; /*FALLTHROUGH*/ case CHECKSUM_NONE: skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len, IPPROTO_ICMPV6, 0)); if (__skb_checksum_complete(skb2)) goto out; } err = 0; BR_INPUT_SKB_CB(skb)->igmp = 1; switch (icmp6_type) { case ICMPV6_MGM_REPORT: { struct mld_msg *mld; if (!pskb_may_pull(skb2, sizeof(*mld))) { err = -EINVAL; goto out; } mld = (struct mld_msg *)skb_transport_header(skb2); BR_INPUT_SKB_CB(skb)->mrouters_only = 1; err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); break; } case ICMPV6_MLD2_REPORT: err = br_ip6_multicast_mld2_report(br, port, skb2); break; case ICMPV6_MGM_QUERY: err = br_ip6_multicast_query(br, port, skb2); break; case ICMPV6_MGM_REDUCTION: { struct mld_msg *mld; if (!pskb_may_pull(skb2, sizeof(*mld))) { err = -EINVAL; goto out; } mld = (struct mld_msg *)skb_transport_header(skb2); br_ip6_multicast_leave_group(br, port, &mld->mld_mca); } } out: kfree_skb(skb2); return err; } #endif int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { BR_INPUT_SKB_CB(skb)->igmp = 0; BR_INPUT_SKB_CB(skb)->mrouters_only = 0; if (br->multicast_disabled) return 0; switch (skb->protocol) { case htons(ETH_P_IP): return br_multicast_ipv4_rcv(br, port, skb); #if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): return br_multicast_ipv6_rcv(br, port, skb); #endif } return 0; } static void br_multicast_query_expired(unsigned long data) { struct net_bridge *br = (void *)data; spin_lock(&br->multicast_lock); if (br->multicast_startup_queries_sent < br->multicast_startup_query_count) br->multicast_startup_queries_sent++; br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent); spin_unlock(&br->multicast_lock); } void 
br_multicast_init(struct net_bridge *br) { br->hash_elasticity = 4; br->hash_max = 512; br->multicast_router = 1; br->multicast_last_member_count = 2; br->multicast_startup_query_count = 2; br->multicast_last_member_interval = HZ; br->multicast_query_response_interval = 10 * HZ; br->multicast_startup_query_interval = 125 * HZ / 4; br->multicast_query_interval = 125 * HZ; br->multicast_querier_interval = 255 * HZ; br->multicast_membership_interval = 260 * HZ; spin_lock_init(&br->multicast_lock); setup_timer(&br->multicast_router_timer, br_multicast_local_router_expired, 0); setup_timer(&br->multicast_querier_timer, br_multicast_local_router_expired, 0); setup_timer(&br->multicast_query_timer, br_multicast_query_expired, (unsigned long)br); } void br_multicast_open(struct net_bridge *br) { br->multicast_startup_queries_sent = 0; if (br->multicast_disabled) return; mod_timer(&br->multicast_query_timer, jiffies); } void br_multicast_stop(struct net_bridge *br) { struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; struct hlist_node *p, *n; u32 ver; int i; del_timer_sync(&br->multicast_router_timer); del_timer_sync(&br->multicast_querier_timer); del_timer_sync(&br->multicast_query_timer); spin_lock_bh(&br->multicast_lock); mdb = mlock_dereference(br->mdb, br); if (!mdb) goto out; br->mdb = NULL; ver = mdb->ver; for (i = 0; i < mdb->max; i++) { hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], hlist[ver]) { del_timer(&mp->timer); call_rcu_bh(&mp->rcu, br_multicast_free_group); } } if (mdb->old) { spin_unlock_bh(&br->multicast_lock); rcu_barrier_bh(); spin_lock_bh(&br->multicast_lock); WARN_ON(mdb->old); } mdb->old = mdb; call_rcu_bh(&mdb->rcu, br_mdb_free); out: spin_unlock_bh(&br->multicast_lock); } int br_multicast_set_router(struct net_bridge *br, unsigned long val) { int err = -ENOENT; spin_lock_bh(&br->multicast_lock); if (!netif_running(br->dev)) goto unlock; switch (val) { case 0: case 2: del_timer(&br->multicast_router_timer); /* fall through */ 
case 1: br->multicast_router = val; err = 0; break; default: err = -EINVAL; break; } unlock: spin_unlock_bh(&br->multicast_lock); return err; } int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val) { struct net_bridge *br = p->br; int err = -ENOENT; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED) goto unlock; switch (val) { case 0: case 1: case 2: p->multicast_router = val; err = 0; if (val < 2 && !hlist_unhashed(&p->rlist)) hlist_del_init_rcu(&p->rlist); if (val == 1) break; del_timer(&p->multicast_router_timer); if (val == 0) break; br_multicast_add_router(br, p); break; default: err = -EINVAL; break; } unlock: spin_unlock(&br->multicast_lock); return err; } int br_multicast_toggle(struct net_bridge *br, unsigned long val) { struct net_bridge_port *port; int err = 0; struct net_bridge_mdb_htable *mdb; spin_lock_bh(&br->multicast_lock); if (br->multicast_disabled == !val) goto unlock; br->multicast_disabled = !val; if (br->multicast_disabled) goto unlock; if (!netif_running(br->dev)) goto unlock; mdb = mlock_dereference(br->mdb, br); if (mdb) { if (mdb->old) { err = -EEXIST; rollback: br->multicast_disabled = !!val; goto unlock; } err = br_mdb_rehash(&br->mdb, mdb->max, br->hash_elasticity); if (err) goto rollback; } br_multicast_open(br); list_for_each_entry(port, &br->port_list, list) { if (port->state == BR_STATE_DISABLED || port->state == BR_STATE_BLOCKING) continue; __br_multicast_enable_port(port); } unlock: spin_unlock_bh(&br->multicast_lock); return err; } int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) { int err = -ENOENT; u32 old; struct net_bridge_mdb_htable *mdb; spin_lock(&br->multicast_lock); if (!netif_running(br->dev)) goto unlock; err = -EINVAL; if (!is_power_of_2(val)) goto unlock; mdb = mlock_dereference(br->mdb, br); if (mdb && val < mdb->size) goto unlock; err = 0; old = br->hash_max; br->hash_max = val; if (mdb) { if (mdb->old) { err = -EEXIST; 
rollback: br->hash_max = old; goto unlock; } err = br_mdb_rehash(&br->mdb, br->hash_max, br->hash_elasticity); if (err) goto rollback; } unlock: spin_unlock(&br->multicast_lock); return err; }
gpl-2.0
crpalmer/android_kernel_samsung_msm8974
drivers/usb/gadget/f_subset.c
3438
13998
/*
 * f_subset.c -- "CDC Subset" Ethernet link function driver
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/etherdevice.h>

#include "u_ether.h"

/*
 * This function packages a simple "CDC Subset" Ethernet port with no real
 * control mechanisms; just raw data transfer over two bulk endpoints.
 * The data transfer model is exactly that of CDC Ethernet, which is
 * why we call it the "CDC Subset".
 *
 * Because it's not standardized, this has some interoperability issues.
 * They mostly relate to driver binding, since the data transfer model is
 * so simple (CDC Ethernet).  The original versions of this protocol used
 * specific product/vendor IDs:  byteswapped IDs for Digital Equipment's
 * SA-1100 "Itsy" board, which could run Linux 2.4 kernels and supported
 * daughtercards with USB peripheral connectors.  (It was used more often
 * with other boards, using the Itsy identifiers.)  Linux hosts recognized
 * this with CONFIG_USB_ARMLINUX; these devices have only one configuration
 * and one interface.
 *
 * At some point, MCCI defined a (nonconformant) CDC MDLM variant called
 * "SAFE", which happens to have a mode which is identical to the "CDC
 * Subset" in terms of data transfer and lack of control model.  This was
 * adopted by later Sharp Zaurus models, and by some other software which
 * Linux hosts recognize with CONFIG_USB_NET_ZAURUS.
 *
 * Because Microsoft's RNDIS drivers are far from robust, we added a few
 * descriptors to the CDC Subset code, making this code look like a SAFE
 * implementation.  This lets you use MCCI's host side MS-Windows drivers
 * if you get fed up with RNDIS.  It also makes it easier for composite
 * drivers to work, since they can use class based binding instead of
 * caring about specific product and vendor IDs.
 */

/* Per-instance state: the generic u_ether link, plus the ASCII form of
 * the host-side MAC address exposed through the CDC ethernet descriptor
 * (12 hex digits + NUL fits in ethaddr[14]).
 */
struct f_gether {
	struct gether			port;

	char				ethaddr[14];
};

/* Map the composite framework's usb_function back to our wrapper. */
static inline struct f_gether *func_to_geth(struct usb_function *f)
{
	return container_of(f, struct f_gether, port.func);
}

/*-------------------------------------------------------------------------*/

/*
 * "Simple" CDC-subset option is a simple vendor-neutral model that most
 * full speed controllers can handle:  one interface, two bulk endpoints.
 * To assist host side drivers, we fancy it up a bit, and add descriptors so
 * some host side drivers will understand it as a "SAFE" variant.
 *
 * "SAFE" loosely follows CDC WMC MDLM, violating the spec in various ways.
 * Data endpoints live in the control interface, there's no data interface.
 * And it's not used to talk to a cell phone radio.
 */

/* interface descriptor: one interface, both bulk endpoints live here */

static struct usb_interface_descriptor subset_data_intf = {
	.bLength =		sizeof subset_data_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	.bAlternateSetting =	0,
	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_COMM,
	.bInterfaceSubClass =	USB_CDC_SUBCLASS_MDLM,
	.bInterfaceProtocol =	0,
	/* .iInterface = DYNAMIC */
};

static struct usb_cdc_header_desc mdlm_header_desc = {
	.bLength =		sizeof mdlm_header_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,

	.bcdCDC =		cpu_to_le16(0x0110),
};

static struct usb_cdc_mdlm_desc mdlm_desc = {
	.bLength =		sizeof mdlm_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_MDLM_TYPE,

	.bcdVersion =		cpu_to_le16(0x0100),

	/* 16-byte GUID identifying the MDLM variant ("SAFE") to hosts */
	.bGUID = {
		0x5d, 0x34, 0xcf, 0x66, 0x11, 0x18, 0x11, 0xd6,
		0xa2, 0x1a, 0x00, 0x01, 0x02, 0xca, 0x9a, 0x7f,
	},
};

/* since "usb_cdc_mdlm_detail_desc" is a variable length structure, we
 * can't really use its struct.  All we do here is say that we're using
 * the submode of "SAFE" which directly matches the CDC Subset.
 */
static u8 mdlm_detail_desc[] = {
	6,
	USB_DT_CS_INTERFACE,
	USB_CDC_MDLM_DETAIL_TYPE,

	0,	/* "SAFE" */
	0,	/* network control capabilities (none) */
	0,	/* network data capabilities ("raw" encapsulation) */
};

static struct usb_cdc_ether_desc ether_desc = {
	.bLength =		sizeof ether_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,

	/* this descriptor actually adds value, surprise! */
	/* .iMACAddress = DYNAMIC */
	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
	.wNumberMCFilters =	cpu_to_le16(0),
	.bNumberPowerFilters =	0,
};

/* full speed support: */

static struct usb_endpoint_descriptor fs_subset_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor fs_subset_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *fs_eth_function[] = {
	(struct usb_descriptor_header *) &subset_data_intf,
	(struct usb_descriptor_header *) &mdlm_header_desc,
	(struct usb_descriptor_header *) &mdlm_desc,
	(struct usb_descriptor_header *) &mdlm_detail_desc,
	(struct usb_descriptor_header *) &ether_desc,
	(struct usb_descriptor_header *) &fs_subset_in_desc,
	(struct usb_descriptor_header *) &fs_subset_out_desc,
	NULL,
};

/* high speed support: endpoint addresses are copied from the fs
 * descriptors at bind time; only wMaxPacketSize differs (512 for HS bulk)
 */

static struct usb_endpoint_descriptor hs_subset_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_endpoint_descriptor hs_subset_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_descriptor_header *hs_eth_function[] = {
	(struct usb_descriptor_header *) &subset_data_intf,
	(struct usb_descriptor_header *) &mdlm_header_desc,
	(struct usb_descriptor_header *) &mdlm_desc,
	(struct usb_descriptor_header *) &mdlm_detail_desc,
	(struct usb_descriptor_header *) &ether_desc,
	(struct usb_descriptor_header *) &hs_subset_in_desc,
	(struct usb_descriptor_header *) &hs_subset_out_desc,
	NULL,
};

/* super speed support: 1024-byte bulk packets, plus the mandatory
 * SS endpoint companion descriptor after each endpoint
 */

static struct usb_endpoint_descriptor ss_subset_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_endpoint_descriptor ss_subset_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc = {
	.bLength =		sizeof ss_subset_bulk_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_descriptor_header *ss_eth_function[] = {
	(struct usb_descriptor_header *) &subset_data_intf,
	(struct usb_descriptor_header *) &mdlm_header_desc,
	(struct usb_descriptor_header *) &mdlm_desc,
	(struct usb_descriptor_header *) &mdlm_detail_desc,
	(struct usb_descriptor_header *) &ether_desc,
	(struct usb_descriptor_header *) &ss_subset_in_desc,
	(struct usb_descriptor_header *) &ss_subset_bulk_comp_desc,
	(struct usb_descriptor_header *) &ss_subset_out_desc,
	(struct usb_descriptor_header *) &ss_subset_bulk_comp_desc,
	NULL,
};

/* string descriptors: [1].s is filled in with the MAC string and the
 * ids are assigned dynamically in geth_bind_config()
 */

static struct usb_string geth_string_defs[] = {
	[0].s = "CDC Ethernet Subset/SAFE",
	[1].s = NULL /* DYNAMIC */,
	{  } /* end of list */
};

static struct usb_gadget_strings geth_string_table = {
	.language =		0x0409,	/* en-us */
	.strings =		geth_string_defs,
};
static struct usb_gadget_strings *geth_strings[] = {
	&geth_string_table,
	NULL,
};

/*-------------------------------------------------------------------------*/

/* set_alt handler: since the interface has a single altsetting (0), any
 * SET_INTERFACE is either an activation or a reset.  (Re)configure both
 * bulk endpoints for the current connection speed and bring the u_ether
 * link up.  Returns 0 or a negative errno from gether_connect().
 */
static int geth_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_gether		*geth = func_to_geth(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct net_device	*net;

	/* we know alt == 0, so this is an activation or a reset */

	/* a non-NULL driver_data on the IN endpoint means the link is
	 * already up, so tear it down before re-activating
	 */
	if (geth->port.in_ep->driver_data) {
		DBG(cdev, "reset cdc subset\n");
		gether_disconnect(&geth->port);
	}

	DBG(cdev, "init + activate cdc subset\n");
	if (config_ep_by_speed(cdev->gadget, f, geth->port.in_ep) ||
	    config_ep_by_speed(cdev->gadget, f, geth->port.out_ep)) {
		geth->port.in_ep->desc = NULL;
		geth->port.out_ep->desc = NULL;
		return -EINVAL;
	}

	net = gether_connect(&geth->port);
	return IS_ERR(net) ? PTR_ERR(net) : 0;
}

/* disable handler: tear down the network link (reverse of set_alt) */
static void geth_disable(struct usb_function *f)
{
	struct f_gether	*geth = func_to_geth(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	DBG(cdev, "net deactivated\n");
	gether_disconnect(&geth->port);
}

/*-------------------------------------------------------------------------*/

/* serial function driver setup/binding */

/* bind: allocate the interface id, claim one bulk IN and one bulk OUT
 * endpoint, and build per-speed descriptor copies (fs always; hs/ss when
 * the gadget supports them).  On any failure, free whatever descriptor
 * copies were made and release the endpoint claims.
 */
static int
geth_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_gether		*geth = func_to_geth(f);
	int			status;
	struct usb_ep		*ep;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	subset_data_intf.bInterfaceNumber = status;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_in_desc);
	if (!ep)
		goto fail;
	geth->port.in_ep = ep;
	ep->driver_data = cdev;	/* claim */

	ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_out_desc);
	if (!ep)
		goto fail;
	geth->port.out_ep = ep;
	ep->driver_data = cdev;	/* claim */

	/* copy descriptors, and track endpoint copies */
	f->descriptors = usb_copy_descriptors(fs_eth_function);
	if (!f->descriptors)
		goto fail;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		hs_subset_in_desc.bEndpointAddress =
				fs_subset_in_desc.bEndpointAddress;
		hs_subset_out_desc.bEndpointAddress =
				fs_subset_out_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(hs_eth_function);
		if (!f->hs_descriptors)
			goto fail;
	}

	if (gadget_is_superspeed(c->cdev->gadget)) {
		ss_subset_in_desc.bEndpointAddress =
				fs_subset_in_desc.bEndpointAddress;
		ss_subset_out_desc.bEndpointAddress =
				fs_subset_out_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->ss_descriptors = usb_copy_descriptors(ss_eth_function);
		if (!f->ss_descriptors)
			goto fail;
	}

	/* NOTE:  all that is done without knowing or caring about
	 * the network link ... which is unavailable to this code
	 * until we're activated via set_alt().
	 */

	DBG(cdev, "CDC Subset: %s speed IN/%s OUT/%s\n",
			gadget_is_superspeed(c->cdev->gadget) ? "super" :
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			geth->port.in_ep->name, geth->port.out_ep->name);
	return 0;

fail:
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);
	if (f->hs_descriptors)
		usb_free_descriptors(f->hs_descriptors);

	/* we might as well release our claims on endpoints */
	if (geth->port.out_ep->desc)
		geth->port.out_ep->driver_data = NULL;
	if (geth->port.in_ep->desc)
		geth->port.in_ep->driver_data = NULL;

	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

	return status;
}

/* unbind: free per-speed descriptor copies, drop the MAC string pointer
 * (it lives inside the instance being freed), and free the instance
 */
static void
geth_unbind(struct usb_configuration *c, struct usb_function *f)
{
	if (gadget_is_superspeed(c->cdev->gadget))
		usb_free_descriptors(f->ss_descriptors);
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);
	geth_string_defs[1].s = NULL;
	kfree(func_to_geth(f));
}

/**
 * geth_bind_config - add CDC Subset network link to a configuration
 * @c: the configuration to support the network link
 * @ethaddr: a buffer in which the ethernet address of the host side
 *	side of the link was recorded
 * Context: single threaded during gadget setup
 *
 * Returns zero on success, else negative errno.
 *
 * Caller must have called @gether_setup().  Caller is also responsible
 * for calling @gether_cleanup() before module unload.
 */
int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
{
	struct f_gether	*geth;
	int		status;

	if (!ethaddr)
		return -EINVAL;

	/* maybe allocate device-global string IDs; id == 0 means the
	 * ids have not been assigned yet (done once per device)
	 */
	if (geth_string_defs[0].id == 0) {

		/* interface label */
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		geth_string_defs[0].id = status;
		subset_data_intf.iInterface = status;

		/* MAC address */
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		geth_string_defs[1].id = status;
		ether_desc.iMACAddress = status;
	}

	/* allocate and initialize one new instance */
	geth = kzalloc(sizeof *geth, GFP_KERNEL);
	if (!geth)
		return -ENOMEM;

	/* export host's Ethernet address in CDC format (12 uppercase
	 * hex digits, no separators)
	 */
	snprintf(geth->ethaddr, sizeof geth->ethaddr, "%02X%02X%02X%02X%02X%02X",
		ethaddr[0], ethaddr[1], ethaddr[2],
		ethaddr[3], ethaddr[4], ethaddr[5]);
	geth_string_defs[1].s = geth->ethaddr;

	geth->port.cdc_filter = DEFAULT_FILTER;

	geth->port.func.name = "cdc_subset";
	geth->port.func.strings = geth_strings;
	geth->port.func.bind = geth_bind;
	geth->port.func.unbind = geth_unbind;
	geth->port.func.set_alt = geth_set_alt;
	geth->port.func.disable = geth_disable;

	status = usb_add_function(c, &geth->port.func);
	if (status) {
		geth_string_defs[1].s = NULL;
		kfree(geth);
	}
	return status;
}
gpl-2.0
CyanogenMod/android_kernel_samsung_lt03wifi
drivers/ide/pmac.c
5230
46850
/* * Support for IDE interfaces on PowerMacs. * * These IDE interfaces are memory-mapped and have a DBDMA channel * for doing DMA. * * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Some code taken from drivers/ide/ide-dma.c: * * Copyright (c) 1995-1998 Mark Lord * * TODO: - Use pre-calculated (kauai) timing tables all the time and * get rid of the "rounded" tables used previously, so we have the * same table format for all controllers and can then just have one * big table * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/ide.h> #include <linux/notifier.h> #include <linux/module.h> #include <linux/reboot.h> #include <linux/pci.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <asm/prom.h> #include <asm/io.h> #include <asm/dbdma.h> #include <asm/ide.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/sections.h> #include <asm/irq.h> #include <asm/mediabay.h> #define DRV_NAME "ide-pmac" #undef IDE_PMAC_DEBUG #define DMA_WAIT_TIMEOUT 50 typedef struct pmac_ide_hwif { unsigned long regbase; int irq; int kind; int aapl_bus_id; unsigned broken_dma : 1; unsigned broken_dma_warn : 1; struct device_node* node; struct macio_dev *mdev; u32 timings[4]; volatile u32 __iomem * *kauai_fcr; ide_hwif_t *hwif; /* Those fields are duplicating what is in hwif. We currently * can't use the hwif ones because of some assumptions that are * beeing done by the generic code about the kind of dma controller * and format of the dma table. This will have to be fixed though. 
*/ volatile struct dbdma_regs __iomem * dma_regs; struct dbdma_cmd* dma_table_cpu; } pmac_ide_hwif_t; enum { controller_ohare, /* OHare based */ controller_heathrow, /* Heathrow/Paddington */ controller_kl_ata3, /* KeyLargo ATA-3 */ controller_kl_ata4, /* KeyLargo ATA-4 */ controller_un_ata6, /* UniNorth2 ATA-6 */ controller_k2_ata6, /* K2 ATA-6 */ controller_sh_ata6, /* Shasta ATA-6 */ }; static const char* model_name[] = { "OHare ATA", /* OHare based */ "Heathrow ATA", /* Heathrow/Paddington */ "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */ "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */ "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */ "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */ "Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */ }; /* * Extra registers, both 32-bit little-endian */ #define IDE_TIMING_CONFIG 0x200 #define IDE_INTERRUPT 0x300 /* Kauai (U2) ATA has different register setup */ #define IDE_KAUAI_PIO_CONFIG 0x200 #define IDE_KAUAI_ULTRA_CONFIG 0x210 #define IDE_KAUAI_POLL_CONFIG 0x220 /* * Timing configuration register definitions */ /* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */ #define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS) #define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS) #define IDE_SYSCLK_NS 30 /* 33Mhz cell */ #define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */ /* 133Mhz cell, found in shasta. * See comments about 100 Mhz Uninorth 2... * Note that PIO_MASK and MDMA_MASK seem to overlap */ #define TR_133_PIOREG_PIO_MASK 0xff000fff #define TR_133_PIOREG_MDMA_MASK 0x00fff800 #define TR_133_UDMAREG_UDMA_MASK 0x0003ffff #define TR_133_UDMAREG_UDMA_EN 0x00000001 /* 100Mhz cell, found in Uninorth 2. I don't have much infos about * this one yet, it appears as a pci device (106b/0033) on uninorth * internal PCI bus and it's clock is controlled like gem or fw. It * appears to be an evolution of keylargo ATA4 with a timing register * extended to 2 32bits registers and a similar DBDMA channel. 
Other * registers seem to exist but I can't tell much about them. * * So far, I'm using pre-calculated tables for this extracted from * the values used by the MacOS X driver. * * The "PIO" register controls PIO and MDMA timings, the "ULTRA" * register controls the UDMA timings. At least, it seems bit 0 * of this one enables UDMA vs. MDMA, and bits 4..7 are the * cycle time in units of 10ns. Bits 8..15 are used by I don't * know their meaning yet */ #define TR_100_PIOREG_PIO_MASK 0xff000fff #define TR_100_PIOREG_MDMA_MASK 0x00fff000 #define TR_100_UDMAREG_UDMA_MASK 0x0000ffff #define TR_100_UDMAREG_UDMA_EN 0x00000001 /* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on * 40 connector cable and to 4 on 80 connector one. * Clock unit is 15ns (66Mhz) * * 3 Values can be programmed: * - Write data setup, which appears to match the cycle time. They * also call it DIOW setup. * - Ready to pause time (from spec) * - Address setup. That one is weird. I don't see where exactly * it fits in UDMA cycles, I got it's name from an obscure piece * of commented out code in Darwin. They leave it to 0, we do as * well, despite a comment that would lead to think it has a * min value of 45ns. * Apple also add 60ns to the write data setup (or cycle time ?) on * reads. 
*/ #define TR_66_UDMA_MASK 0xfff00000 #define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */ #define TR_66_UDMA_ADDRSETUP_MASK 0xe0000000 /* Address setup */ #define TR_66_UDMA_ADDRSETUP_SHIFT 29 #define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */ #define TR_66_UDMA_RDY2PAUS_SHIFT 25 #define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */ #define TR_66_UDMA_WRDATASETUP_SHIFT 21 #define TR_66_MDMA_MASK 0x000ffc00 #define TR_66_MDMA_RECOVERY_MASK 0x000f8000 #define TR_66_MDMA_RECOVERY_SHIFT 15 #define TR_66_MDMA_ACCESS_MASK 0x00007c00 #define TR_66_MDMA_ACCESS_SHIFT 10 #define TR_66_PIO_MASK 0x000003ff #define TR_66_PIO_RECOVERY_MASK 0x000003e0 #define TR_66_PIO_RECOVERY_SHIFT 5 #define TR_66_PIO_ACCESS_MASK 0x0000001f #define TR_66_PIO_ACCESS_SHIFT 0 /* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo * Can do pio & mdma modes, clock unit is 30ns (33Mhz) * * The access time and recovery time can be programmed. Some older * Darwin code base limit OHare to 150ns cycle time. I decided to do * the same here fore safety against broken old hardware ;) * The HalfTick bit, when set, adds half a clock (15ns) to the access * time and removes one from recovery. It's not supported on KeyLargo * implementation afaik. The E bit appears to be set for PIO mode 0 and * is used to reach long timings used in this mode. */ #define TR_33_MDMA_MASK 0x003ff800 #define TR_33_MDMA_RECOVERY_MASK 0x001f0000 #define TR_33_MDMA_RECOVERY_SHIFT 16 #define TR_33_MDMA_ACCESS_MASK 0x0000f800 #define TR_33_MDMA_ACCESS_SHIFT 11 #define TR_33_MDMA_HALFTICK 0x00200000 #define TR_33_PIO_MASK 0x000007ff #define TR_33_PIO_E 0x00000400 #define TR_33_PIO_RECOVERY_MASK 0x000003e0 #define TR_33_PIO_RECOVERY_SHIFT 5 #define TR_33_PIO_ACCESS_MASK 0x0000001f #define TR_33_PIO_ACCESS_SHIFT 0 /* * Interrupt register definitions */ #define IDE_INTR_DMA 0x80000000 #define IDE_INTR_DEVICE 0x40000000 /* * FCR Register on Kauai. Not sure what bit 0x4 is ... 
*/ #define KAUAI_FCR_UATA_MAGIC 0x00000004 #define KAUAI_FCR_UATA_RESET_N 0x00000002 #define KAUAI_FCR_UATA_ENABLE 0x00000001 /* Rounded Multiword DMA timings * * I gave up finding a generic formula for all controller * types and instead, built tables based on timing values * used by Apple in Darwin's implementation. */ struct mdma_timings_t { int accessTime; int recoveryTime; int cycleTime; }; struct mdma_timings_t mdma_timings_33[] = { { 240, 240, 480 }, { 180, 180, 360 }, { 135, 135, 270 }, { 120, 120, 240 }, { 105, 105, 210 }, { 90, 90, 180 }, { 75, 75, 150 }, { 75, 45, 120 }, { 0, 0, 0 } }; struct mdma_timings_t mdma_timings_33k[] = { { 240, 240, 480 }, { 180, 180, 360 }, { 150, 150, 300 }, { 120, 120, 240 }, { 90, 120, 210 }, { 90, 90, 180 }, { 90, 60, 150 }, { 90, 30, 120 }, { 0, 0, 0 } }; struct mdma_timings_t mdma_timings_66[] = { { 240, 240, 480 }, { 180, 180, 360 }, { 135, 135, 270 }, { 120, 120, 240 }, { 105, 105, 210 }, { 90, 90, 180 }, { 90, 75, 165 }, { 75, 45, 120 }, { 0, 0, 0 } }; /* KeyLargo ATA-4 Ultra DMA timings (rounded) */ struct { int addrSetup; /* ??? 
*/ int rdy2pause; int wrDataSetup; } kl66_udma_timings[] = { { 0, 180, 120 }, /* Mode 0 */ { 0, 150, 90 }, /* 1 */ { 0, 120, 60 }, /* 2 */ { 0, 90, 45 }, /* 3 */ { 0, 90, 30 } /* 4 */ }; /* UniNorth 2 ATA/100 timings */ struct kauai_timing { int cycle_time; u32 timing_reg; }; static struct kauai_timing kauai_pio_timings[] = { { 930 , 0x08000fff }, { 600 , 0x08000a92 }, { 383 , 0x0800060f }, { 360 , 0x08000492 }, { 330 , 0x0800048f }, { 300 , 0x080003cf }, { 270 , 0x080003cc }, { 240 , 0x0800038b }, { 239 , 0x0800030c }, { 180 , 0x05000249 }, { 120 , 0x04000148 }, { 0 , 0 }, }; static struct kauai_timing kauai_mdma_timings[] = { { 1260 , 0x00fff000 }, { 480 , 0x00618000 }, { 360 , 0x00492000 }, { 270 , 0x0038e000 }, { 240 , 0x0030c000 }, { 210 , 0x002cb000 }, { 180 , 0x00249000 }, { 150 , 0x00209000 }, { 120 , 0x00148000 }, { 0 , 0 }, }; static struct kauai_timing kauai_udma_timings[] = { { 120 , 0x000070c0 }, { 90 , 0x00005d80 }, { 60 , 0x00004a60 }, { 45 , 0x00003a50 }, { 30 , 0x00002a30 }, { 20 , 0x00002921 }, { 0 , 0 }, }; static struct kauai_timing shasta_pio_timings[] = { { 930 , 0x08000fff }, { 600 , 0x0A000c97 }, { 383 , 0x07000712 }, { 360 , 0x040003cd }, { 330 , 0x040003cd }, { 300 , 0x040003cd }, { 270 , 0x040003cd }, { 240 , 0x040003cd }, { 239 , 0x040003cd }, { 180 , 0x0400028b }, { 120 , 0x0400010a }, { 0 , 0 }, }; static struct kauai_timing shasta_mdma_timings[] = { { 1260 , 0x00fff000 }, { 480 , 0x00820800 }, { 360 , 0x00820800 }, { 270 , 0x00820800 }, { 240 , 0x00820800 }, { 210 , 0x00820800 }, { 180 , 0x00820800 }, { 150 , 0x0028b000 }, { 120 , 0x001ca000 }, { 0 , 0 }, }; static struct kauai_timing shasta_udma133_timings[] = { { 120 , 0x00035901, }, { 90 , 0x000348b1, }, { 60 , 0x00033881, }, { 45 , 0x00033861, }, { 30 , 0x00033841, }, { 20 , 0x00033031, }, { 15 , 0x00033021, }, { 0 , 0 }, }; static inline u32 kauai_lookup_timing(struct kauai_timing* table, int cycle_time) { int i; for (i=0; table[i].cycle_time; i++) if (cycle_time > 
table[i+1].cycle_time) return table[i].timing_reg; BUG(); return 0; } /* allow up to 256 DBDMA commands per xfer */ #define MAX_DCMDS 256 /* * Wait 1s for disk to answer on IDE bus after a hard reset * of the device (via GPIO/FCR). * * Some devices seem to "pollute" the bus even after dropping * the BSY bit (typically some combo drives slave on the UDMA * bus) after a hard reset. Since we hard reset all drives on * KeyLargo ATA66, we have to keep that delay around. I may end * up not hard resetting anymore on these and keep the delay only * for older interfaces instead (we have to reset when coming * from MacOS...) --BenH. */ #define IDE_WAKEUP_DELAY (1*HZ) static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *); #define PMAC_IDE_REG(x) \ ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x))) /* * Apply the timings of the proper unit (master/slave) to the shared * timing register when selecting that unit. This version is for * ASICs with a single timing register */ static void pmac_ide_apply_timings(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); if (drive->dn & 1) writel(pmif->timings[1], PMAC_IDE_REG(IDE_TIMING_CONFIG)); else writel(pmif->timings[0], PMAC_IDE_REG(IDE_TIMING_CONFIG)); (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG)); } /* * Apply the timings of the proper unit (master/slave) to the shared * timing register when selecting that unit. 
 This version is for
 * ASICs with a dual timing register (Kauai)
 */
static void pmac_ide_kauai_apply_timings(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);

	if (drive->dn & 1) {
		writel(pmif->timings[1], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
		writel(pmif->timings[3], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
	} else {
		writel(pmif->timings[0], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
		writel(pmif->timings[2], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
	}
	/* Flush posted writes */
	(void)readl(PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
}

/*
 * Force an update of controller timing values for a given drive
 */
static void
pmac_ide_do_update_timings(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);

	if (pmif->kind == controller_sh_ata6 ||
	    pmif->kind == controller_un_ata6 ||
	    pmif->kind == controller_k2_ata6)
		pmac_ide_kauai_apply_timings(drive);
	else
		pmac_ide_apply_timings(drive);
}

/* Select a drive: program its timings, then write the device register */
static void pmac_dev_select(ide_drive_t *drive)
{
	pmac_ide_apply_timings(drive);

	writeb(drive->select | ATA_DEVICE_OBS,
	       (void __iomem *)drive->hwif->io_ports.device_addr);
}

static void pmac_kauai_dev_select(ide_drive_t *drive)
{
	pmac_ide_kauai_apply_timings(drive);

	writeb(drive->select | ATA_DEVICE_OBS,
	       (void __iomem *)drive->hwif->io_ports.device_addr);
}

static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
{
	writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
	/* Read back any register to flush the posted write */
	(void)readl((void __iomem *)(hwif->io_ports.data_addr
				     + IDE_TIMING_CONFIG));
}

static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl)
{
	writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
	(void)readl((void __iomem *)(hwif->io_ports.data_addr
				     + IDE_TIMING_CONFIG));
}

/*
 * Old tuning functions (called on hdparm -p), sets up drive PIO timings
 */
static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	const u8 pio = drive->pio_mode - XFER_PIO_0;
	struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
	u32 *timings, t;
	unsigned accessTicks, recTicks;
	unsigned accessTime, recTime;
	unsigned int cycle_time;

	/* which drive is it ? */
	timings = &pmif->timings[drive->dn & 1];
	t = *timings;

	cycle_time = ide_pio_cycle_time(drive, pio);

	switch (pmif->kind) {
	case controller_sh_ata6: {
		/* 133Mhz cell */
		u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time);
		t = (t & ~TR_133_PIOREG_PIO_MASK) | tr;
		break;
		}
	case controller_un_ata6:
	case controller_k2_ata6: {
		/* 100Mhz cell */
		u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time);
		t = (t & ~TR_100_PIOREG_PIO_MASK) | tr;
		break;
		}
	case controller_kl_ata4:
		/* 66Mhz cell */
		recTime = cycle_time - tim->active - tim->setup;
		recTime = max(recTime, 150U);
		accessTime = tim->active;
		accessTime = max(accessTime, 150U);
		accessTicks = SYSCLK_TICKS_66(accessTime);
		accessTicks = min(accessTicks, 0x1fU);
		recTicks = SYSCLK_TICKS_66(recTime);
		recTicks = min(recTicks, 0x1fU);
		t = (t & ~TR_66_PIO_MASK) |
			(accessTicks << TR_66_PIO_ACCESS_SHIFT) |
			(recTicks << TR_66_PIO_RECOVERY_SHIFT);
		break;
	default: {
		/* 33Mhz cell */
		int ebit = 0;
		recTime = cycle_time - tim->active - tim->setup;
		recTime = max(recTime, 150U);
		accessTime = tim->active;
		accessTime = max(accessTime, 150U);
		accessTicks = SYSCLK_TICKS(accessTime);
		accessTicks = min(accessTicks, 0x1fU);
		accessTicks = max(accessTicks, 4U);
		recTicks = SYSCLK_TICKS(recTime);
		recTicks = min(recTicks, 0x1fU);
		recTicks = max(recTicks, 5U) - 4;
		if (recTicks > 9) {
			recTicks--; /* guess, but it's only for PIO0, so... */
			ebit = 1;
		}
		t = (t & ~TR_33_PIO_MASK) |
				(accessTicks << TR_33_PIO_ACCESS_SHIFT) |
				(recTicks << TR_33_PIO_RECOVERY_SHIFT);
		if (ebit)
			t |= TR_33_PIO_E;
		break;
		}
	}

#ifdef IDE_PMAC_DEBUG
	printk(KERN_ERR "%s: Set PIO timing for mode %d, reg: 0x%08x\n",
		drive->name, pio,  *timings);
#endif

	*timings = t;
	pmac_ide_do_update_timings(drive);
}

/*
 * Calculate KeyLargo ATA/66 UDMA timings
 */
static int
set_timings_udma_ata4(u32 *timings, u8 speed)
{
	unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;

	if (speed > XFER_UDMA_4)
		return 1;

	rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause);
	wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup);
	addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup);

	*timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) |
			(wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) |
			(rdyToPauseTicks << TR_66_UDMA_RDY2PAUS_SHIFT) |
			(addrTicks <<TR_66_UDMA_ADDRSETUP_SHIFT) |
			TR_66_UDMA_EN;
#ifdef IDE_PMAC_DEBUG
	printk(KERN_ERR "ide_pmac: Set UDMA timing for mode %d, reg: 0x%08x\n",
		speed & 0xf,  *timings);
#endif

	return 0;
}

/*
 * Calculate Kauai ATA/100 UDMA timings
 */
static int
set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
{
	struct ide_timing *t = ide_timing_find_mode(speed);
	u32 tr;

	if (speed > XFER_UDMA_5 || t == NULL)
		return 1;
	tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
	*ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr;
	*ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN;

	return 0;
}

/*
 * Calculate Shasta ATA/133 UDMA timings
 */
static int
set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
{
	struct ide_timing *t = ide_timing_find_mode(speed);
	u32 tr;

	if (speed > XFER_UDMA_6 || t == NULL)
		return 1;
	tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma);
	*ultra_timings = ((*ultra_timings) & ~TR_133_UDMAREG_UDMA_MASK) | tr;
	*ultra_timings = (*ultra_timings) | TR_133_UDMAREG_UDMA_EN;
return 0; } /* * Calculate MDMA timings for all cells */ static void set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2, u8 speed) { u16 *id = drive->id; int cycleTime, accessTime = 0, recTime = 0; unsigned accessTicks, recTicks; struct mdma_timings_t* tm = NULL; int i; /* Get default cycle time for mode */ switch(speed & 0xf) { case 0: cycleTime = 480; break; case 1: cycleTime = 150; break; case 2: cycleTime = 120; break; default: BUG(); break; } /* Check if drive provides explicit DMA cycle time */ if ((id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_DMA_TIME]) cycleTime = max_t(int, id[ATA_ID_EIDE_DMA_TIME], cycleTime); /* OHare limits according to some old Apple sources */ if ((intf_type == controller_ohare) && (cycleTime < 150)) cycleTime = 150; /* Get the proper timing array for this controller */ switch(intf_type) { case controller_sh_ata6: case controller_un_ata6: case controller_k2_ata6: break; case controller_kl_ata4: tm = mdma_timings_66; break; case controller_kl_ata3: tm = mdma_timings_33k; break; default: tm = mdma_timings_33; break; } if (tm != NULL) { /* Lookup matching access & recovery times */ i = -1; for (;;) { if (tm[i+1].cycleTime < cycleTime) break; i++; } cycleTime = tm[i].cycleTime; accessTime = tm[i].accessTime; recTime = tm[i].recoveryTime; #ifdef IDE_PMAC_DEBUG printk(KERN_ERR "%s: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n", drive->name, cycleTime, accessTime, recTime); #endif } switch(intf_type) { case controller_sh_ata6: { /* 133Mhz cell */ u32 tr = kauai_lookup_timing(shasta_mdma_timings, cycleTime); *timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr; *timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN; } case controller_un_ata6: case controller_k2_ata6: { /* 100Mhz cell */ u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime); *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr; *timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN; } break; case controller_kl_ata4: /* 66Mhz cell */ 
accessTicks = SYSCLK_TICKS_66(accessTime); accessTicks = min(accessTicks, 0x1fU); accessTicks = max(accessTicks, 0x1U); recTicks = SYSCLK_TICKS_66(recTime); recTicks = min(recTicks, 0x1fU); recTicks = max(recTicks, 0x3U); /* Clear out mdma bits and disable udma */ *timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) | (accessTicks << TR_66_MDMA_ACCESS_SHIFT) | (recTicks << TR_66_MDMA_RECOVERY_SHIFT); break; case controller_kl_ata3: /* 33Mhz cell on KeyLargo */ accessTicks = SYSCLK_TICKS(accessTime); accessTicks = max(accessTicks, 1U); accessTicks = min(accessTicks, 0x1fU); accessTime = accessTicks * IDE_SYSCLK_NS; recTicks = SYSCLK_TICKS(recTime); recTicks = max(recTicks, 1U); recTicks = min(recTicks, 0x1fU); *timings = ((*timings) & ~TR_33_MDMA_MASK) | (accessTicks << TR_33_MDMA_ACCESS_SHIFT) | (recTicks << TR_33_MDMA_RECOVERY_SHIFT); break; default: { /* 33Mhz cell on others */ int halfTick = 0; int origAccessTime = accessTime; int origRecTime = recTime; accessTicks = SYSCLK_TICKS(accessTime); accessTicks = max(accessTicks, 1U); accessTicks = min(accessTicks, 0x1fU); accessTime = accessTicks * IDE_SYSCLK_NS; recTicks = SYSCLK_TICKS(recTime); recTicks = max(recTicks, 2U) - 1; recTicks = min(recTicks, 0x1fU); recTime = (recTicks + 1) * IDE_SYSCLK_NS; if ((accessTicks > 1) && ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) && ((recTime - IDE_SYSCLK_NS/2) >= origRecTime)) { halfTick = 1; accessTicks--; } *timings = ((*timings) & ~TR_33_MDMA_MASK) | (accessTicks << TR_33_MDMA_ACCESS_SHIFT) | (recTicks << TR_33_MDMA_RECOVERY_SHIFT); if (halfTick) *timings |= TR_33_MDMA_HALFTICK; } } #ifdef IDE_PMAC_DEBUG printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n", drive->name, speed & 0xf, *timings); #endif } static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); int ret = 0; u32 *timings, *timings2, tl[2]; u8 unit = drive->dn & 1; const u8 speed = 
drive->dma_mode; timings = &pmif->timings[unit]; timings2 = &pmif->timings[unit+2]; /* Copy timings to local image */ tl[0] = *timings; tl[1] = *timings2; if (speed >= XFER_UDMA_0) { if (pmif->kind == controller_kl_ata4) ret = set_timings_udma_ata4(&tl[0], speed); else if (pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6) ret = set_timings_udma_ata6(&tl[0], &tl[1], speed); else if (pmif->kind == controller_sh_ata6) ret = set_timings_udma_shasta(&tl[0], &tl[1], speed); else ret = -1; } else set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed); if (ret) return; /* Apply timings to controller */ *timings = tl[0]; *timings2 = tl[1]; pmac_ide_do_update_timings(drive); } /* * Blast some well known "safe" values to the timing registers at init or * wakeup from sleep time, before we do real calculation */ static void sanitize_timings(pmac_ide_hwif_t *pmif) { unsigned int value, value2 = 0; switch(pmif->kind) { case controller_sh_ata6: value = 0x0a820c97; value2 = 0x00033031; break; case controller_un_ata6: case controller_k2_ata6: value = 0x08618a92; value2 = 0x00002921; break; case controller_kl_ata4: value = 0x0008438c; break; case controller_kl_ata3: value = 0x00084526; break; case controller_heathrow: case controller_ohare: default: value = 0x00074526; break; } pmif->timings[0] = pmif->timings[1] = value; pmif->timings[2] = pmif->timings[3] = value2; } static int on_media_bay(pmac_ide_hwif_t *pmif) { return pmif->mdev && pmif->mdev->media_bay != NULL; } /* Suspend call back, should be called after the child devices * have actually been suspended */ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif) { /* We clear the timings */ pmif->timings[0] = 0; pmif->timings[1] = 0; disable_irq(pmif->irq); /* The media bay will handle itself just fine */ if (on_media_bay(pmif)) return 0; /* Kauai has bus control FCRs directly here */ if (pmif->kauai_fcr) { u32 fcr = readl(pmif->kauai_fcr); fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE); 
writel(fcr, pmif->kauai_fcr); } /* Disable the bus on older machines and the cell on kauai */ ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 0); return 0; } /* Resume call back, should be called before the child devices * are resumed */ static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif) { /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ if (!on_media_bay(pmif)) { ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1); msleep(10); ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0); /* Kauai has it different */ if (pmif->kauai_fcr) { u32 fcr = readl(pmif->kauai_fcr); fcr |= KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE; writel(fcr, pmif->kauai_fcr); } msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY)); } /* Sanitize drive timings */ sanitize_timings(pmif); enable_irq(pmif->irq); return 0; } static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) { pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); struct device_node *np = pmif->node; const char *cable = of_get_property(np, "cable-type", NULL); struct device_node *root = of_find_node_by_path("/"); const char *model = of_get_property(root, "model", NULL); /* Get cable type from device-tree. */ if (cable && !strncmp(cable, "80-", 3)) { /* Some drives fail to detect 80c cable in PowerBook */ /* These machine use proprietary short IDE cable anyway */ if (!strncmp(model, "PowerBook", 9)) return ATA_CBL_PATA40_SHORT; else return ATA_CBL_PATA80; } /* * G5's seem to have incorrect cable type in device-tree. * Let's assume they have a 80 conductor cable, this seem * to be always the case unless the user mucked around. 
*/ if (of_device_is_compatible(np, "K2-UATA") || of_device_is_compatible(np, "shasta-ata")) return ATA_CBL_PATA80; return ATA_CBL_PATA40; } static void pmac_ide_init_dev(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); if (on_media_bay(pmif)) { if (check_media_bay(pmif->mdev->media_bay) == MB_CD) { drive->dev_flags &= ~IDE_DFLAG_NOPROBE; return; } drive->dev_flags |= IDE_DFLAG_NOPROBE; } } static const struct ide_tp_ops pmac_tp_ops = { .exec_command = pmac_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = pmac_write_devctl, .dev_select = pmac_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = ide_input_data, .output_data = ide_output_data, }; static const struct ide_tp_ops pmac_ata6_tp_ops = { .exec_command = pmac_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = pmac_write_devctl, .dev_select = pmac_kauai_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = ide_input_data, .output_data = ide_output_data, }; static const struct ide_port_ops pmac_ide_ata4_port_ops = { .init_dev = pmac_ide_init_dev, .set_pio_mode = pmac_ide_set_pio_mode, .set_dma_mode = pmac_ide_set_dma_mode, .cable_detect = pmac_ide_cable_detect, }; static const struct ide_port_ops pmac_ide_port_ops = { .init_dev = pmac_ide_init_dev, .set_pio_mode = pmac_ide_set_pio_mode, .set_dma_mode = pmac_ide_set_dma_mode, }; static const struct ide_dma_ops pmac_dma_ops; static const struct ide_port_info pmac_port_info = { .name = DRV_NAME, .init_dma = pmac_ide_init_dma, .chipset = ide_pmac, .tp_ops = &pmac_tp_ops, .port_ops = &pmac_ide_port_ops, .dma_ops = &pmac_dma_ops, .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | IDE_HFLAG_POST_SET_MODE | IDE_HFLAG_MMIO | IDE_HFLAG_UNMASK_IRQS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, }; /* * Setup, register & probe an IDE channel driven by 
 this driver, this is
 * called by one of the 2 probe functions (macio or PCI).
 */
static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
					   struct ide_hw *hw)
{
	struct device_node *np = pmif->node;
	const int *bidp;
	struct ide_host *host;
	ide_hwif_t *hwif;
	struct ide_hw *hws[] = { hw };
	struct ide_port_info d = pmac_port_info;
	int rc;

	pmif->broken_dma = pmif->broken_dma_warn = 0;
	/* Identify the controller cell from the device-tree compatible
	 * property and pick matching ops/UDMA mask */
	if (of_device_is_compatible(np, "shasta-ata")) {
		pmif->kind = controller_sh_ata6;
		d.tp_ops = &pmac_ata6_tp_ops;
		d.port_ops = &pmac_ide_ata4_port_ops;
		d.udma_mask = ATA_UDMA6;
	} else if (of_device_is_compatible(np, "kauai-ata")) {
		pmif->kind = controller_un_ata6;
		d.tp_ops = &pmac_ata6_tp_ops;
		d.port_ops = &pmac_ide_ata4_port_ops;
		d.udma_mask = ATA_UDMA5;
	} else if (of_device_is_compatible(np, "K2-UATA")) {
		pmif->kind = controller_k2_ata6;
		d.tp_ops = &pmac_ata6_tp_ops;
		d.port_ops = &pmac_ide_ata4_port_ops;
		d.udma_mask = ATA_UDMA5;
	} else if (of_device_is_compatible(np, "keylargo-ata")) {
		if (strcmp(np->name, "ata-4") == 0) {
			pmif->kind = controller_kl_ata4;
			d.port_ops = &pmac_ide_ata4_port_ops;
			d.udma_mask = ATA_UDMA4;
		} else
			pmif->kind = controller_kl_ata3;
	} else if (of_device_is_compatible(np, "heathrow-ata")) {
		pmif->kind = controller_heathrow;
	} else {
		pmif->kind = controller_ohare;
		pmif->broken_dma = 1;
	}

	bidp = of_get_property(np, "AAPL,bus-id", NULL);
	pmif->aapl_bus_id = bidp ? *bidp : 0;

	/* On Kauai-type controllers, we make sure the FCR is correct */
	if (pmif->kauai_fcr)
		writel(KAUAI_FCR_UATA_MAGIC |
		       KAUAI_FCR_UATA_RESET_N |
		       KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr);

	/* Make sure we have sane timings */
	sanitize_timings(pmif);

	/* If we are on a media bay, wait for it to settle and lock it */
	if (pmif->mdev)
		lock_media_bay(pmif->mdev->media_bay);

	host = ide_host_alloc(&d, hws, 1);
	if (host == NULL) {
		rc = -ENOMEM;
		goto bail;
	}
	hwif = pmif->hwif = host->ports[0];

	if (on_media_bay(pmif)) {
		/* Fixup bus ID for media bay */
		if (!bidp)
			pmif->aapl_bus_id = 1;
	} else if (pmif->kind == controller_ohare) {
		/* The code below is having trouble on some ohare machines
		 * (timing related ?). Until I can put my hand on one of these
		 * units, I keep the old way
		 */
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
	} else {
		/* This is necessary to enable IDE when net-booting */
		ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
		msleep(10);
		ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
		msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
	}

	printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
			 "bus ID %d%s, irq %d\n", model_name[pmif->kind],
			 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
			 on_media_bay(pmif) ? " (mediabay)" : "", hw->irq);

	rc = ide_host_register(host, &d, hws);
	if (rc)
		pmif->hwif = NULL;

	if (pmif->mdev)
		unlock_media_bay(pmif->mdev->media_bay);

 bail:
	if (rc && host)
		ide_host_free(host);

	return rc;
}

/* Fill in the MMIO port layout: task-file registers every 0x10 bytes,
 * control register at base + 0x160 */
static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
{
	int i;

	for (i = 0; i < 8; ++i)
		hw->io_ports_array[i] = base + i * 0x10;

	hw->io_ports.ctl_addr = base + 0x160;
}

/*
 * Attach to a macio probed interface
 */
static int __devinit pmac_ide_macio_attach(struct macio_dev *mdev,
					   const struct of_device_id *match)
{
	void __iomem *base;
	unsigned long regbase;
	pmac_ide_hwif_t *pmif;
	int irq, rc;
	struct ide_hw hw;

	pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
	if (pmif == NULL)
		return -ENOMEM;

	if (macio_resource_count(mdev) == 0) {
		printk(KERN_WARNING "ide-pmac: no address for %s\n",
				    mdev->ofdev.dev.of_node->full_name);
		rc = -ENXIO;
		goto out_free_pmif;
	}

	/* Request memory resource for IO ports */
	if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
		printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
				"%s!\n", mdev->ofdev.dev.of_node->full_name);
		rc = -EBUSY;
		goto out_free_pmif;
	}

	/* XXX This is bogus. Should be fixed in the registry by checking
	 * the kind of host interrupt controller, a bit like gatwick
	 * fixes in irq.c. That works well enough for the single case
	 * where that happens though...
	 */
	if (macio_irq_count(mdev) == 0) {
		printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
				    "13\n", mdev->ofdev.dev.of_node->full_name);
		irq = irq_create_mapping(NULL, 13);
	} else
		irq = macio_irq(mdev, 0);

	base = ioremap(macio_resource_start(mdev, 0), 0x400);
	regbase = (unsigned long) base;

	pmif->mdev = mdev;
	pmif->node = mdev->ofdev.dev.of_node;
	pmif->regbase = regbase;
	pmif->irq = irq;
	pmif->kauai_fcr = NULL;

	if (macio_resource_count(mdev) >= 2) {
		if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
			printk(KERN_WARNING "ide-pmac: can't request DMA "
					    "resource for %s!\n",
					    mdev->ofdev.dev.of_node->full_name);
		else
			pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
	} else
		pmif->dma_regs = NULL;

	dev_set_drvdata(&mdev->ofdev.dev, pmif);

	memset(&hw, 0, sizeof(hw));
	pmac_ide_init_ports(&hw, pmif->regbase);
	hw.irq = irq;
	hw.dev = &mdev->bus->pdev->dev;
	hw.parent = &mdev->ofdev.dev;

	rc = pmac_ide_setup_device(pmif, &hw);
	if (rc != 0) {
		/* The inteface is released to the common IDE layer */
		dev_set_drvdata(&mdev->ofdev.dev, NULL);
		iounmap(base);
		if (pmif->dma_regs) {
			iounmap(pmif->dma_regs);
			macio_release_resource(mdev, 1);
		}
		macio_release_resource(mdev, 0);
		kfree(pmif);
	}

	return rc;

out_free_pmif:
	kfree(pmif);
	return rc;
}

static int
pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
	int rc = 0;

	if (mesg.event != mdev->ofdev.dev.power.power_state.event
			&& (mesg.event & PM_EVENT_SLEEP)) {
		rc = pmac_ide_do_suspend(pmif);
		if (rc == 0)
			mdev->ofdev.dev.power.power_state = mesg;
	}

	return rc;
}

static int
pmac_ide_macio_resume(struct macio_dev *mdev)
{
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
	int rc = 0;

	if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
		rc = pmac_ide_do_resume(pmif);
		if (rc == 0)
			mdev->ofdev.dev.power.power_state = PMSG_ON;
	}

	return rc;
}

/*
 * Attach to a PCI probed interface
 */
static int
__devinit pmac_ide_pci_attach(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct device_node *np;
	pmac_ide_hwif_t *pmif;
	void __iomem *base;
	unsigned long rbase, rlen;
	int rc;
	struct ide_hw hw;

	np = pci_device_to_OF_node(pdev);
	if (np == NULL) {
		printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
		return -ENODEV;
	}

	pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
	if (pmif == NULL)
		return -ENOMEM;

	if (pci_enable_device(pdev)) {
		printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
				    "%s\n", np->full_name);
		rc = -ENXIO;
		goto out_free_pmif;
	}
	pci_set_master(pdev);

	if (pci_request_regions(pdev, "Kauai ATA")) {
		printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
				"%s\n", np->full_name);
		rc = -ENXIO;
		goto out_free_pmif;
	}

	pmif->mdev = NULL;
	pmif->node = np;

	rbase = pci_resource_start(pdev, 0);
	rlen = pci_resource_len(pdev, 0);

	/* BAR 0 layout: FCR at offset 0, DMA regs at +0x1000, task file at
	 * +0x2000 */
	base = ioremap(rbase, rlen);
	pmif->regbase = (unsigned long) base + 0x2000;
	pmif->dma_regs = base + 0x1000;

	pmif->kauai_fcr = base;

	pmif->irq = pdev->irq;

	pci_set_drvdata(pdev, pmif);

	memset(&hw, 0, sizeof(hw));
	pmac_ide_init_ports(&hw, pmif->regbase);
	hw.irq = pdev->irq;
	hw.dev = &pdev->dev;

	rc = pmac_ide_setup_device(pmif, &hw);
	if (rc != 0) {
		/* The inteface is released to the common IDE layer */
		pci_set_drvdata(pdev, NULL);
		iounmap(base);
		pci_release_regions(pdev);
		kfree(pmif);
	}

	return rc;

out_free_pmif:
	kfree(pmif);
	return rc;
}

static int
pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
	int rc = 0;

	if (mesg.event != pdev->dev.power.power_state.event
			&& (mesg.event & PM_EVENT_SLEEP)) {
		rc = pmac_ide_do_suspend(pmif);
		if (rc == 0)
			pdev->dev.power.power_state = mesg;
	}

	return rc;
}

static int
pmac_ide_pci_resume(struct pci_dev *pdev)
{
	pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
	int rc = 0;

	if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
		rc = pmac_ide_do_resume(pmif);
		if (rc == 0)
			pdev->dev.power.power_state = PMSG_ON;
	}

	return rc;
}

#ifdef CONFIG_PMAC_MEDIABAY
/* Media bay insertion/removal: (re)scan or unregister the port */
static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
{
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);

	switch(mb_state) {
	case MB_CD:
		if (!pmif->hwif->present)
			ide_port_scan(pmif->hwif);
		break;
	default:
		if (pmif->hwif->present)
			ide_port_unregister_devices(pmif->hwif);
	}
}
#endif /* CONFIG_PMAC_MEDIABAY */

static struct of_device_id pmac_ide_macio_match[] =
{
	{
	.name 		= "IDE",
	},
	{
	.name 		= "ATA",
	},
	{
	.type		= "ide",
	},
	{
	.type		= "ata",
	},
	{},
};

static struct macio_driver pmac_ide_macio_driver =
{
	.driver = {
		.name 		= "ide-pmac",
		.owner		= THIS_MODULE,
		.of_match_table	= pmac_ide_macio_match,
	},
	.probe		= pmac_ide_macio_attach,
	.suspend	= pmac_ide_macio_suspend,
	.resume		= pmac_ide_macio_resume,
#ifdef CONFIG_PMAC_MEDIABAY
	.mediabay_event	= pmac_ide_macio_mb_event,
#endif
};

static const struct pci_device_id pmac_ide_pci_match[] = {
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 },
	{},
};

static struct pci_driver pmac_ide_pci_driver = {
	.name		= "ide-pmac",
	.id_table	= pmac_ide_pci_match,
	.probe		= pmac_ide_pci_attach,
	.suspend	= pmac_ide_pci_suspend,
	.resume		= pmac_ide_pci_resume,
};
MODULE_DEVICE_TABLE(pci, pmac_ide_pci_match);

int __init pmac_ide_probe(void)
{
	int error;

	if (!machine_is(powermac))
		return -ENODEV;

	/* Registration order decides which bus gets probed first */
#ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
	error = pci_register_driver(&pmac_ide_pci_driver);
	if (error)
		goto out;
	error = macio_register_driver(&pmac_ide_macio_driver);
	if (error) {
		pci_unregister_driver(&pmac_ide_pci_driver);
		goto out;
	}
#else
	error = macio_register_driver(&pmac_ide_macio_driver);
	if (error)
		goto out;
	error = pci_register_driver(&pmac_ide_pci_driver);
	if (error) {
		macio_unregister_driver(&pmac_ide_macio_driver);
		goto out;
	}
#endif out: return error; } /* * pmac_ide_build_dmatable builds the DBDMA command list * for a transfer and sets the DBDMA channel to point to it. */ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); struct dbdma_cmd *table; volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; struct scatterlist *sg; int wr = !!(cmd->tf_flags & IDE_TFLAG_WRITE); int i = cmd->sg_nents, count = 0; /* DMA table is already aligned */ table = (struct dbdma_cmd *) pmif->dma_table_cpu; /* Make sure DMA controller is stopped (necessary ?) */ writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control); while (readl(&dma->status) & RUN) udelay(1); /* Build DBDMA commands list */ sg = hwif->sg_table; while (i && sg_dma_len(sg)) { u32 cur_addr; u32 cur_len; cur_addr = sg_dma_address(sg); cur_len = sg_dma_len(sg); if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) { if (pmif->broken_dma_warn == 0) { printk(KERN_WARNING "%s: DMA on non aligned address, " "switching to PIO on Ohare chipset\n", drive->name); pmif->broken_dma_warn = 1; } return 0; } while (cur_len) { unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00; if (count++ >= MAX_DCMDS) { printk(KERN_WARNING "%s: DMA table too small\n", drive->name); return 0; } st_le16(&table->command, wr? OUTPUT_MORE: INPUT_MORE); st_le16(&table->req_count, tc); st_le32(&table->phy_addr, cur_addr); table->cmd_dep = 0; table->xfer_status = 0; table->res_count = 0; cur_addr += tc; cur_len -= tc; ++table; } sg = sg_next(sg); i--; } /* convert the last command to an input/output last command */ if (count) { st_le16(&table[-1].command, wr? 
OUTPUT_LAST: INPUT_LAST); /* add the stop command to the end of the list */ memset(table, 0, sizeof(struct dbdma_cmd)); st_le16(&table->command, DBDMA_STOP); mb(); writel(hwif->dmatable_dma, &dma->cmdptr); return 1; } printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name); return 0; /* revert to PIO for this request */ } /* * Prepare a DMA transfer. We build the DMA table, adjust the timings for * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion */ static int pmac_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); u8 unit = drive->dn & 1, ata4 = (pmif->kind == controller_kl_ata4); u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE); if (pmac_ide_build_dmatable(drive, cmd) == 0) return 1; /* Apple adds 60ns to wrDataSetup on reads */ if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) { writel(pmif->timings[unit] + (write ? 0 : 0x00800000UL), PMAC_IDE_REG(IDE_TIMING_CONFIG)); (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG)); } return 0; } /* * Kick the DMA controller into life after the DMA command has been issued * to the drive. */ static void pmac_ide_dma_start(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); volatile struct dbdma_regs __iomem *dma; dma = pmif->dma_regs; writel((RUN << 16) | RUN, &dma->control); /* Make sure it gets to the controller right now */ (void)readl(&dma->control); } /* * After a DMA transfer, make sure the controller is stopped */ static int pmac_ide_dma_end (ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; u32 dstat; dstat = readl(&dma->status); writel(((RUN|WAKE|DEAD) << 16), &dma->control); /* verify good dma status. we don't check for ACTIVE beeing 0. We should... 
* in theory, but with ATAPI decices doing buffer underruns, that would * cause us to disable DMA, which isn't what we want */ return (dstat & (RUN|DEAD)) != RUN; } /* * Check out that the interrupt we got was for us. We can't always know this * for sure with those Apple interfaces (well, we could on the recent ones but * that's not implemented yet), on the other hand, we don't have shared interrupts * so it's not really a problem */ static int pmac_ide_dma_test_irq (ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; unsigned long status, timeout; /* We have to things to deal with here: * * - The dbdma won't stop if the command was started * but completed with an error without transferring all * datas. This happens when bad blocks are met during * a multi-block transfer. * * - The dbdma fifo hasn't yet finished flushing to * to system memory when the disk interrupt occurs. * */ /* If ACTIVE is cleared, the STOP command have passed and * transfer is complete. */ status = readl(&dma->status); if (!(status & ACTIVE)) return 1; /* If dbdma didn't execute the STOP command yet, the * active bit is still set. 
We consider that we aren't * sharing interrupts (which is hopefully the case with * those controllers) and so we just try to flush the * channel for pending data in the fifo */ udelay(1); writel((FLUSH << 16) | FLUSH, &dma->control); timeout = 0; for (;;) { udelay(1); status = readl(&dma->status); if ((status & FLUSH) == 0) break; if (++timeout > 100) { printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n", hwif->index); break; } } return 1; } static void pmac_ide_dma_host_set(ide_drive_t *drive, int on) { } static void pmac_ide_dma_lost_irq (ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; unsigned long status = readl(&dma->status); printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status); } static const struct ide_dma_ops pmac_dma_ops = { .dma_host_set = pmac_ide_dma_host_set, .dma_setup = pmac_ide_dma_setup, .dma_start = pmac_ide_dma_start, .dma_end = pmac_ide_dma_end, .dma_test_irq = pmac_ide_dma_test_irq, .dma_lost_irq = pmac_ide_dma_lost_irq, }; /* * Allocate the data structures needed for using DMA with an interface * and fill the proper list of functions pointers */ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d) { pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); struct pci_dev *dev = to_pci_dev(hwif->dev); /* We won't need pci_dev if we switch to generic consistent * DMA routines ... */ if (dev == NULL || pmif->dma_regs == 0) return -ENODEV; /* * Allocate space for the DBDMA commands. * The +2 is +1 for the stop command and +1 to allow for * aligning the start address to a multiple of 16 bytes. 
*/ pmif->dma_table_cpu = pci_alloc_consistent( dev, (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd), &hwif->dmatable_dma); if (pmif->dma_table_cpu == NULL) { printk(KERN_ERR "%s: unable to allocate DMA command list\n", hwif->name); return -ENOMEM; } hwif->sg_max_nents = MAX_DCMDS; return 0; } module_init(pmac_ide_probe); MODULE_LICENSE("GPL");
gpl-2.0
MSM8226-Samsung/android_kernel_samsung_s3ve3g
drivers/net/wireless/b43/wa.c
8558
18347
/* Broadcom B43 wireless driver PHY workarounds. Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it> Copyright (c) 2005-2007 Michael Buesch <m@bues.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "main.h" #include "tables.h" #include "phy_common.h" #include "wa.h" static void b43_wa_papd(struct b43_wldev *dev) { u16 backup; backup = b43_ofdmtab_read16(dev, B43_OFDMTAB_PWRDYN2, 0); b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, 7); b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 0, 0); b43_dummy_transmission(dev, true, true); b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, backup); } static void b43_wa_auxclipthr(struct b43_wldev *dev) { b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x3800); } static void b43_wa_afcdac(struct b43_wldev *dev) { b43_phy_write(dev, 0x0035, 0x03FF); b43_phy_write(dev, 0x0036, 0x0400); } static void b43_wa_txdc_offset(struct b43_wldev *dev) { b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 0, 0x0051); } void b43_wa_initgains(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; b43_phy_write(dev, B43_PHY_LNAHPFCTL, 0x1FF9); b43_phy_mask(dev, B43_PHY_LPFGAINCTL, 0xFF0F); if (phy->rev <= 2) b43_ofdmtab_write16(dev, B43_OFDMTAB_LPFGAIN, 0, 0x1FBF); b43_radio_write16(dev, 0x0002, 0x1FBF); b43_phy_write(dev, 0x0024, 0x4680); b43_phy_write(dev, 0x0020, 0x0003); 
b43_phy_write(dev, 0x001D, 0x0F40); b43_phy_write(dev, 0x001F, 0x1C00); if (phy->rev <= 3) b43_phy_maskset(dev, 0x002A, 0x00FF, 0x0400); else if (phy->rev == 5) { b43_phy_maskset(dev, 0x002A, 0x00FF, 0x1A00); b43_phy_write(dev, 0x00CC, 0x2121); } if (phy->rev >= 3) b43_phy_write(dev, 0x00BA, 0x3ED5); } static void b43_wa_divider(struct b43_wldev *dev) { b43_phy_mask(dev, 0x002B, ~0x0100); b43_phy_write(dev, 0x008E, 0x58C1); } static void b43_wa_gt(struct b43_wldev *dev) /* Gain table. */ { if (dev->phy.rev <= 2) { b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 0, 15); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 1, 31); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 2, 42); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 3, 48); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 4, 58); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 0, 3); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 1, 3); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 2, 7); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25); } } static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */ { int i; if (0 /* FIXME: For APHY.rev=2 this might be needed */) { for (i = 0; i < 8; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_RSSI, i, i + 8); for (i = 8; i < 16; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_RSSI, i, i - 8); } else { for (i = 0; i < 
64; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_RSSI, i, i); } } static void b43_wa_analog(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; u16 ofdmrev; ofdmrev = b43_phy_read(dev, B43_PHY_VERSION_OFDM) & B43_PHYVER_VERSION; if (ofdmrev > 2) { if (phy->type == B43_PHYTYPE_A) b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1808); else b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 3, 0x1044); b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 4, 0x7201); b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 6, 0x0040); } } static void b43_wa_dac(struct b43_wldev *dev) { if (dev->phy.analog == 1) b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0034) | 0x0008); else b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0078) | 0x0010); } static void b43_wa_fft(struct b43_wldev *dev) /* Fine frequency table */ { int i; if (dev->phy.type == B43_PHYTYPE_A) for (i = 0; i < B43_TAB_FINEFREQA_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqa[i]); else for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqg[i]); } static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */ { struct b43_phy *phy = &dev->phy; int i; if (phy->type == B43_PHYTYPE_A) { if (phy->rev == 2) for (i = 0; i < B43_TAB_NOISEA2_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea2[i]); else for (i = 0; i < B43_TAB_NOISEA3_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea3[i]); } else { if (phy->rev == 1) for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg1[i]); else for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg2[i]); } } static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */ { int i; for (i = 0; i < B43_TAB_ROTOR_SIZE; i++) 
b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]); } static void b43_write_null_nst(struct b43_wldev *dev) { int i; for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, 0); } static void b43_write_nst(struct b43_wldev *dev, const u16 *nst) { int i; for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, nst[i]); } static void b43_wa_nst(struct b43_wldev *dev) /* Noise scale table */ { struct b43_phy *phy = &dev->phy; if (phy->type == B43_PHYTYPE_A) { if (phy->rev <= 1) b43_write_null_nst(dev); else if (phy->rev == 2) b43_write_nst(dev, b43_tab_noisescalea2); else if (phy->rev == 3) b43_write_nst(dev, b43_tab_noisescalea3); else b43_write_nst(dev, b43_tab_noisescaleg3); } else { if (phy->rev >= 6) { if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) b43_write_nst(dev, b43_tab_noisescaleg3); else b43_write_nst(dev, b43_tab_noisescaleg2); } else { b43_write_nst(dev, b43_tab_noisescaleg1); } } } static void b43_wa_art(struct b43_wldev *dev) /* ADV retard table */ { int i; for (i = 0; i < B43_TAB_RETARD_SIZE; i++) b43_ofdmtab_write32(dev, B43_OFDMTAB_ADVRETARD, i, b43_tab_retard[i]); } static void b43_wa_txlna_gain(struct b43_wldev *dev) { b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 13, 0x0000); } static void b43_wa_crs_reset(struct b43_wldev *dev) { b43_phy_write(dev, 0x002C, 0x0064); } static void b43_wa_2060txlna_gain(struct b43_wldev *dev) { b43_hf_write(dev, b43_hf_read(dev) | B43_HF_2060W); } static void b43_wa_lms(struct b43_wldev *dev) { b43_phy_maskset(dev, 0x0055, 0xFFC0, 0x0004); } static void b43_wa_mixedsignal(struct b43_wldev *dev) { b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, 3); } static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */ { struct b43_phy *phy = &dev->phy; int i; const u16 *tab; if (phy->type == B43_PHYTYPE_A) { tab = b43_tab_sigmasqr1; } else if (phy->type == B43_PHYTYPE_G) { tab = b43_tab_sigmasqr2; } else { 
B43_WARN_ON(1); return; } for (i = 0; i < B43_TAB_SIGMASQR_SIZE; i++) { b43_ofdmtab_write16(dev, B43_OFDMTAB_MINSIGSQ, i, tab[i]); } } static void b43_wa_iqadc(struct b43_wldev *dev) { if (dev->phy.analog == 4) b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 0, b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 0) & ~0xF000); } static void b43_wa_crs_ed(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; if (phy->rev == 1) { b43_phy_write(dev, B43_PHY_CRSTHRES1_R1, 0x4F19); } else if (phy->rev == 2) { b43_phy_write(dev, B43_PHY_CRSTHRES1, 0x1861); b43_phy_write(dev, B43_PHY_CRSTHRES2, 0x0271); b43_phy_set(dev, B43_PHY_ANTDWELL, 0x0800); } else { b43_phy_write(dev, B43_PHY_CRSTHRES1, 0x0098); b43_phy_write(dev, B43_PHY_CRSTHRES2, 0x0070); b43_phy_write(dev, B43_PHY_OFDM(0xC9), 0x0080); b43_phy_set(dev, B43_PHY_ANTDWELL, 0x0800); } } static void b43_wa_crs_thr(struct b43_wldev *dev) { b43_phy_maskset(dev, B43_PHY_CRS0, ~0x03C0, 0xD000); } static void b43_wa_crs_blank(struct b43_wldev *dev) { b43_phy_write(dev, B43_PHY_OFDM(0x2C), 0x005A); } static void b43_wa_cck_shiftbits(struct b43_wldev *dev) { b43_phy_write(dev, B43_PHY_CCKSHIFTBITS, 0x0026); } static void b43_wa_wrssi_offset(struct b43_wldev *dev) { int i; if (dev->phy.rev == 1) { for (i = 0; i < 16; i++) { b43_ofdmtab_write16(dev, B43_OFDMTAB_WRSSI_R1, i, 0x0020); } } else { for (i = 0; i < 32; i++) { b43_ofdmtab_write16(dev, B43_OFDMTAB_WRSSI, i, 0x0820); } } } static void b43_wa_txpuoff_rxpuon(struct b43_wldev *dev) { b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_0F, 2, 15); b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_0F, 3, 20); } static void b43_wa_altagc(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; if (phy->rev == 1) { b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 254); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 1, 13); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 2, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 3, 25); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 0, 0x2710); 
b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 1, 0x9B83); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 2, 0x9B83); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 3, 0x0F8D); b43_phy_write(dev, B43_PHY_LMS, 4); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0, 254); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 1, 13); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 2, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 3, 25); } b43_phy_maskset(dev, B43_PHY_CCKSHIFTBITS_WA, 0x00FF, 0x5700); b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x007F, 0x000F); b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x3F80, 0x2B80); b43_phy_maskset(dev, B43_PHY_ANTWRSETT, 0xF0FF, 0x0300); b43_radio_set(dev, 0x7A, 0x0008); b43_phy_maskset(dev, B43_PHY_N1P1GAIN, ~0x000F, 0x0008); b43_phy_maskset(dev, B43_PHY_P1P2GAIN, ~0x0F00, 0x0600); b43_phy_maskset(dev, B43_PHY_N1N2GAIN, ~0x0F00, 0x0700); b43_phy_maskset(dev, B43_PHY_N1P1GAIN, ~0x0F00, 0x0100); if (phy->rev == 1) { b43_phy_maskset(dev, B43_PHY_N1N2GAIN, ~0x000F, 0x0007); } b43_phy_maskset(dev, B43_PHY_OFDM(0x88), ~0x00FF, 0x001C); b43_phy_maskset(dev, B43_PHY_OFDM(0x88), ~0x3F00, 0x0200); b43_phy_maskset(dev, B43_PHY_OFDM(0x96), ~0x00FF, 0x001C); b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x00FF, 0x0020); b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x3F00, 0x0200); b43_phy_maskset(dev, B43_PHY_OFDM(0x82), ~0x00FF, 0x002E); b43_phy_maskset(dev, B43_PHY_OFDM(0x96), 0x00FF, 0x1A00); b43_phy_maskset(dev, B43_PHY_OFDM(0x81), ~0x00FF, 0x0028); b43_phy_maskset(dev, B43_PHY_OFDM(0x81), 0x00FF, 0x2C00); if (phy->rev == 1) { b43_phy_write(dev, B43_PHY_PEAK_COUNT, 0x092B); b43_phy_maskset(dev, B43_PHY_OFDM(0x1B), ~0x001E, 0x0002); } else { b43_phy_mask(dev, B43_PHY_OFDM(0x1B), ~0x001E); b43_phy_write(dev, B43_PHY_OFDM(0x1F), 0x287A); b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, ~0x000F, 0x0004); if (phy->rev >= 6) { b43_phy_write(dev, B43_PHY_OFDM(0x22), 0x287A); b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, 0x0FFF, 0x3000); } } b43_phy_maskset(dev, B43_PHY_DIVSRCHIDX, 0x8080, 
0x7874); b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x1C00); if (phy->rev == 1) { b43_phy_maskset(dev, B43_PHY_DIVP1P2GAIN, ~0x0F00, 0x0600); b43_phy_write(dev, B43_PHY_OFDM(0x8B), 0x005E); b43_phy_maskset(dev, B43_PHY_ANTWRSETT, ~0x00FF, 0x001E); b43_phy_write(dev, B43_PHY_OFDM(0x8D), 0x0002); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 0, 0); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 1, 7); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 2, 16); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 3, 28); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 0, 0); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 1, 7); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 2, 16); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 3, 28); } if (phy->rev >= 6) { b43_phy_mask(dev, B43_PHY_OFDM(0x26), ~0x0003); b43_phy_mask(dev, B43_PHY_OFDM(0x26), ~0x1000); } b43_phy_read(dev, B43_PHY_VERSION_OFDM); /* Dummy read */ } static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */ { b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480); } static void b43_wa_cpll_nonpilot(struct b43_wldev *dev) { b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 0, 0); b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 1, 0); } static void b43_wa_rssi_adc(struct b43_wldev *dev) { if (dev->phy.analog == 4) b43_phy_write(dev, 0x00DC, 0x7454); } static void b43_wa_boards_a(struct b43_wldev *dev) { if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM && dev->dev->board_type == SSB_BOARD_BU4306 && dev->dev->board_rev < 0x30) { b43_phy_write(dev, 0x0010, 0xE000); b43_phy_write(dev, 0x0013, 0x0140); b43_phy_write(dev, 0x0014, 0x0280); } else { if (dev->dev->board_type == SSB_BOARD_MP4318 && dev->dev->board_rev < 0x20) { b43_phy_write(dev, 0x0013, 0x0210); b43_phy_write(dev, 0x0014, 0x0840); } else { b43_phy_write(dev, 0x0013, 0x0140); b43_phy_write(dev, 0x0014, 0x0280); } if (dev->phy.rev <= 4) b43_phy_write(dev, 0x0010, 0xE000); else b43_phy_write(dev, 0x0010, 0x2000); b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 
1, 0x0039); b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 7, 0x0040); } } static void b43_wa_boards_g(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; struct b43_phy *phy = &dev->phy; if (dev->dev->board_vendor != SSB_BOARDVENDOR_BCM || dev->dev->board_type != SSB_BOARD_BU4306 || dev->dev->board_rev != 0x17) { if (phy->rev < 2) { b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX_R1, 1, 0x0002); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX_R1, 2, 0x0001); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 1, 0x0002); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 2, 0x0001); if ((sprom->boardflags_lo & B43_BFL_EXTLNA) && (phy->rev >= 7)) { b43_phy_mask(dev, B43_PHY_EXTG(0x11), 0xF7FF); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0020, 0x0001); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0021, 0x0001); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0022, 0x0001); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0023, 0x0000); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0000, 0x0000); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0003, 0x0002); } } } if (sprom->boardflags_lo & B43_BFL_FEM) { b43_phy_write(dev, B43_PHY_GTABCTL, 0x3120); b43_phy_write(dev, B43_PHY_GTABDATA, 0xC480); } } void b43_wa_all(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; if (phy->type == B43_PHYTYPE_A) { switch (phy->rev) { case 2: b43_wa_papd(dev); b43_wa_auxclipthr(dev); b43_wa_afcdac(dev); b43_wa_txdc_offset(dev); b43_wa_initgains(dev); b43_wa_divider(dev); b43_wa_gt(dev); b43_wa_rssi_lt(dev); b43_wa_analog(dev); b43_wa_dac(dev); b43_wa_fft(dev); b43_wa_nft(dev); b43_wa_rt(dev); b43_wa_nst(dev); b43_wa_art(dev); b43_wa_txlna_gain(dev); b43_wa_crs_reset(dev); b43_wa_2060txlna_gain(dev); b43_wa_lms(dev); break; case 3: b43_wa_papd(dev); b43_wa_mixedsignal(dev); b43_wa_rssi_lt(dev); b43_wa_txdc_offset(dev); b43_wa_initgains(dev); b43_wa_dac(dev); b43_wa_nft(dev); b43_wa_nst(dev); b43_wa_msst(dev); b43_wa_analog(dev); b43_wa_gt(dev); b43_wa_txpuoff_rxpuon(dev); 
b43_wa_txlna_gain(dev); break; case 5: b43_wa_iqadc(dev); case 6: b43_wa_papd(dev); b43_wa_rssi_lt(dev); b43_wa_txdc_offset(dev); b43_wa_initgains(dev); b43_wa_dac(dev); b43_wa_nft(dev); b43_wa_nst(dev); b43_wa_msst(dev); b43_wa_analog(dev); b43_wa_gt(dev); b43_wa_txpuoff_rxpuon(dev); b43_wa_txlna_gain(dev); break; case 7: b43_wa_iqadc(dev); b43_wa_papd(dev); b43_wa_rssi_lt(dev); b43_wa_txdc_offset(dev); b43_wa_initgains(dev); b43_wa_dac(dev); b43_wa_nft(dev); b43_wa_nst(dev); b43_wa_msst(dev); b43_wa_analog(dev); b43_wa_gt(dev); b43_wa_txpuoff_rxpuon(dev); b43_wa_txlna_gain(dev); b43_wa_rssi_adc(dev); default: B43_WARN_ON(1); } b43_wa_boards_a(dev); } else if (phy->type == B43_PHYTYPE_G) { switch (phy->rev) { case 1://XXX review rev1 b43_wa_crs_ed(dev); b43_wa_crs_thr(dev); b43_wa_crs_blank(dev); b43_wa_cck_shiftbits(dev); b43_wa_fft(dev); b43_wa_nft(dev); b43_wa_rt(dev); b43_wa_nst(dev); b43_wa_art(dev); b43_wa_wrssi_offset(dev); b43_wa_altagc(dev); break; case 2: case 6: case 7: case 8: case 9: b43_wa_tr_ltov(dev); b43_wa_crs_ed(dev); b43_wa_rssi_lt(dev); b43_wa_nft(dev); b43_wa_nst(dev); b43_wa_msst(dev); b43_wa_wrssi_offset(dev); b43_wa_altagc(dev); b43_wa_analog(dev); b43_wa_txpuoff_rxpuon(dev); break; default: B43_WARN_ON(1); } b43_wa_boards_g(dev); } else { /* No N PHY support so far, LP PHY is in phy_lp.c */ B43_WARN_ON(1); } b43_wa_cpll_nonpilot(dev); }
gpl-2.0
kykc/m7u-3.4.10-g4dad4ce
drivers/net/wireless/ipw2x00/libipw_geo.c
9326
5482
/****************************************************************************** Copyright(c) 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The full GNU General Public License is included in this distribution in the file called LICENSE. Contact Information: Intel Linux Wireless <ilw@linux.intel.com> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ******************************************************************************/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <asm/uaccess.h> #include "libipw.h" int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel) { int i; /* Driver needs to initialize the geography map before using * these helper functions */ if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) return 0; if (ieee->freq_band & LIBIPW_24GHZ_BAND) for (i = 0; i < ieee->geo.bg_channels; i++) /* NOTE: If G mode is currently supported but * this is a B only channel, we don't see it * as valid. 
*/ if ((ieee->geo.bg[i].channel == channel) && !(ieee->geo.bg[i].flags & LIBIPW_CH_INVALID) && (!(ieee->mode & IEEE_G) || !(ieee->geo.bg[i].flags & LIBIPW_CH_B_ONLY))) return LIBIPW_24GHZ_BAND; if (ieee->freq_band & LIBIPW_52GHZ_BAND) for (i = 0; i < ieee->geo.a_channels; i++) if ((ieee->geo.a[i].channel == channel) && !(ieee->geo.a[i].flags & LIBIPW_CH_INVALID)) return LIBIPW_52GHZ_BAND; return 0; } int libipw_channel_to_index(struct libipw_device *ieee, u8 channel) { int i; /* Driver needs to initialize the geography map before using * these helper functions */ if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) return -1; if (ieee->freq_band & LIBIPW_24GHZ_BAND) for (i = 0; i < ieee->geo.bg_channels; i++) if (ieee->geo.bg[i].channel == channel) return i; if (ieee->freq_band & LIBIPW_52GHZ_BAND) for (i = 0; i < ieee->geo.a_channels; i++) if (ieee->geo.a[i].channel == channel) return i; return -1; } u32 libipw_channel_to_freq(struct libipw_device * ieee, u8 channel) { const struct libipw_channel * ch; /* Driver needs to initialize the geography map before using * these helper functions */ if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) return 0; ch = libipw_get_channel(ieee, channel); if (!ch->channel) return 0; return ch->freq; } u8 libipw_freq_to_channel(struct libipw_device * ieee, u32 freq) { int i; /* Driver needs to initialize the geography map before using * these helper functions */ if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) return 0; freq /= 100000; if (ieee->freq_band & LIBIPW_24GHZ_BAND) for (i = 0; i < ieee->geo.bg_channels; i++) if (ieee->geo.bg[i].freq == freq) return ieee->geo.bg[i].channel; if (ieee->freq_band & LIBIPW_52GHZ_BAND) for (i = 0; i < ieee->geo.a_channels; i++) if (ieee->geo.a[i].freq == freq) return ieee->geo.a[i].channel; return 0; } int libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo) { memcpy(ieee->geo.name, geo->name, 3); ieee->geo.name[3] = '\0'; ieee->geo.bg_channels 
= geo->bg_channels; ieee->geo.a_channels = geo->a_channels; memcpy(ieee->geo.bg, geo->bg, geo->bg_channels * sizeof(struct libipw_channel)); memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels * sizeof(struct libipw_channel)); return 0; } const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee) { return &ieee->geo; } u8 libipw_get_channel_flags(struct libipw_device * ieee, u8 channel) { int index = libipw_channel_to_index(ieee, channel); if (index == -1) return LIBIPW_CH_INVALID; if (channel <= LIBIPW_24GHZ_CHANNELS) return ieee->geo.bg[index].flags; return ieee->geo.a[index].flags; } static const struct libipw_channel bad_channel = { .channel = 0, .flags = LIBIPW_CH_INVALID, .max_power = 0, }; const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee, u8 channel) { int index = libipw_channel_to_index(ieee, channel); if (index == -1) return &bad_channel; if (channel <= LIBIPW_24GHZ_CHANNELS) return &ieee->geo.bg[index]; return &ieee->geo.a[index]; } EXPORT_SYMBOL(libipw_get_channel); EXPORT_SYMBOL(libipw_get_channel_flags); EXPORT_SYMBOL(libipw_is_valid_channel); EXPORT_SYMBOL(libipw_freq_to_channel); EXPORT_SYMBOL(libipw_channel_to_freq); EXPORT_SYMBOL(libipw_channel_to_index); EXPORT_SYMBOL(libipw_set_geo); EXPORT_SYMBOL(libipw_get_geo);
gpl-2.0
LiquidSmooth-Devices/kernel_samsung_jf
net/sunrpc/auth_gss/gss_krb5_seqnum.c
11630
4619
/* * linux/net/sunrpc/gss_krb5_seqnum.c * * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/util_seqnum.c * * Copyright (c) 2000 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> */ /* * Copyright 1993 by OpenVision Technologies, Inc. * * Permission to use, copy, modify, distribute, and sell this software * and its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appears in all copies and * that both that copyright notice and this permission notice appear in * supporting documentation, and that the name of OpenVision not be used * in advertising or publicity pertaining to distribution of the software * without specific, written prior permission. OpenVision makes no * representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied warranty. * * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. 
 */

/*
 * Sequence-number helpers for the RPCSEC_GSS Kerberos 5 mechanism.
 *
 * Each GSS token carries an 8-byte encrypted block: 4 bytes of sequence
 * number followed by 4 copies of the direction octet.  These helpers
 * build ("make") and recover ("get") that block.  Note the byte order
 * differs by enctype: the RC4 (arcfour-hmac) variants are big-endian,
 * the DES-style default path is little-endian — both orders are
 * mandated by the respective RFCs, so they must not be "unified".
 */

#include <linux/types.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif

/*
 * Encrypt the sequence number for an arcfour-hmac context.
 *
 * A fresh blkcipher transform is allocated per call because the RC4
 * sequence key is derived from the checksum of this very token
 * (krb5_rc4_setup_seq_key), so the key differs for every message.
 * Returns 0 on success or a negative errno / krb5 code.
 */
static s32
krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
		      unsigned char *cksum, unsigned char *buf)
{
	struct crypto_blkcipher *cipher;
	unsigned char plain[8];
	s32 code;

	dprintk("RPC: %s:\n", __func__);
	cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
					CRYPTO_ALG_ASYNC);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	/* RC4 variant: sequence number is serialized big-endian. */
	plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	plain[3] = (unsigned char) ((seqnum >> 0) & 0xff);
	/* Direction octet is replicated to fill the 8-byte block. */
	plain[4] = direction;
	plain[5] = direction;
	plain[6] = direction;
	plain[7] = direction;

	code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
	if (code)
		goto out;

	code = krb5_encrypt(cipher, cksum, plain, buf, 8);
out:
	crypto_free_blkcipher(cipher);
	return code;
}

/*
 * Build the encrypted sequence-number block for an outgoing token.
 * Dispatches to the RC4 helper for arcfour-hmac contexts; otherwise
 * serializes little-endian and encrypts with the context's seq key.
 */
s32
krb5_make_seq_num(struct krb5_ctx *kctx,
		struct crypto_blkcipher *key,
		int direction,
		u32 seqnum,
		unsigned char *cksum, unsigned char *buf)
{
	unsigned char plain[8];

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
		return krb5_make_rc4_seq_num(kctx, direction,
					     seqnum, cksum, buf);

	/* Default (DES-style) path: little-endian sequence number. */
	plain[0] = (unsigned char) (seqnum & 0xff);
	plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
	plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
	plain[3] = (unsigned char) ((seqnum >> 24) & 0xff);
	plain[4] = direction;
	plain[5] = direction;
	plain[6] = direction;
	plain[7] = direction;

	return krb5_encrypt(key, cksum, plain, buf, 8);
}

/*
 * Decrypt and validate the sequence-number block of a received
 * arcfour-hmac token.  The four direction octets must agree or the
 * block is rejected with KG_BAD_SEQ (tamper/garble detection).
 */
static s32
krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
		     unsigned char *buf, int *direction, s32 *seqnum)
{
	struct crypto_blkcipher *cipher;
	unsigned char plain[8];
	s32 code;

	dprintk("RPC: %s:\n", __func__);
	cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
					CRYPTO_ALG_ASYNC);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
	if (code)
		goto out;

	code = krb5_decrypt(cipher, cksum, buf, plain, 8);
	if (code)
		goto out;

	if ((plain[4] != plain[5]) || (plain[4] != plain[6])
				   || (plain[4] != plain[7])) {
		code = (s32)KG_BAD_SEQ;
		goto out;
	}

	*direction = plain[4];

	/* Big-endian reassembly, mirroring krb5_make_rc4_seq_num(). */
	*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
					(plain[2] << 8) | (plain[3]));
out:
	crypto_free_blkcipher(cipher);
	return code;
}

/*
 * Decrypt and validate the sequence-number block of a received token.
 * Returns 0 and fills *direction / *seqnum, or KG_BAD_SEQ when the
 * direction octets disagree, or a negative code from krb5_decrypt().
 */
s32
krb5_get_seq_num(struct krb5_ctx *kctx,
	       unsigned char *cksum,
	       unsigned char *buf,
	       int *direction, u32 *seqnum)
{
	s32 code;
	unsigned char plain[8];
	struct crypto_blkcipher *key = kctx->seq;

	dprintk("RPC: krb5_get_seq_num:\n");

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
		return krb5_get_rc4_seq_num(kctx, cksum, buf,
					    direction, seqnum);

	if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
		return code;

	if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
	    (plain[4] != plain[7]))
		return (s32)KG_BAD_SEQ;

	*direction = plain[4];

	/* Little-endian reassembly, mirroring krb5_make_seq_num(). */
	*seqnum = ((plain[0]) | (plain[1] << 8) |
		   (plain[2] << 16) | (plain[3] << 24));

	return 0;
}
gpl-2.0
kogone/android_kernel_oneplus_msm8974
net/bridge/netfilter/ebt_among.c
11630
5752
/*
 *  ebt_among
 *
 *	Authors:
 *	Grzegorz Borowiak <grzes@gnu.univ.gda.pl>
 *
 *  August, 2003
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_among.h>

/*
 * Look up (mac, ip) in a wormhash.  A zero p->ip entry is a wildcard
 * that matches any IP; when the caller has no IP (ip == 0) only
 * wildcard entries can match.  Returns true on a hit.
 */
static bool ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
				      const char *mac, __be32 ip)
{
	/* You may be puzzled as to how this code works.
	 * Some tricks were used, refer to
	 * 	include/linux/netfilter_bridge/ebt_among.h
	 * as there you can find a solution of this mystery.
	 */
	const struct ebt_mac_wormhash_tuple *p;
	int start, limit, i;
	uint32_t cmp[2] = { 0, 0 };
	/* hash key is the last MAC octet; table[] holds 257 bucket offsets */
	int key = ((const unsigned char *)mac)[5];

	/* pack the 6-byte MAC into cmp[] offset by 2 so the two u32
	 * compares below line up with the tuple layout in the header */
	memcpy(((char *) cmp) + 2, mac, 6);
	start = wh->table[key];
	limit = wh->table[key + 1];
	if (ip) {
		for (i = start; i < limit; i++) {
			p = &wh->pool[i];
			if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0])
				if (p->ip == 0 || p->ip == ip)
					return true;
		}
	} else {
		for (i = start; i < limit; i++) {
			p = &wh->pool[i];
			if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0])
				if (p->ip == 0)
					return true;
		}
	}
	return false;
}

/*
 * Sanity-check a userspace-supplied wormhash: bucket offsets must be
 * non-negative, monotonically non-decreasing and within poolsize.
 * Returns 0 if consistent, else a negative code encoding the failing
 * bucket index (used only for the diagnostic printout by the caller).
 */
static int ebt_mac_wormhash_check_integrity(const struct ebt_mac_wormhash
					    *wh)
{
	int i;

	for (i = 0; i < 256; i++) {
		if (wh->table[i] > wh->table[i + 1])
			return -0x100 - i;
		if (wh->table[i] < 0)
			return -0x200 - i;
		if (wh->table[i] > wh->poolsize)
			return -0x300 - i;
	}
	if (wh->table[256] > wh->poolsize)
		return -0xc00;
	return 0;
}

/*
 * Extract the destination IPv4 address from an IP or ARP frame.
 * For ARP the target IP sits after the header, the sender MAC+IP and
 * the target MAC (hence the 2*ETH_ALEN + sizeof(__be32) offset).
 * Returns 0 on success (or for other ethertypes, leaving *addr as the
 * caller initialized it), -1 if the headers cannot be read.
 */
static int get_ip_dst(const struct sk_buff *skb, __be32 *addr)
{
	if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
		const struct iphdr *ih;
		struct iphdr _iph;

		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
		if (ih == NULL)
			return -1;
		*addr = ih->daddr;
	} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
		const struct arphdr *ah;
		struct arphdr _arph;
		const __be32 *bp;
		__be32 buf;

		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
		if (ah == NULL ||
		    ah->ar_pln != sizeof(__be32) ||
		    ah->ar_hln != ETH_ALEN)
			return -1;
		bp = skb_header_pointer(skb, sizeof(struct arphdr) +
					2 * ETH_ALEN + sizeof(__be32),
					sizeof(__be32), &buf);
		if (bp == NULL)
			return -1;
		*addr = *bp;
	}
	return 0;
}

/*
 * Extract the source IPv4 address from an IP or ARP frame.  For ARP
 * the sender IP immediately follows the sender MAC.  Same return
 * convention as get_ip_dst().
 */
static int get_ip_src(const struct sk_buff *skb, __be32 *addr)
{
	if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
		const struct iphdr *ih;
		struct iphdr _iph;

		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
		if (ih == NULL)
			return -1;
		*addr = ih->saddr;
	} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
		const struct arphdr *ah;
		struct arphdr _arph;
		const __be32 *bp;
		__be32 buf;

		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
		if (ah == NULL ||
		    ah->ar_pln != sizeof(__be32) ||
		    ah->ar_hln != ETH_ALEN)
			return -1;
		bp = skb_header_pointer(skb, sizeof(struct arphdr) +
					ETH_ALEN, sizeof(__be32), &buf);
		if (bp == NULL)
			return -1;
		*addr = *bp;
	}
	return 0;
}

/*
 * Match callback: test the frame's source/destination MAC (+IP) against
 * the optional src/dst wormhash lists, honouring the NEG flags.
 * Returns true only when every configured list agrees.
 */
static bool
ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct ebt_among_info *info = par->matchinfo;
	const char *dmac, *smac;
	const struct ebt_mac_wormhash *wh_dst, *wh_src;
	__be32 dip = 0, sip = 0;

	wh_dst = ebt_among_wh_dst(info);
	wh_src = ebt_among_wh_src(info);

	if (wh_src) {
		smac = eth_hdr(skb)->h_source;
		if (get_ip_src(skb, &sip))
			return false;
		if (!(info->bitmask & EBT_AMONG_SRC_NEG)) {
			/* we match only if it contains */
			if (!ebt_mac_wormhash_contains(wh_src, smac, sip))
				return false;
		} else {
			/* we match only if it DOES NOT contain */
			if (ebt_mac_wormhash_contains(wh_src, smac, sip))
				return false;
		}
	}

	if (wh_dst) {
		dmac = eth_hdr(skb)->h_dest;
		if (get_ip_dst(skb, &dip))
			return false;
		if (!(info->bitmask & EBT_AMONG_DST_NEG)) {
			/* we match only if it contains */
			if (!ebt_mac_wormhash_contains(wh_dst, dmac, dip))
				return false;
		} else {
			/* we match only if it DOES NOT contain */
			if (ebt_mac_wormhash_contains(wh_dst, dmac, dip))
				return false;
		}
	}
	return true;
}

/*
 * checkentry: verify the userspace blob — total size must equal the
 * base struct plus both wormhashes (after EBT_ALIGN), and each present
 * wormhash must pass the integrity check.
 *
 * NOTE(review): ebt_among_wh_dst/src() dereference offsets taken from
 * info before em->match_size is verified, and the offsets themselves
 * (wh_dst_ofs/wh_src_ofs) are not range-checked here.  Upstream later
 * hardened this path (CVE-2018-1068 era fixes) — confirm whether those
 * wormhash offset/size bounds checks should be backported.
 */
static int ebt_among_mt_check(const struct xt_mtchk_param *par)
{
	const struct ebt_among_info *info = par->matchinfo;
	const struct ebt_entry_match *em =
		container_of(par->matchinfo, const struct ebt_entry_match,
			     data);
	int expected_length = sizeof(struct ebt_among_info);
	const struct ebt_mac_wormhash *wh_dst, *wh_src;
	int err;

	wh_dst = ebt_among_wh_dst(info);
	wh_src = ebt_among_wh_src(info);
	expected_length += ebt_mac_wormhash_size(wh_dst);
	expected_length += ebt_mac_wormhash_size(wh_src);

	if (em->match_size != EBT_ALIGN(expected_length)) {
		pr_info("wrong size: %d against expected %d, rounded to %Zd\n",
			em->match_size, expected_length,
			EBT_ALIGN(expected_length));
		return -EINVAL;
	}
	if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) {
		pr_info("dst integrity fail: %x\n", -err);
		return -EINVAL;
	}
	if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) {
		pr_info("src integrity fail: %x\n", -err);
		return -EINVAL;
	}
	return 0;
}

static struct xt_match ebt_among_mt_reg __read_mostly = {
	.name		= "among",
	.revision	= 0,
	.family		= NFPROTO_BRIDGE,
	.match		= ebt_among_mt,
	.checkentry	= ebt_among_mt_check,
	.matchsize	= -1, /* special case */
	.me		= THIS_MODULE,
};

static int __init ebt_among_init(void)
{
	return xt_register_match(&ebt_among_mt_reg);
}

static void __exit ebt_among_fini(void)
{
	xt_unregister_match(&ebt_among_mt_reg);
}

module_init(ebt_among_init);
module_exit(ebt_among_fini);
MODULE_DESCRIPTION("Ebtables: Combined MAC/IP address list matching");
MODULE_LICENSE("GPL");
gpl-2.0
rinrin-/cygwin-alternative
newlib/libm/math/w_exp2.c
111
1404
/* @(#)w_exp2.c 5.1 93/09/24 */ /* * ==================================================== * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. * * Developed at SunPro, a Sun Microsystems, Inc. business. * Permission to use, copy, modify, and distribute this * software is freely granted, provided that this notice * is preserved. * ==================================================== */ /* FUNCTION <<exp2>>, <<exp2f>>--exponential, base 2 INDEX exp2 INDEX exp2f ANSI_SYNOPSIS #include <math.h> double exp2(double <[x]>); float exp2f(float <[x]>); DESCRIPTION <<exp2>> and <<exp2f>> calculate 2 ^ <[x]>, that is, @ifnottex 2 raised to the power <[x]>. @end ifnottex @tex $2^x$ @end tex You can use the (non-ANSI) function <<matherr>> to specify error handling for these functions. RETURNS On success, <<exp2>> and <<exp2f>> return the calculated value. If the result underflows, the returned value is <<0>>. If the result overflows, the returned value is <<HUGE_VAL>>. In either case, <<errno>> is set to <<ERANGE>>. PORTABILITY ANSI C, POSIX. */ /* * wrapper exp2(x) */ #include "fdlibm.h" #include <errno.h> #include <math.h> #ifndef _DOUBLE_IS_32BITS #ifdef __STDC__ double exp2(double x) /* wrapper exp2 */ #else double exp2(x) /* wrapper exp2 */ double x; #endif { return pow(2.0, x); } #endif /* defined(_DOUBLE_IS_32BITS) */
gpl-2.0
lg-devs/android_kernel_lge_msm8996
fs/reiserfs/file.c
623
7887
/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/time.h>
#include "reiserfs.h"
#include "acl.h"
#include "xattr.h"
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>

/*
 * We pack the tails of files on file close, not at the time they are
 * written.  This implies an unnecessary copy of the tail and an
 * unnecessary indirect item insertion/balancing, for files that are
 * written in one write.  It avoids unnecessary tail packings (balances)
 * for files that are written in multiple writes and are small enough
 * to have tails.
 *
 * file_release is called by the VFS layer when the file is closed.  If
 * this is the last open file descriptor, and the file small enough to
 * have a tail, and the tail is currently in an unformatted node, the
 * tail is converted back into a direct item.
 *
 * We use reiserfs_truncate_file to pack the tail, since it already has
 * all the conditions coded.
 */
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{
	struct reiserfs_transaction_handle th;
	int err;
	int jbegin_failure = 0;

	BUG_ON(!S_ISREG(inode->i_mode));

	/* openers > 1: someone else still has it open, nothing to do */
	if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
		return 0;

	mutex_lock(&REISERFS_I(inode)->tailpack);

	/* re-check under the mutex: a racing open may have come in */
	if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
		mutex_unlock(&REISERFS_I(inode)->tailpack);
		return 0;
	}

	/* fast out for when nothing needs to be done */
	if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
	     !tail_has_to_be_packed(inode)) &&
	    REISERFS_I(inode)->i_prealloc_count <= 0) {
		mutex_unlock(&REISERFS_I(inode)->tailpack);
		return 0;
	}

	reiserfs_write_lock(inode->i_sb);
	/*
	 * freeing preallocation only involves relogging blocks that
	 * are already in the current transaction.  preallocation gets
	 * freed at the end of each transaction, so it is impossible for
	 * us to log any additional blocks (including quota blocks)
	 */
	err = journal_begin(&th, inode->i_sb, 1);
	if (err) {
		/*
		 * uh oh, we can't allow the inode to go away while there
		 * is still preallocation blocks pending.  Try to join the
		 * aborted transaction
		 */
		jbegin_failure = err;
		err = journal_join_abort(&th, inode->i_sb);

		if (err) {
			/*
			 * hmpf, our choices here aren't good.  We can pin
			 * the inode which will disallow unmount from ever
			 * happening, we can do nothing, which will corrupt
			 * random memory on unmount, or we can forcibly
			 * remove the file from the preallocation list, which
			 * will leak blocks on disk.  Lets pin the inode
			 * and let the admin know what is going on.
			 */
			igrab(inode);
			reiserfs_warning(inode->i_sb, "clm-9001",
					 "pinning inode %lu because the "
					 "preallocation can't be freed",
					 inode->i_ino);
			goto out;
		}
	}
	reiserfs_update_inode_transaction(inode);

#ifdef REISERFS_PREALLOCATE
	reiserfs_discard_prealloc(&th, inode);
#endif
	err = journal_end(&th);

	/* copy back the error code from journal_begin */
	if (!err)
		err = jbegin_failure;

	if (!err &&
	    (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
	    tail_has_to_be_packed(inode)) {

		/*
		 * if regular file is released by last holder and it has been
		 * appended (we append by unformatted node only) or its direct
		 * item(s) had to be converted, then it may have to be
		 * indirect2direct converted
		 */
		err = reiserfs_truncate_file(inode, 0);
	}
out:
	reiserfs_write_unlock(inode->i_sb);
	mutex_unlock(&REISERFS_I(inode)->tailpack);
	return err;
}

/*
 * open(): bump the openers count; if it was momentarily zero a final
 * close may be tail-packing right now, so serialize on the tailpack
 * mutex before incrementing.
 */
static int reiserfs_file_open(struct inode *inode, struct file *file)
{
	int err = dquot_file_open(inode, file);

	/* somebody might be tailpacking on final close; wait for it */
	if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
		mutex_lock(&REISERFS_I(inode)->tailpack);
		atomic_inc(&REISERFS_I(inode)->openers);
		mutex_unlock(&REISERFS_I(inode)->tailpack);
	}
	return err;
}

/* Truncate helper for the VFS: takes the tailpack mutex so it cannot
 * race against a tail-packing final close. */
void reiserfs_vfs_truncate_file(struct inode *inode)
{
	mutex_lock(&REISERFS_I(inode)->tailpack);
	reiserfs_truncate_file(inode, 1);
	mutex_unlock(&REISERFS_I(inode)->tailpack);
}

/* Sync a reiserfs file. */

/*
 * FIXME: sync_mapping_buffers() never has anything to sync.  Can
 * be removed...
 */
static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end,
			      int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int err;
	int barrier_done;

	/* flush dirty pages in [start, end] before committing metadata */
	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	BUG_ON(!S_ISREG(inode->i_mode));
	err = sync_mapping_buffers(inode->i_mapping);
	reiserfs_write_lock(inode->i_sb);
	barrier_done = reiserfs_commit_for_inode(inode);
	reiserfs_write_unlock(inode->i_sb);
	/* commit didn't issue a barrier itself — flush the device cache */
	if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
	mutex_unlock(&inode->i_mutex);
	if (barrier_done < 0)
		return barrier_done;
	return (err < 0) ? -EIO : 0;
}

/* taken fs/buffer.c:__block_commit_write */
/*
 * Mark the buffers of @page in [from, to) uptodate and dirty (or log
 * them when data-logging is enabled for the inode), applying
 * data=ordered handling to new buffers and buffers past EOF.
 * Returns 0 or a negative error from the journal.
 */
int reiserfs_commit_page(struct inode *inode, struct page *page,
			 unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;
	unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
	int new;
	int logit = reiserfs_file_data_log(inode);
	struct super_block *s = inode->i_sb;
	int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
	struct reiserfs_transaction_handle th;
	int ret = 0;

	th.t_trans_id = 0;
	blocksize = 1 << inode->i_blkbits;

	if (logit) {
		reiserfs_write_lock(s);
		ret = journal_begin(&th, s, bh_per_page + 1);
		if (ret)
			goto drop_write_lock;
		reiserfs_update_inode_transaction(inode);
	}
	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		new = buffer_new(bh);
		clear_buffer_new(bh);
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* buffer outside the written range */
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			if (logit) {
				reiserfs_prepare_for_journal(s, bh, 1);
				journal_mark_dirty(&th, bh);
			} else if (!buffer_dirty(bh)) {
				mark_buffer_dirty(bh);
				/*
				 * do data=ordered on any page past the end
				 * of file and any buffer marked BH_New.
				 */
				if (reiserfs_data_ordered(inode->i_sb) &&
				    (new || page->index >= i_size_index)) {
					reiserfs_add_ordered_list(inode, bh);
				}
			}
		}
	}
	if (logit) {
		ret = journal_end(&th);
drop_write_lock:
		reiserfs_write_unlock(s);
	}
	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return ret;
}

const struct file_operations reiserfs_file_operations = {
	.read = new_sync_read,
	.write = new_sync_write,
	.unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = reiserfs_compat_ioctl,
#endif
	.mmap = generic_file_mmap,
	.open = reiserfs_file_open,
	.release = reiserfs_file_release,
	.fsync = reiserfs_sync_file,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = generic_file_llseek,
};

const struct inode_operations reiserfs_file_inode_operations = {
	.setattr = reiserfs_setattr,
	.setxattr = reiserfs_setxattr,
	.getxattr = reiserfs_getxattr,
	.listxattr = reiserfs_listxattr,
	.removexattr = reiserfs_removexattr,
	.permission = reiserfs_permission,
	.get_acl = reiserfs_get_acl,
	.set_acl = reiserfs_set_acl,
};
gpl-2.0
ipaccess/fsm99xx-kernel-sources
drivers/mtd/devices/elm.c
2159
10604
/*
 * Error Location Module
 *
 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/elm.h>

#define ELM_IRQSTATUS			0x018
#define ELM_IRQENABLE			0x01c
#define ELM_LOCATION_CONFIG		0x020
#define ELM_PAGE_CTRL			0x080
#define ELM_SYNDROME_FRAGMENT_0		0x400
#define ELM_SYNDROME_FRAGMENT_6		0x418
#define ELM_LOCATION_STATUS		0x800
#define ELM_ERROR_LOCATION_0		0x880

/* ELM Interrupt Status Register */
#define INTR_STATUS_PAGE_VALID		BIT(8)

/* ELM Interrupt Enable Register */
#define INTR_EN_PAGE_MASK		BIT(8)

/* ELM Location Configuration Register */
#define ECC_BCH_LEVEL_MASK		0x3

/* ELM syndrome */
#define ELM_SYNDROME_VALID		BIT(16)

/* ELM_LOCATION_STATUS Register */
#define ECC_CORRECTABLE_MASK		BIT(8)
#define ECC_NB_ERRORS_MASK		0x1f

/* ELM_ERROR_LOCATION_0-15 Registers */
#define ECC_ERROR_LOCATION_MASK		0x1fff

#define ELM_ECC_SIZE			0x7ff

#define SYNDROME_FRAGMENT_REG_SIZE	0x40
#define ERROR_LOCATION_SIZE		0x100

/* Per-device state: MMIO base, completion signalled by the ISR when all
 * submitted syndrome vectors have been processed, and the configured
 * BCH level (BCH4 when bch_type == 0, BCH8 otherwise). */
struct elm_info {
	struct device *dev;
	void __iomem *elm_base;
	struct completion elm_completion;
	struct list_head list;
	enum bch_ecc bch_type;
};

static LIST_HEAD(elm_devices);

static void elm_write_reg(struct elm_info *info, int offset, u32 val)
{
	writel(val, info->elm_base + offset);
}

static u32 elm_read_reg(struct elm_info *info, int offset)
{
	return readl(info->elm_base + offset);
}

/**
 * elm_config - Configure ELM module
 * @dev:	ELM device
 * @bch_type:	Type of BCH ecc
 *
 * Returns 0 on success, -ENODEV when called before the device probed.
 */
int elm_config(struct device *dev, enum bch_ecc bch_type)
{
	u32 reg_val;
	struct elm_info *info = dev_get_drvdata(dev);

	if (!info) {
		dev_err(dev, "Unable to configure elm - device not probed?\n");
		return -ENODEV;
	}

	reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
	elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
	info->bch_type = bch_type;

	return 0;
}
EXPORT_SYMBOL(elm_config);

/**
 * elm_configure_page_mode - Enable/Disable page mode
 * @info:	elm info
 * @index:	index number of syndrome fragment vector
 * @enable:	enable/disable flag for page mode
 *
 * Enable page mode for syndrome fragment index
 */
static void elm_configure_page_mode(struct elm_info *info, int index,
				    bool enable)
{
	u32 reg_val;

	reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
	if (enable)
		reg_val |= BIT(index);	/* enable page mode */
	else
		reg_val &= ~BIT(index);	/* disable page mode */

	elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
}

/**
 * elm_load_syndrome - Load ELM syndrome reg
 * @info:	elm info
 * @err_vec:	elm error vectors
 * @ecc:	buffer with calculated ecc
 *
 * Load syndrome fragment registers with calculated ecc in reverse order.
 * NOTE(review): the *(u32 *)&ecc[n] casts read at unaligned offsets;
 * this relies on the platform tolerating unaligned loads — confirm
 * against get_unaligned_be32() usage in later kernels.
 */
static void elm_load_syndrome(struct elm_info *info,
			      struct elm_errorvec *err_vec, u8 *ecc)
{
	int i, offset;
	u32 val;

	for (i = 0; i < ERROR_VECTOR_MAX; i++) {

		/* Check error reported */
		if (err_vec[i].error_reported) {
			elm_configure_page_mode(info, i, true);
			offset = ELM_SYNDROME_FRAGMENT_0 +
				SYNDROME_FRAGMENT_REG_SIZE * i;

			/* BCH8 */
			if (info->bch_type) {

				/* syndrome fragment 0 = ecc[9-12B] */
				val = cpu_to_be32(*(u32 *) &ecc[9]);
				elm_write_reg(info, offset, val);

				/* syndrome fragment 1 = ecc[5-8B] */
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[5]);
				elm_write_reg(info, offset, val);

				/* syndrome fragment 2 = ecc[1-4B] */
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[1]);
				elm_write_reg(info, offset, val);

				/* syndrome fragment 3 = ecc[0B] */
				offset += 4;
				val = ecc[0];
				elm_write_reg(info, offset, val);
			} else {
				/* syndrome fragment 0 = ecc[20-52b] bits */
				val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
					((ecc[2] & 0xf) << 28);
				elm_write_reg(info, offset, val);

				/* syndrome fragment 1 = ecc[0-20b] bits */
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
				elm_write_reg(info, offset, val);
			}
		}

		/* Update ecc pointer with ecc byte size */
		ecc += info->bch_type ? BCH8_SIZE : BCH4_SIZE;
	}
}

/**
 * elm_start_processing - start elm syndrome processing
 * @info:	elm info
 * @err_vec:	elm error vectors
 *
 * Set syndrome valid bit for syndrome fragment registers for which
 * elm syndrome fragment registers are loaded. This enables elm module
 * to start processing syndrome vectors.
 */
static void elm_start_processing(struct elm_info *info,
				 struct elm_errorvec *err_vec)
{
	int i, offset;
	u32 reg_val;

	/*
	 * Set syndrome vector valid, so that ELM module
	 * will process it for vectors error is reported
	 */
	for (i = 0; i < ERROR_VECTOR_MAX; i++) {
		if (err_vec[i].error_reported) {
			offset = ELM_SYNDROME_FRAGMENT_6 +
				SYNDROME_FRAGMENT_REG_SIZE * i;
			reg_val = elm_read_reg(info, offset);
			reg_val |= ELM_SYNDROME_VALID;
			elm_write_reg(info, offset, reg_val);
		}
	}
}

/**
 * elm_error_correction - locate correctable error position
 * @info:	elm info
 * @err_vec:	elm error vectors
 *
 * On completion of processing by elm module, error location status
 * register updated with correctable/uncorrectable error information.
 * In case of correctable errors, number of errors located from
 * elm location status register & read the positions from
 * elm error location register.
 */
static void elm_error_correction(struct elm_info *info,
				 struct elm_errorvec *err_vec)
{
	int i, j, errors = 0;
	int offset;
	u32 reg_val;

	for (i = 0; i < ERROR_VECTOR_MAX; i++) {

		/* Check error reported */
		if (err_vec[i].error_reported) {
			offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
			reg_val = elm_read_reg(info, offset);

			/* Check correctable error or not */
			if (reg_val & ECC_CORRECTABLE_MASK) {
				offset = ELM_ERROR_LOCATION_0 +
					ERROR_LOCATION_SIZE * i;

				/* Read count of correctable errors */
				err_vec[i].error_count = reg_val &
					ECC_NB_ERRORS_MASK;

				/* Update the error locations in error vector */
				for (j = 0; j < err_vec[i].error_count; j++) {

					reg_val = elm_read_reg(info, offset);
					err_vec[i].error_loc[j] = reg_val &
						ECC_ERROR_LOCATION_MASK;

					/* Update error location register */
					offset += 4;
				}

				errors += err_vec[i].error_count;
			} else {
				err_vec[i].error_uncorrectable = true;
			}

			/* Clearing interrupts for processed error vectors */
			elm_write_reg(info, ELM_IRQSTATUS, BIT(i));

			/* Disable page mode */
			elm_configure_page_mode(info, i, false);
		}
	}
}

/**
 * elm_decode_bch_error_page - Locate error position
 * @dev:	device pointer
 * @ecc_calc:	calculated ECC bytes from GPMC
 * @err_vec:	elm error vectors
 *
 * Called with one or more error reported vectors & vectors with
 * error reported is updated in err_vec[].error_reported
 */
void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
			       struct elm_errorvec *err_vec)
{
	struct elm_info *info = dev_get_drvdata(dev);
	u32 reg_val;

	/* Enable page mode interrupt */
	reg_val = elm_read_reg(info, ELM_IRQSTATUS);
	elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
	elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);

	/* Load valid ecc byte to syndrome fragment register */
	elm_load_syndrome(info, err_vec, ecc_calc);

	/* Enable syndrome processing for which syndrome fragment is updated */
	elm_start_processing(info, err_vec);

	/* Wait for ELM module to finish locating error correction */
	wait_for_completion(&info->elm_completion);

	/* Disable page mode interrupt */
	reg_val = elm_read_reg(info, ELM_IRQENABLE);
	elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
	elm_error_correction(info, err_vec);
}
EXPORT_SYMBOL(elm_decode_bch_error_page);

/* IRQ handler: PAGE_VALID means every submitted vector is done — ack
 * the interrupt and wake the waiter in elm_decode_bch_error_page(). */
static irqreturn_t elm_isr(int this_irq, void *dev_id)
{
	u32 reg_val;
	struct elm_info *info = dev_id;

	reg_val = elm_read_reg(info, ELM_IRQSTATUS);

	/* All error vectors processed */
	if (reg_val & INTR_STATUS_PAGE_VALID) {
		elm_write_reg(info, ELM_IRQSTATUS,
			      reg_val & INTR_STATUS_PAGE_VALID);
		complete(&info->elm_completion);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int elm_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res, *irq;
	struct elm_info *info;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	info->dev = &pdev->dev;

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource defined\n");
		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no memory resource defined\n");
		return -ENODEV;
	}

	info->elm_base = devm_request_and_ioremap(&pdev->dev, res);
	if (!info->elm_base)
		return -EADDRNOTAVAIL;

	ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
			       pdev->name, info);
	if (ret) {
		dev_err(&pdev->dev, "failure requesting irq %i\n", irq->start);
		return ret;
	}

	pm_runtime_enable(&pdev->dev);
	/*
	 * pm_runtime_get_sync() returns a negative errno on failure but
	 * 0 or a positive value (device already active) on success.  The
	 * previous "if (pm_runtime_get_sync(...))" treated the positive
	 * success case as a failure and also discarded the real error
	 * code in favour of -EINVAL; only a negative return is an error.
	 */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		dev_err(&pdev->dev, "can't enable clock\n");
		return ret;
	}

	init_completion(&info->elm_completion);
	INIT_LIST_HEAD(&info->list);
	list_add(&info->list, &elm_devices);
	platform_set_drvdata(pdev, info);
	return 0;
}

static int elm_remove(struct platform_device *pdev)
{
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id elm_of_match[] = {
	{ .compatible = "ti,am3352-elm" },
	{},
};
MODULE_DEVICE_TABLE(of, elm_of_match);
#endif

static struct platform_driver elm_driver = {
	.driver	= {
		.name	= "elm",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(elm_of_match),
	},
	.probe	= elm_probe,
	.remove	= elm_remove,
};

module_platform_driver(elm_driver);

MODULE_DESCRIPTION("ELM driver for BCH error correction");
MODULE_AUTHOR("Texas Instruments");
MODULE_ALIAS("platform: elm");
MODULE_LICENSE("GPL v2");
gpl-2.0
u-ra/android_kernel_htc_msm8660
drivers/staging/msm/staging-devices.c
2159
7421
/*
 * MSM QSD8x50 staging board file: framebuffer/MDP/LCDC platform devices
 * and the bootmem carve-out for the framebuffer.
 */
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
#include <linux/delay.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/io.h>
#include <asm/setup.h>

#include <mach/board.h>
#include <mach/irqs.h>
#include <mach/sirc.h>
#include <mach/gpio.h>

#include "msm_mdp.h"
#include "memory_ll.h"
//#include "android_pmem.h"

#ifdef CONFIG_MSM_SOC_REV_A
#define MSM_SMI_BASE		0xE0000000
#else
#define MSM_SMI_BASE		0x00000000
#endif

#define TOUCHPAD_SUSPEND	34
#define TOUCHPAD_IRQ		38

#define MSM_PMEM_MDP_SIZE	0x1591000

#ifdef CONFIG_MSM_SOC_REV_A
#define SMEM_SPINLOCK_I2C	"D:I2C02000021"
#else
#define SMEM_SPINLOCK_I2C	"S:6"
#endif

#define MSM_PMEM_ADSP_SIZE	0x1C00000

#define MSM_FB_SIZE		0x500000
#define MSM_FB_SIZE_ST15	0x800000
#define MSM_AUDIO_SIZE		0x80000
#define MSM_GPU_PHYS_SIZE	SZ_2M

/* NOTE(review): MSM_SMI_BASE is defined twice with identical values —
 * benign, but the duplicate block above could be dropped. */
#ifdef CONFIG_MSM_SOC_REV_A
#define MSM_SMI_BASE		0xE0000000
#else
#define MSM_SMI_BASE		0x00000000
#endif

#define MSM_SHARED_RAM_PHYS	(MSM_SMI_BASE + 0x00100000)

#define MSM_PMEM_SMI_BASE	(MSM_SMI_BASE + 0x02B00000)
#define MSM_PMEM_SMI_SIZE	0x01500000

#define MSM_FB_BASE		MSM_PMEM_SMI_BASE
#define MSM_GPU_PHYS_BASE	(MSM_FB_BASE + MSM_FB_SIZE)
#define MSM_PMEM_SMIPOOL_BASE	(MSM_GPU_PHYS_BASE + MSM_GPU_PHYS_SIZE)
#define MSM_PMEM_SMIPOOL_SIZE	(MSM_PMEM_SMI_SIZE - MSM_FB_SIZE \
					- MSM_GPU_PHYS_SIZE)

#if defined(CONFIG_FB_MSM_MDP40)
#define MDP_BASE	0xA3F00000
#define PMDH_BASE	0xAD600000
#define EMDH_BASE	0xAD700000
#define TVENC_BASE	0xAD400000
#else
#define MDP_BASE	0xAA200000
#define PMDH_BASE	0xAA600000
#define EMDH_BASE	0xAA700000
#define TVENC_BASE	0xAA400000
#endif

#define PMEM_KERNEL_EBI1_SIZE	(CONFIG_PMEM_KERNEL_SIZE * 1024 * 1024)

/* DMA resource; start/end are filled in by
 * qsd8x50_allocate_memory_regions() at boot. */
static struct resource msm_fb_resources[] = {
	{
		.flags = IORESOURCE_DMA,
	}
};

static struct resource msm_mdp_resources[] = {
	{
		.name = "mdp",
		.start = MDP_BASE,
		.end = MDP_BASE + 0x000F0000 - 1,
		.flags = IORESOURCE_MEM,
	}
};

static struct platform_device msm_mdp_device = {
	.name = "mdp",
	.id = 0,
	.num_resources = ARRAY_SIZE(msm_mdp_resources),
	.resource = msm_mdp_resources,
};

static struct platform_device msm_lcdc_device = {
	.name = "lcdc",
	.id = 0,
};

/*
 * msm_fb_detect_panel - board-specific panel detection callback.
 * Returns 0 if @name matches a panel supported on the running machine,
 * -ENODEV for a known machine with a non-matching name, and -EPERM when
 * this board makes no claim about the panel.
 */
static int msm_fb_detect_panel(const char *name)
{
	int ret = -EPERM;

	if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa()) {
		if (!strncmp(name, "mddi_toshiba_wvga_pt", 20))
			ret = 0;
		else
			ret = -ENODEV;
	} else if ((machine_is_qsd8x50_surf() || machine_is_qsd8x50a_surf())
			&& !strcmp(name, "lcdc_external"))
		ret = 0;
	else if (machine_is_qsd8x50a_st1_5()) {
		if (!strcmp(name, "lcdc_st15") ||
		    !strcmp(name, "hdmi_sii9022"))
			ret = 0;
		else
			ret = -ENODEV;
	}

	return ret;
}

/* Only allow a small subset of machines to set the offset via FB PAN_DISPLAY */
static int msm_fb_allow_set_offset(void)
{
	return (machine_is_qsd8x50_st1() ||
		machine_is_qsd8x50a_st1_5()) ? 1 : 0;
}

static struct msm_fb_platform_data msm_fb_pdata = {
	.detect_client = msm_fb_detect_panel,
	.allow_set_offset = msm_fb_allow_set_offset,
};

static struct platform_device msm_fb_device = {
	.name = "msm_fb",
	.id = 0,
	.num_resources = ARRAY_SIZE(msm_fb_resources),
	.resource = msm_fb_resources,
	.dev = {
		.platform_data = &msm_fb_pdata,
	}
};

/*
 * qsd8x50_allocate_memory_regions - carve out the framebuffer from
 * bootmem and record its physical range in msm_fb_resources[0].
 * ST1.5 boards get the larger MSM_FB_SIZE_ST15 buffer.
 */
static void __init qsd8x50_allocate_memory_regions(void)
{
	void *addr;
	unsigned long size;

	if (machine_is_qsd8x50a_st1_5())
		size = MSM_FB_SIZE_ST15;
	else
		size = MSM_FB_SIZE;

	/* NOTE(review): alloc_bootmem() panics rather than returning NULL,
	 * so this check is presumably dead — confirm. */
	addr = alloc_bootmem(size); // (void *)MSM_FB_BASE;
	if (!addr)
		printk("Failed to allocate bootmem for framebuffer\n");
	msm_fb_resources[0].start = __pa(addr);
	msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
	pr_info("using %lu bytes of SMI at %lx physical for fb\n",
		size, (unsigned long)addr);
}

/*
 * msm_fb_lcdc_gpio_config - power-sequence the LCDC panel GPIOs.
 * GPIO numbers and ordering are board-specific (ST1 vs ST1.5).
 */
static int msm_fb_lcdc_gpio_config(int on)
{
//	return 0;
	if (machine_is_qsd8x50_st1()) {
		if (on) {
			gpio_set_value(32, 1);
			mdelay(100);
			gpio_set_value(20, 1);
			gpio_set_value(17, 1);
			gpio_set_value(19, 1);
		} else {
			gpio_set_value(17, 0);
			gpio_set_value(19, 0);
			gpio_set_value(20, 0);
			mdelay(100);
			gpio_set_value(32, 0);
		}
	} else if (machine_is_qsd8x50a_st1_5()) {
		if (on) {
			gpio_set_value(17, 1);
			gpio_set_value(19, 1);
			gpio_set_value(20, 1);
			gpio_set_value(22, 0);
			gpio_set_value(32, 1);
			gpio_set_value(155, 1);
			//st15_hdmi_power(1);
			gpio_set_value(22, 1);
		} else {
			gpio_set_value(17, 0);
			gpio_set_value(19, 0);
			gpio_set_value(22, 0);
			gpio_set_value(32, 0);
			gpio_set_value(155, 0);
			// st15_hdmi_power(0);
		}
	}
	return 0;
}

static struct lcdc_platform_data lcdc_pdata = {
	.lcdc_gpio_config = msm_fb_lcdc_gpio_config,
};

/* GPIO mux table for the ST1.5 LCDC/HDMI pins. */
static struct msm_gpio msm_fb_st15_gpio_config_data[] = {
	{ GPIO_CFG(17, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lcdc_en0" },
	{ GPIO_CFG(19, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "dat_pwr_sv" },
	{ GPIO_CFG(20, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lvds_pwr_dn" },
	{ GPIO_CFG(22, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lcdc_en1" },
	{ GPIO_CFG(32, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lcdc_en2" },
	{ GPIO_CFG(103, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), "hdmi_irq" },
	{ GPIO_CFG(155, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "hdmi_3v3" },
};

static struct msm_panel_common_pdata mdp_pdata = {
	.gpio = 98,
};

static struct platform_device *devices[] __initdata = {
	&msm_fb_device,
};

/* Attach @data as platform_data and register @pdev, logging any failure. */
static void __init msm_register_device(struct platform_device *pdev, void *data)
{
	int ret;

	pdev->dev.platform_data = data;

	ret = platform_device_register(pdev);
	if (ret)
		dev_err(&pdev->dev,
			"%s: platform_device_register() failed = %d\n",
			__func__, ret);
}

/*
 * msm_fb_register_device - dispatch a named fb sub-device registration.
 * Only "mdp" and "lcdc" are wired up here; the remaining devices are
 * intentionally disabled (commented out) in this staging tree.
 */
void __init msm_fb_register_device(char *name, void *data)
{
	if (!strncmp(name, "mdp", 3))
		msm_register_device(&msm_mdp_device, data);
/*	else if (!strncmp(name, "pmdh", 4))
		msm_register_device(&msm_mddi_device, data);
	else if (!strncmp(name, "emdh", 4))
		msm_register_device(&msm_mddi_ext_device, data);
	else if (!strncmp(name, "ebi2", 4))
		msm_register_device(&msm_ebi2_lcd_device, data);
	else if (!strncmp(name, "tvenc", 5))
		msm_register_device(&msm_tvenc_device, data);
	else */ if (!strncmp(name, "lcdc", 4))
		msm_register_device(&msm_lcdc_device, data);
	/*else
		printk(KERN_ERR "%s: unknown device! %s\n", __func__, name);
	*/
}

/* Register MDP and (per machine) the LCDC device, enabling the ST1.5
 * panel GPIOs first when needed. */
static void __init msm_fb_add_devices(void)
{
	int rc;

	msm_fb_register_device("mdp", &mdp_pdata);
//	msm_fb_register_device("pmdh", &mddi_pdata);
//	msm_fb_register_device("emdh", &mddi_pdata);
//	msm_fb_register_device("tvenc", 0);

	if (machine_is_qsd8x50a_st1_5()) {
/*		rc = st15_hdmi_vreg_init();
		if (rc)
			return;
*/
		rc = msm_gpios_request_enable(
			msm_fb_st15_gpio_config_data,
			ARRAY_SIZE(msm_fb_st15_gpio_config_data));
		if (rc) {
			printk(KERN_ERR "%s: unable to init lcdc gpios\n",
			       __func__);
			return;
		}
		msm_fb_register_device("lcdc", &lcdc_pdata);
	} else
		msm_fb_register_device("lcdc", 0);
}

/* Early init hook: reserve the framebuffer memory. */
int __init staging_init_pmem(void)
{
	qsd8x50_allocate_memory_regions();
	return 0;
}

/* Early init hook: register the fb platform devices. */
int __init staging_init_devices(void)
{
	platform_add_devices(devices, ARRAY_SIZE(devices));
	msm_fb_add_devices();
	return 0;
}

arch_initcall(staging_init_pmem);
arch_initcall(staging_init_devices);
gpl-2.0
klin1344/FusionUlt-AOSP_OLD
arch/arm/mach-spear3xx/spear310.c
2927
7041
/*
 * arch/arm/mach-spear3xx/spear310.c
 *
 * SPEAr310 machine source file
 *
 * Copyright (C) 2009 ST Microelectronics
 * Viresh Kumar<viresh.kumar@st.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/ptrace.h>
#include <asm/irq.h>
#include <plat/shirq.h>
#include <mach/generic.h>
#include <mach/hardware.h>

/* pad multiplexing support */
/* muxing registers */
#define PAD_MUX_CONFIG_REG	0x08

/*
 * devices
 *
 * Each pmx_dev below describes one SPEAr310 peripheral and the pad-mux
 * mask that must be cleared/claimed for it in PAD_MUX_CONFIG_REG.
 */
static struct pmx_dev_mode pmx_emi_cs_0_1_4_5_modes[] = {
	{
		.ids = 0x00,
		.mask = PMX_TIMER_3_4_MASK,
	},
};

struct pmx_dev spear310_pmx_emi_cs_0_1_4_5 = {
	.name = "emi_cs_0_1_4_5",
	.modes = pmx_emi_cs_0_1_4_5_modes,
	.mode_count = ARRAY_SIZE(pmx_emi_cs_0_1_4_5_modes),
	.enb_on_reset = 1,
};

static struct pmx_dev_mode pmx_emi_cs_2_3_modes[] = {
	{
		.ids = 0x00,
		.mask = PMX_TIMER_1_2_MASK,
	},
};

struct pmx_dev spear310_pmx_emi_cs_2_3 = {
	.name = "emi_cs_2_3",
	.modes = pmx_emi_cs_2_3_modes,
	.mode_count = ARRAY_SIZE(pmx_emi_cs_2_3_modes),
	.enb_on_reset = 1,
};

static struct pmx_dev_mode pmx_uart1_modes[] = {
	{
		.ids = 0x00,
		.mask = PMX_FIRDA_MASK,
	},
};

struct pmx_dev spear310_pmx_uart1 = {
	.name = "uart1",
	.modes = pmx_uart1_modes,
	.mode_count = ARRAY_SIZE(pmx_uart1_modes),
	.enb_on_reset = 1,
};

static struct pmx_dev_mode pmx_uart2_modes[] = {
	{
		.ids = 0x00,
		.mask = PMX_TIMER_1_2_MASK,
	},
};

struct pmx_dev spear310_pmx_uart2 = {
	.name = "uart2",
	.modes = pmx_uart2_modes,
	.mode_count = ARRAY_SIZE(pmx_uart2_modes),
	.enb_on_reset = 1,
};

static struct pmx_dev_mode pmx_uart3_4_5_modes[] = {
	{
		.ids = 0x00,
		.mask = PMX_UART0_MODEM_MASK,
	},
};

struct pmx_dev spear310_pmx_uart3_4_5 = {
	.name = "uart3_4_5",
	.modes = pmx_uart3_4_5_modes,
	.mode_count = ARRAY_SIZE(pmx_uart3_4_5_modes),
	.enb_on_reset = 1,
};

static struct pmx_dev_mode pmx_fsmc_modes[] = {
	{
		.ids = 0x00,
		.mask = PMX_SSP_CS_MASK,
	},
};

struct pmx_dev spear310_pmx_fsmc = {
	.name = "fsmc",
	.modes = pmx_fsmc_modes,
	.mode_count = ARRAY_SIZE(pmx_fsmc_modes),
	.enb_on_reset = 1,
};

static struct pmx_dev_mode pmx_rs485_0_1_modes[] = {
	{
		.ids = 0x00,
		.mask = PMX_MII_MASK,
	},
};

struct pmx_dev spear310_pmx_rs485_0_1 = {
	.name = "rs485_0_1",
	.modes = pmx_rs485_0_1_modes,
	.mode_count = ARRAY_SIZE(pmx_rs485_0_1_modes),
	.enb_on_reset = 1,
};

static struct pmx_dev_mode pmx_tdm0_modes[] = {
	{
		.ids = 0x00,
		.mask = PMX_MII_MASK,
	},
};

struct pmx_dev spear310_pmx_tdm0 = {
	.name = "tdm0",
	.modes = pmx_tdm0_modes,
	.mode_count = ARRAY_SIZE(pmx_tdm0_modes),
	.enb_on_reset = 1,
};

/* pmx driver structure */
static struct pmx_driver pmx_driver = {
	.mux_reg = {.offset = PAD_MUX_CONFIG_REG, .mask = 0x00007fff},
};

/*
 * spear3xx shared irq
 *
 * The four tables below map each demultiplexed virtual IRQ to its status
 * bit in SPEAR310_INT_STS_MASK_REG; enb_reg/clear_reg of -1 means the
 * block has no enable/clear register.
 */
static struct shirq_dev_config shirq_ras1_config[] = {
	{
		.virq = SPEAR310_VIRQ_SMII0,
		.status_mask = SPEAR310_SMII0_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_SMII1,
		.status_mask = SPEAR310_SMII1_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_SMII2,
		.status_mask = SPEAR310_SMII2_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_SMII3,
		.status_mask = SPEAR310_SMII3_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_WAKEUP_SMII0,
		.status_mask = SPEAR310_WAKEUP_SMII0_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_WAKEUP_SMII1,
		.status_mask = SPEAR310_WAKEUP_SMII1_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_WAKEUP_SMII2,
		.status_mask = SPEAR310_WAKEUP_SMII2_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_WAKEUP_SMII3,
		.status_mask = SPEAR310_WAKEUP_SMII3_IRQ_MASK,
	},
};

static struct spear_shirq shirq_ras1 = {
	.irq = SPEAR3XX_IRQ_GEN_RAS_1,
	.dev_config = shirq_ras1_config,
	.dev_count = ARRAY_SIZE(shirq_ras1_config),
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.status_reg_mask = SPEAR310_SHIRQ_RAS1_MASK,
		.clear_reg = -1,
	},
};

static struct shirq_dev_config shirq_ras2_config[] = {
	{
		.virq = SPEAR310_VIRQ_UART1,
		.status_mask = SPEAR310_UART1_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_UART2,
		.status_mask = SPEAR310_UART2_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_UART3,
		.status_mask = SPEAR310_UART3_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_UART4,
		.status_mask = SPEAR310_UART4_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_UART5,
		.status_mask = SPEAR310_UART5_IRQ_MASK,
	},
};

static struct spear_shirq shirq_ras2 = {
	.irq = SPEAR3XX_IRQ_GEN_RAS_2,
	.dev_config = shirq_ras2_config,
	.dev_count = ARRAY_SIZE(shirq_ras2_config),
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.status_reg_mask = SPEAR310_SHIRQ_RAS2_MASK,
		.clear_reg = -1,
	},
};

static struct shirq_dev_config shirq_ras3_config[] = {
	{
		.virq = SPEAR310_VIRQ_EMI,
		.status_mask = SPEAR310_EMI_IRQ_MASK,
	},
};

static struct spear_shirq shirq_ras3 = {
	.irq = SPEAR3XX_IRQ_GEN_RAS_3,
	.dev_config = shirq_ras3_config,
	.dev_count = ARRAY_SIZE(shirq_ras3_config),
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.status_reg_mask = SPEAR310_SHIRQ_RAS3_MASK,
		.clear_reg = -1,
	},
};

static struct shirq_dev_config shirq_intrcomm_ras_config[] = {
	{
		.virq = SPEAR310_VIRQ_TDM_HDLC,
		.status_mask = SPEAR310_TDM_HDLC_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_RS485_0,
		.status_mask = SPEAR310_RS485_0_IRQ_MASK,
	}, {
		.virq = SPEAR310_VIRQ_RS485_1,
		.status_mask = SPEAR310_RS485_1_IRQ_MASK,
	},
};

static struct spear_shirq shirq_intrcomm_ras = {
	.irq = SPEAR3XX_IRQ_INTRCOMM_RAS_ARM,
	.dev_config = shirq_intrcomm_ras_config,
	.dev_count = ARRAY_SIZE(shirq_intrcomm_ras_config),
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.status_reg_mask = SPEAR310_SHIRQ_INTRCOMM_RAS_MASK,
		.clear_reg = -1,
	},
};

/* Add spear310 specific devices here */

/* spear310 routines */
/*
 * spear310_init - SPEAr310 machine init: runs common spear3xx init,
 * registers the four shared-IRQ demultiplexers and the pad-mux driver.
 * Note that pmx_driver.base is set even when the ioremap above failed
 * (base == NULL); pmx_register() then operates on a NULL base.
 */
void __init spear310_init(struct pmx_mode *pmx_mode, struct pmx_dev **pmx_devs,
		u8 pmx_dev_count)
{
	void __iomem *base;
	int ret = 0;

	/* call spear3xx family common init function */
	spear3xx_init();

	/* shared irq registration */
	base = ioremap(SPEAR310_SOC_CONFIG_BASE, SZ_4K);
	if (base) {
		/* shirq 1 */
		shirq_ras1.regs.base = base;
		ret = spear_shirq_register(&shirq_ras1);
		if (ret)
			printk(KERN_ERR "Error registering Shared IRQ 1\n");

		/* shirq 2 */
		shirq_ras2.regs.base = base;
		ret = spear_shirq_register(&shirq_ras2);
		if (ret)
			printk(KERN_ERR "Error registering Shared IRQ 2\n");

		/* shirq 3 */
		shirq_ras3.regs.base = base;
		ret = spear_shirq_register(&shirq_ras3);
		if (ret)
			printk(KERN_ERR "Error registering Shared IRQ 3\n");

		/* shirq 4 */
		shirq_intrcomm_ras.regs.base = base;
		ret = spear_shirq_register(&shirq_intrcomm_ras);
		if (ret)
			printk(KERN_ERR "Error registering Shared IRQ 4\n");
	}

	/* pmx initialization */
	pmx_driver.base = base;
	pmx_driver.mode = pmx_mode;
	pmx_driver.devs = pmx_devs;
	pmx_driver.devs_count = pmx_dev_count;

	ret = pmx_register(&pmx_driver);
	if (ret)
		printk(KERN_ERR "padmux: registeration failed. err no: %d\n",
				ret);
}
gpl-2.0
TimmyTossPot/kernel_endeavoru
drivers/watchdog/machzwd.c
3183
10253
/*
 *  MachZ ZF-Logic Watchdog Timer driver for Linux
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  The author does NOT admit liability nor provide warranty for
 *  any of this software. This material is provided "AS-IS" in
 *  the hope that it may be useful for others.
 *
 *  Author: Fernando Fuganti <fuganti@conectiva.com.br>
 *
 *  Based on sbc60xxwdt.c by Jakob Oestergaard
 *
 *
 *  We have two timers (wd#1, wd#2) driven by a 32 KHz clock with the
 *  following periods:
 *      wd#1 - 2 seconds;
 *      wd#2 - 7.2 ms;
 *  After the expiration of wd#1, it can generate a NMI, SCI, SMI, or
 *  a system RESET and it starts wd#2 that unconditionally will RESET
 *  the system when the counter reaches zero.
 *
 *  14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com>
 *      Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <asm/system.h>

/* ports */
#define ZF_IOBASE	0x218
#define INDEX		0x218
#define DATA_B		0x219
#define DATA_W		0x21A
#define DATA_D		0x21A

/* indexes */			/* size */
#define ZFL_VERSION	0x02	/* 16   */
#define CONTROL		0x10	/* 16   */
#define STATUS		0x12	/* 8    */
#define COUNTER_1	0x0C	/* 16   */
#define COUNTER_2	0x0E	/* 8    */
#define PULSE_LEN	0x0F	/* 8    */

/* controls */
#define ENABLE_WD1	0x0001
#define ENABLE_WD2	0x0002
#define RESET_WD1	0x0010
#define RESET_WD2	0x0020
#define GEN_SCI		0x0100
#define GEN_NMI		0x0200
#define GEN_SMI		0x0400
#define GEN_RESET	0x0800

/* utilities */
#define WD1	0
#define WD2	1

/* Indexed register access: write the index to INDEX, then the data to
 * the matching data port. */
#define zf_writew(port, data)  { outb(port, INDEX); outw(data, DATA_W); }
#define zf_writeb(port, data)  { outb(port, INDEX); outb(data, DATA_B); }
#define zf_get_ZFL_version()   zf_readw(ZFL_VERSION)

static unsigned short zf_readw(unsigned char port)
{
	outb(port, INDEX);
	return inw(DATA_W);
}

MODULE_AUTHOR("Fernando Fuganti <fuganti@conectiva.com.br>");
MODULE_DESCRIPTION("MachZ ZF-Logic Watchdog driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);

static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
		"Watchdog cannot be stopped once started (default="
				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

#define PFX "machzwd"

static const struct watchdog_info zf_info = {
	.options		= WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.firmware_version	= 1,
	.identity		= "ZF-Logic watchdog",
};

/*
 * action refers to action taken when watchdog resets
 * 0 = GEN_RESET
 * 1 = GEN_SMI
 * 2 = GEN_NMI
 * 3 = GEN_SCI
 * defaults to GEN_RESET (0)
 */
static int action;
module_param(action, int, 0);
MODULE_PARM_DESC(action, "after watchdog resets, generate: "
				"0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");

static void zf_ping(unsigned long data);

static int zf_action = GEN_RESET;
static unsigned long zf_is_open;
static char zf_expect_close;
static DEFINE_SPINLOCK(zf_port_lock);
static DEFINE_TIMER(zf_timer, zf_ping, 0, 0);
static unsigned long next_heartbeat;


/* timeout for user land heart beat (10 seconds) */
#define ZF_USER_TIMEO (HZ*10)

/* timeout for hardware watchdog (~500ms) */
#define ZF_HW_TIMEO (HZ/2)

/* number of ticks on WD#1 (driven by a 32KHz clock, 2s) */
#define ZF_CTIMEOUT 0xffff

#ifndef ZF_DEBUG
#	define dprintk(format, args...)
#else
#	define dprintk(format, args...) printk(KERN_DEBUG PFX \
				":%s:%d: " format, __func__, __LINE__ , ## args)
#endif

/* Write @new to the STATUS register. */
static inline void zf_set_status(unsigned char new)
{
	zf_writeb(STATUS, new);
}

/* CONTROL register functions */

static inline unsigned short zf_get_control(void)
{
	return zf_readw(CONTROL);
}

static inline void zf_set_control(unsigned short new)
{
	zf_writew(CONTROL, new);
}

/* WD#? counter functions */
/*
 *	Just set counter value
 */
static inline void zf_set_timer(unsigned short new, unsigned char n)
{
	switch (n) {
	case WD1:
		zf_writew(COUNTER_1, new);
		/* fall through - presumably intentional so that loading WD1
		 * also (re)loads WD2's 8-bit counter; confirm against the
		 * ZF-Logic datasheet. */
	case WD2:
		zf_writeb(COUNTER_2, new > 0xff ? 0xff : new);
		/* fall through */
	default:
		return;
	}
}

/*
 * stop hardware timer
 */
static void zf_timer_off(void)
{
	unsigned int ctrl_reg = 0;
	unsigned long flags;

	/* stop internal ping */
	del_timer_sync(&zf_timer);

	spin_lock_irqsave(&zf_port_lock, flags);
	/* stop watchdog timer */
	ctrl_reg = zf_get_control();
	ctrl_reg |= (ENABLE_WD1|ENABLE_WD2);	/* disable wd1 and wd2 */
	ctrl_reg &= ~(ENABLE_WD1|ENABLE_WD2);
	zf_set_control(ctrl_reg);
	spin_unlock_irqrestore(&zf_port_lock, flags);

	printk(KERN_INFO PFX ": Watchdog timer is now disabled\n");
}

/*
 * start hardware timer
 */
static void zf_timer_on(void)
{
	unsigned int ctrl_reg = 0;
	unsigned long flags;

	spin_lock_irqsave(&zf_port_lock, flags);

	zf_writeb(PULSE_LEN, 0xff);

	zf_set_timer(ZF_CTIMEOUT, WD1);

	/* user land ping */
	next_heartbeat = jiffies + ZF_USER_TIMEO;

	/* start the timer for internal ping */
	mod_timer(&zf_timer, jiffies + ZF_HW_TIMEO);

	/* start watchdog timer */
	ctrl_reg = zf_get_control();
	ctrl_reg |= (ENABLE_WD1|zf_action);
	zf_set_control(ctrl_reg);
	spin_unlock_irqrestore(&zf_port_lock, flags);

	printk(KERN_INFO PFX ": Watchdog timer is now enabled\n");
}

/*
 * zf_ping - kernel-side heartbeat, re-armed from its own timer while
 * userspace keeps next_heartbeat in the future; otherwise the hardware
 * is allowed to expire and reset the machine.
 */
static void zf_ping(unsigned long data)
{
	unsigned int ctrl_reg = 0;
	unsigned long flags;

	zf_writeb(COUNTER_2, 0xff);

	if (time_before(jiffies, next_heartbeat)) {
		dprintk("time_before: %ld\n", next_heartbeat - jiffies);
		/*
		 * reset event is activated by transition from 0 to 1 on
		 * RESET_WD1 bit and we assume that it is already zero...
		 */
		spin_lock_irqsave(&zf_port_lock, flags);
		ctrl_reg = zf_get_control();
		ctrl_reg |= RESET_WD1;
		zf_set_control(ctrl_reg);

		/* ...and nothing changes until here */
		ctrl_reg &= ~(RESET_WD1);
		zf_set_control(ctrl_reg);
		spin_unlock_irqrestore(&zf_port_lock, flags);

		mod_timer(&zf_timer, jiffies + ZF_HW_TIMEO);
	} else
		printk(KERN_CRIT PFX ": I will reset your machine\n");
}

/*
 * zf_write - userspace heartbeat; any write refreshes next_heartbeat,
 * and (unless nowayout) a 'V' arms the magic-close sequence.
 */
static ssize_t zf_write(struct file *file, const char __user *buf,
						size_t count, loff_t *ppos)
{
	/* See if we got the magic character */
	if (count) {
		/*
		 * no need to check for close confirmation
		 * no way to disable watchdog ;)
		 */
		if (!nowayout) {
			size_t ofs;

			/*
			 * note: just in case someone wrote the magic
			 * character five months ago...
			 */
			zf_expect_close = 0;

			/* now scan */
			for (ofs = 0; ofs != count; ofs++) {
				char c;
				if (get_user(c, buf + ofs))
					return -EFAULT;
				if (c == 'V') {
					zf_expect_close = 42;
					dprintk("zf_expect_close = 42\n");
				}
			}
		}

		/*
		 * Well, anyhow someone wrote to us,
		 * we should return that favour
		 */
		next_heartbeat = jiffies + ZF_USER_TIMEO;
		dprintk("user ping at %ld\n", jiffies);
	}
	return count;
}

static long zf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &zf_info, sizeof(zf_info)))
			return -EFAULT;
		break;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_KEEPALIVE:
		zf_ping(0);
		break;
	default:
		return -ENOTTY;
	}
	return 0;
}

/* Single-open device: starting the watchdog on open; pin the module
 * when nowayout is set so it can never be stopped. */
static int zf_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &zf_is_open))
		return -EBUSY;
	if (nowayout)
		__module_get(THIS_MODULE);
	zf_timer_on();
	return nonseekable_open(inode, file);
}

/* Stop the watchdog only after a proper magic close ('V' then close);
 * otherwise keep the hardware running and just drop the soft ping. */
static int zf_close(struct inode *inode, struct file *file)
{
	if (zf_expect_close == 42)
		zf_timer_off();
	else {
		del_timer(&zf_timer);
		printk(KERN_ERR PFX ": device file closed unexpectedly. "
						"Will not stop the WDT!\n");
	}
	clear_bit(0, &zf_is_open);
	zf_expect_close = 0;
	return 0;
}

/*
 * Notifier for system down
 */
static int zf_notify_sys(struct notifier_block *this, unsigned long code,
								void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		zf_timer_off();
	return NOTIFY_DONE;
}

static const struct file_operations zf_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= zf_write,
	.unlocked_ioctl	= zf_ioctl,
	.open		= zf_open,
	.release	= zf_close,
};

static struct miscdevice zf_miscdev = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &zf_fops,
};

/*
 * The device needs to learn about soft shutdowns in order to
 * turn the timebomb registers off.
 */
static struct notifier_block zf_notifier = {
	.notifier_call = zf_notify_sys,
};

static void __init zf_show_action(int act)
{
	static const char * const str[] = { "RESET", "SMI", "NMI", "SCI" };

	printk(KERN_INFO PFX ": Watchdog using action = %s\n", str[act]);
}

/*
 * zf_init - detect the ZF-Logic chip, translate the "action" parameter
 * into the matching GEN_* control bit (GEN_RESET >> action) and register
 * the reboot notifier and misc device. goto-based unwind on failure.
 */
static int __init zf_init(void)
{
	int ret;

	printk(KERN_INFO PFX
		": MachZ ZF-Logic Watchdog driver initializing.\n");

	ret = zf_get_ZFL_version();
	if (!ret || ret == 0xffff) {
		printk(KERN_WARNING PFX ": no ZF-Logic found\n");
		return -ENODEV;
	}

	if (action <= 3 && action >= 0)
		zf_action = zf_action >> action;
	else
		action = 0;

	zf_show_action(action);

	if (!request_region(ZF_IOBASE, 3, "MachZ ZFL WDT")) {
		printk(KERN_ERR "cannot reserve I/O ports at %d\n",
							ZF_IOBASE);
		ret = -EBUSY;
		goto no_region;
	}

	ret = register_reboot_notifier(&zf_notifier);
	if (ret) {
		printk(KERN_ERR "can't register reboot notifier (err=%d)\n",
									ret);
		goto no_reboot;
	}

	ret = misc_register(&zf_miscdev);
	if (ret) {
		printk(KERN_ERR "can't misc_register on minor=%d\n",
							WATCHDOG_MINOR);
		goto no_misc;
	}

	zf_set_status(0);
	zf_set_control(0);

	return 0;

no_misc:
	unregister_reboot_notifier(&zf_notifier);
no_reboot:
	release_region(ZF_IOBASE, 3);
no_region:
	return ret;
}

static void __exit zf_exit(void)
{
	zf_timer_off();

	misc_deregister(&zf_miscdev);
	unregister_reboot_notifier(&zf_notifier);
	release_region(ZF_IOBASE, 3);
}

module_init(zf_init);
module_exit(zf_exit);
gpl-2.0
bilalliberty/android_kernel_htc_villec2
drivers/scsi/aic7xxx/aic7xxx_core.c
3183
216264
/* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $ */ #ifdef __linux__ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aicasm/aicasm_insformat.h" #else #include <dev/aic7xxx/aic7xxx_osm.h> #include <dev/aic7xxx/aic7xxx_inline.h> #include <dev/aic7xxx/aicasm/aicasm_insformat.h> #endif /***************************** Lookup Tables **********************************/ static const char *const ahc_chip_names[] = { "NONE", "aic7770", "aic7850", "aic7855", "aic7859", "aic7860", "aic7870", "aic7880", "aic7895", "aic7895C", "aic7890/91", "aic7896/97", "aic7892", "aic7899" }; static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names); /* * Hardware error codes. */ struct ahc_hard_error_entry { uint8_t errno; const char *errmesg; }; static const struct ahc_hard_error_entry ahc_hard_errors[] = { { ILLHADDR, "Illegal Host Access" }, { ILLSADDR, "Illegal Sequencer Address referrenced" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { PCIERRSTAT, "PCI Error detected" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors); static const struct ahc_phase_table_entry ahc_phase_table[] = { { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, { P_COMMAND, MSG_NOOP, "in Command phase" }, { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, MSG_NOOP, "while idle" }, { 0, MSG_NOOP, "in unknown phase" } }; /* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count. */ static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1; /* * Valid SCSIRATE values. (p. 
3-17) * Provides a mapping of tranfer periods in ns to the proper value to * stick in the scsixfer reg. */ static const struct ahc_syncrate ahc_syncrates[] = { /* ultra2 fast/ultra period rate */ { 0x42, 0x000, 9, "80.0" }, { 0x03, 0x000, 10, "40.0" }, { 0x04, 0x000, 11, "33.0" }, { 0x05, 0x100, 12, "20.0" }, { 0x06, 0x110, 15, "16.0" }, { 0x07, 0x120, 18, "13.4" }, { 0x08, 0x000, 25, "10.0" }, { 0x19, 0x010, 31, "8.0" }, { 0x1a, 0x020, 37, "6.67" }, { 0x1b, 0x030, 43, "5.7" }, { 0x1c, 0x040, 50, "5.0" }, { 0x00, 0x050, 56, "4.4" }, { 0x00, 0x060, 62, "4.0" }, { 0x00, 0x070, 68, "3.6" }, { 0x00, 0x000, 0, NULL } }; /* Our Sequencer Program */ #include "aic7xxx_seq.h" /**************************** Function Declarations ***************************/ static void ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static struct ahc_tmode_tstate* ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel); #ifdef AHC_TARGET_MODE static void ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force); #endif static const struct ahc_syncrate* ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahc_update_pending_scbs(struct ahc_softc *ahc); static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_assert_atn(struct ahc_softc *ahc); static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset); static void ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int bus_width); static void ahc_construct_ppr(struct ahc_softc *ahc, struct 
ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahc_clear_msg_state(struct ahc_softc *ahc); static void ahc_handle_proto_violation(struct ahc_softc *ahc); static void ahc_handle_message_phase(struct ahc_softc *ahc); typedef enum { AHCMSG_1B, AHCMSG_2B, AHCMSG_EXT } ahc_msgtype; static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full); static int ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level); #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); #endif static bus_dmamap_callback_t ahc_dmamap_cb; static void ahc_build_free_scb_list(struct ahc_softc *ahc); static int ahc_init_scbdata(struct ahc_softc *ahc); static void ahc_fini_scbdata(struct ahc_softc *ahc); static void ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, struct scb *scb); static int ahc_qinfifo_count(struct ahc_softc *ahc); static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr); static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); static u_int ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev); static void ahc_reset_current_bus(struct ahc_softc *ahc); #ifdef AHC_DUMP_SEQ static void ahc_dumpseq(struct ahc_softc *ahc); #endif static int ahc_loadseq(struct ahc_softc *ahc); static int ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch, u_int start_instr, u_int *skip_addr); static void ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts); #ifdef 
AHC_TARGET_MODE static void ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask); static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd); #endif static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl); static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl); static void ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int busyid); /************************** SCB and SCB queue management **********************/ static void ahc_run_untagged_queues(struct ahc_softc *ahc); static void ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue); /****************************** Initialization ********************************/ static void ahc_alloc_scbs(struct ahc_softc *ahc); static void ahc_shutdown(void *arg); /*************************** Interrupt Services *******************************/ static void ahc_clear_intstat(struct ahc_softc *ahc); static void ahc_run_qoutfifo(struct ahc_softc *ahc); #ifdef AHC_TARGET_MODE static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused); #endif static void ahc_handle_brkadrint(struct ahc_softc *ahc); static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat); static void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat); static void ahc_clear_critical_section(struct ahc_softc *ahc); /***************************** Error Recovery *********************************/ static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb); static int ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); static void ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb); /*********************** Untagged Transaction Routines ************************/ static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc); static inline void 
ahc_release_untagged_queues(struct ahc_softc *ahc);

/*
 * Block our completion routine from starting the next untagged
 * transaction for this target or target lun.
 */
static inline void
ahc_freeze_untagged_queues(struct ahc_softc *ahc)
{
	/*
	 * Only controllers without SCB busy-target-table (BTT) support
	 * use the counting lock; BTT hardware serializes for us.
	 */
	if ((ahc->flags & AHC_SCB_BTT) == 0)
		ahc->untagged_queue_lock++;
}

/*
 * Allow the next untagged transaction for this target or target lun
 * to be executed.  We use a counting semaphore to allow the lock
 * to be acquired recursively.  Once the count drops to zero, the
 * transaction queues will be run.
 */
static inline void
ahc_release_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0) {
		ahc->untagged_queue_lock--;
		/* Last release: kick any queued untagged transactions. */
		if (ahc->untagged_queue_lock == 0)
			ahc_run_untagged_queues(ahc);
	}
}

/************************* Sequencer Execution Control ************************/
/*
 * Work around any chip bugs related to halting sequencer execution.
 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
 * reading a register that will set this signal and deassert it.
 * Without this workaround, if the chip is paused, by an interrupt or
 * manual pause while accessing scb ram, accesses to certain registers
 * will hang the system (infinite pci retries).
 */
static void
ahc_pause_bug_fix(struct ahc_softc *ahc)
{
	/* The read of CCSCBCTL is for its side effect only. */
	if ((ahc->features & AHC_ULTRA2) != 0)
		(void)ahc_inb(ahc, CCSCBCTL);
}

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
int
ahc_is_paused(struct ahc_softc *ahc)
{
	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
void
ahc_pause(struct ahc_softc *ahc)
{
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahc_is_paused(ahc) == 0)
		;

	ahc_pause_bug_fix(ahc);
}

/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
void
ahc_unpause(struct ahc_softc *ahc)
{
	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
		ahc_outb(ahc, HCNTRL, ahc->unpause);
}

/************************** Memory mapping routines ***************************/
/*
 * Translate an SG-list bus address back to the in-core SG entry.
 */
static struct ahc_dma_seg *
ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
{
	int sg_index;

	sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
	/* sg_list_phys points to entry 1, not 0 */
	sg_index++;

	return (&scb->sg_list[sg_index]);
}

/*
 * Translate an in-core SG entry to the bus address the chip uses.
 */
static uint32_t
ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
{
	int sg_index;

	/* sg_list_phys points to entry 1, not 0 */
	sg_index = sg - &scb->sg_list[1];

	return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
}

/*
 * Bus address of the hardware SCB at the given index in the
 * contiguous hscb array.
 */
static uint32_t
ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
{
	return (ahc->scb_data->hscb_busaddr
		+ (sizeof(struct hardware_scb) * index));
}

/*
 * DMA-sync the single hardware SCB backing @scb in the direction
 * given by @op.
 */
static void
ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
{
	ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
			ahc->scb_data->hscb_dmamap,
			/*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
			/*len*/sizeof(*scb->hscb), op);
}

/*
 * DMA-sync the SG list of @scb; no-op when the SCB carries no
 * SG entries.
 */
void
ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat,
			scb->sg_map->sg_dmamap,
			/*offset*/(scb->sg_list -
				   scb->sg_map->sg_vaddr) * sizeof(struct ahc_dma_seg),
			/*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
}

#ifdef AHC_TARGET_MODE
/*
 * Byte offset of target command slot @index within the shared
 * data area (which begins with the qoutfifo).
 */
static uint32_t
ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
{
	return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
}
#endif

/*********************** Miscellaneous Support Functions ***********************/
/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static void
ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
{
	uint32_t sgptr;

	/* The sequencer sets SG_RESID_VALID in sgptr when a residual exists. */
	sgptr = ahc_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) != 0)
		ahc_calc_residual(ahc, scb);
}

/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
struct ahc_initiator_tinfo *
ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
		    u_int remote_id, struct ahc_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;	/* Channel B ids occupy the second half of the table. */
	*tstate = ahc->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}

/*
 * Multi-byte register accessors.  Reads fetch the high byte first,
 * writes store the low byte first; both are composed from single
 * byte ahc_inb/ahc_outb accesses.
 */
uint16_t
ahc_inw(struct ahc_softc *ahc, u_int port)
{
	uint16_t r = ahc_inb(ahc, port+1) << 8;
	return r | ahc_inb(ahc, port);
}

void
ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
}

uint32_t
ahc_inl(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24));
}

void
ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
{
	ahc_outb(ahc, port, (value) & 0xFF);
	ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
	ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
	ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
}

uint64_t
ahc_inq(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24)
	      | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
	      | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
	      | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
	      | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
}

void
ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
	ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
	ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
	ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
	ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
	ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
	ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
}

/*
 * Get a free scb. If there are none, see if we can allocate a new SCB.
 */
struct scb *
ahc_get_scb(struct ahc_softc *ahc)
{
	struct scb *scb;

	if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
		/* Free list empty: try to grow the pool, then retry once. */
		ahc_alloc_scbs(ahc);
		scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
		if (scb == NULL)
			return (NULL);
	}
	SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
void
ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	/* Clean up for the next user */
	ahc->scb_data->scbindex[hscb->tag] = NULL;
	scb->flags = SCB_FREE;
	hscb->control = 0;

	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);

	/* Notify the OSM that a resource is now available. */
	ahc_platform_scb_free(ahc, scb);
}

/*
 * Look up the SCB currently associated with @tag, syncing its
 * hardware SCB for CPU access before returning it.  Returns NULL
 * when no SCB owns the tag.
 */
struct scb *
ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
{
	struct scb* scb;

	scb = ahc->scb_data->scbindex[tag];
	if (scb != NULL)
		ahc_sync_scb(ahc, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	return (scb);
}

static void
ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	u_int  saved_tag;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB to download, and we
	 * can't disappoint it.  To achieve this, the next
	 * SCB to download is saved off in ahc->next_queued_scb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahc->next_queued_scb->hscb;
	saved_tag = q_hscb->tag;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	if ((scb->flags & SCB_CDB32_PTR) != 0) {
		/* Re-point the 32-byte CDB pointer at the destination hscb. */
		q_hscb->shared_data.cdb_ptr =
		    ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
			      + offsetof(struct hardware_scb, cdb32));
	}
	q_hscb->tag = saved_tag;
	q_hscb->next = scb->hscb->tag;

	/* Now swap HSCB pointers.
	 */
	ahc->next_queued_scb->hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahc->scb_data->scbindex[scb->hscb->tag] = scb;
}

/*
 * Tell the sequencer about a new transaction to execute.
 */
void
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_swap_with_next_hscb(ahc, scb);

	if (scb->hscb->tag == SCB_LIST_NULL
	 || scb->hscb->next == SCB_LIST_NULL)
		panic("Attempt to queue invalid SCB tag %x:%x\n",
		      scb->hscb->tag, scb->hscb->next);

	/*
	 * Setup data "oddness".
	 */
	scb->hscb->lun &= LID;
	if (ahc_get_transfer_length(scb) & 0x1)
		scb->hscb->lun |= SCB_XFERLEN_ODD;

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Tell the adapter about the newly queued SCB */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		/* Without autopause, the chip must be paused to touch KERNEL_QINPOS. */
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_pause(ahc);
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_unpause(ahc);
	}
}

/*
 * Per-SCB sense buffer; indexed by the SCB's position in scbarray.
 */
struct scsi_sense_data *
ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (&ahc->scb_data->sense[offset]);
}

/*
 * Bus address of the per-SCB sense buffer returned by
 * ahc_get_sense_buf().
 */
static uint32_t
ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (ahc->scb_data->sense_busaddr
		+ (offset * sizeof(struct scsi_sense_data)));
}

/************************** Interrupt Processing ******************************/
/*
 * Sync the 256-byte command-complete fifo for CPU or device access.
 */
static void
ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
{
	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/0, /*len*/256, op);
}

/*
 * Sync the incoming target-mode command fifo (target role only).
 */
static void
ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
{
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_dmamap_sync(ahc,
ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, 0), sizeof(struct target_cmd) * AHC_TMODE_CMDS, op); } #endif } /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. */ #define AHC_RUN_QOUTFIFO 0x1 #define AHC_RUN_TQINFIFO 0x2 static u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc) { u_int retval; retval = 0; ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/ahc->qoutfifonext, /*len*/1, BUS_DMASYNC_POSTREAD); if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) retval |= AHC_RUN_QOUTFIFO; #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) { ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, ahc->tqinfifofnext), /*len*/sizeof(struct target_cmd), BUS_DMASYNC_POSTREAD); if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0) retval |= AHC_RUN_TQINFIFO; } #endif return (retval); } /* * Catch an interrupt from the adapter */ int ahc_intr(struct ahc_softc *ahc) { u_int intstat; if ((ahc->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return. This is likely just a shared * interrupt. */ return (0); } /* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases. 
	 */
	if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
	 && (ahc_check_cmdcmpltqueues(ahc) != 0))
		intstat = CMDCMPLT;
	else {
		intstat = ahc_inb(ahc, INTSTAT);
	}

	if ((intstat & INT_PEND) == 0) {
#if AHC_PCI_CONFIG > 0
		/* After many spurious interrupts, check for a PCI error. */
		if (ahc->unsolicited_ints > 500) {
			ahc->unsolicited_ints = 0;
			if ((ahc->chip & AHC_PCI) != 0
			 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc->bus_intr(ahc);
		}
#endif
		ahc->unsolicited_ints++;
		return (0);
	}
	ahc->unsolicited_ints = 0;

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		ahc_flush_device_writes(ahc);
		ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & BRKADRINT) {
		ahc_handle_brkadrint(ahc);
	} else if ((intstat & (SEQINT|SCSIINT)) != 0) {

		ahc_pause_bug_fix(ahc);

		if ((intstat & SEQINT) != 0)
			ahc_handle_seqint(ahc, intstat);

		if ((intstat & SCSIINT) != 0)
			ahc_handle_scsiint(ahc, intstat);
	}
	return (1);
}

/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero
 */
static void
ahc_restart(struct ahc_softc *ahc)
{
	uint8_t	sblkctl;

	ahc_pause(ahc);

	/* No more pending messages.
	 */
	ahc_clear_msg_state(ahc);

	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
	ahc_outb(ahc, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);

	/* Always allow reselection */
	ahc_outb(ahc, SCSISEQ,
		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	if ((ahc->features & AHC_CMD_CHAN) != 0) {
		/* Ensure that no DMA operations are in progress */
		ahc_outb(ahc, CCSCBCNT, 0);
		ahc_outb(ahc, CCSGCTL, 0);
		ahc_outb(ahc, CCSCBCTL, 0);
	}
	/*
	 * If we were in the process of DMA'ing SCB data into
	 * an SCB, replace that SCB on the free list.  This prevents
	 * an SCB leak.
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
		ahc_add_curscb_to_free_list(ahc);
		ahc_outb(ahc, SEQ_FLAGS2,
			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
	}

	/*
	 * Clear any pending sequencer interrupt.  It is no
	 * longer relevant since we're resetting the Program
	 * Counter.
	 */
	ahc_outb(ahc, CLRINT, CLRSEQINT);

	ahc_outb(ahc, MWI_RESIDUAL, 0);
	ahc_outb(ahc, SEQCTL, ahc->seqctl);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	/*
	 * Take the LED out of diagnostic mode on PM resume, too
	 */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));

	ahc_unpause(ahc);
}

/************************* Input/Output Queues ********************************/
/*
 * Drain the command-complete fifo, completing each SCB the
 * firmware has posted.
 */
static void
ahc_run_qoutfifo(struct ahc_softc *ahc)
{
	struct scb *scb;
	u_int  scb_index;

	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {

		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
		if ((ahc->qoutfifonext & 0x03) == 0x03) {
			u_int modnext;

			/*
			 * Clear 32bits of QOUTFIFO at a time
			 * so that we don't clobber an incoming
			 * byte DMA to the array on architectures
			 * that only support 32bit load and store
			 * operations.
			 */
			modnext = ahc->qoutfifonext & ~0x3;
			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
			ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
					ahc->shared_data_dmamap,
					/*offset*/modnext, /*len*/4,
					BUS_DMASYNC_PREREAD);
		}
		/*
		 * NOTE(review): qoutfifonext appears to rely on wrapping
		 * modulo 256 (cf. the "& 0xFF" below) — confirm its declared
		 * type is a single byte.
		 */
		ahc->qoutfifonext++;

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printk("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahc_name(ahc), scb_index,
			       (ahc->qoutfifonext - 1) & 0xFF);
			continue;
		}

		/*
		 * Save off the residual
		 * if there is one.
		 */
		ahc_update_residual(ahc, scb);
		ahc_done(ahc, scb);
	}
}

/*
 * Kick all 16 per-target untagged queues.
 */
static void
ahc_run_untagged_queues(struct ahc_softc *ahc)
{
	int i;

	for (i = 0; i < 16; i++)
		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
}

/*
 * Start the next untagged transaction on @queue, unless the
 * untagged queues are currently frozen or one is already active.
 */
static void
ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
{
	struct scb *scb;

	if (ahc->untagged_queue_lock != 0)
		return;

	if ((scb = TAILQ_FIRST(queue)) != NULL
	 && (scb->flags & SCB_ACTIVE) == 0) {
		scb->flags |= SCB_ACTIVE;
		ahc_queue_scb(ahc, scb);
	}
}

/************************* Interrupt Handling *********************************/
static void
ahc_handle_brkadrint(struct ahc_softc *ahc)
{
	/*
	 * We upset the sequencer :-(
	 * Lookup the error message
	 */
	int i;
	int error;

	error = ahc_inb(ahc, ERROR);
	/*
	 * NOTE(review): if ERROR reads 0, this loop exits with
	 * i == num_errors and ahc_hard_errors[i] below indexes one
	 * past the end of the table — verify ERROR is always
	 * non-zero on BRKADRINT.
	 */
	for (i = 0; error != 1 && i < num_errors; i++)
		error >>= 1;
	printk("%s: brkadrint, %s at seqaddr = 0x%x\n",
	       ahc_name(ahc), ahc_hard_errors[i].errmesg,
	       ahc_inb(ahc, SEQADDR0) |
	       (ahc_inb(ahc, SEQADDR1) << 8));

	ahc_dump_card_state(ahc);

	/* Tell everyone that this HBA is no longer available */
	ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
		       CAM_NO_HBA);

	/* Disable all interrupt sources by resetting the controller */
	ahc_shutdown(ahc);
}

/*
 * Service a sequencer interrupt: decode the SEQINT code in the
 * upper bits of @intstat and handle it, unpausing the sequencer
 * on the way out where appropriate.
 */
static void
ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
{
	struct scb *scb;
	struct ahc_devinfo devinfo;

	ahc_fetch_devinfo(ahc, &devinfo);

	/*
	 * Clear the upper byte that holds SEQINT status
	 * codes and clear the SEQINT bit. We will unpause
	 * the sequencer, if appropriate, after servicing
	 * the request.
	 */
	ahc_outb(ahc, CLRINT, CLRSEQINT);
	switch (intstat & SEQINT_MASK) {
	case BAD_STATUS:
	{
		u_int  scb_index;
		struct hardware_scb *hscb;

		/*
		 * Set the default return value to 0 (don't
		 * send sense).  The sense code will change
		 * this if needed.
		 */
		ahc_outb(ahc, RETURN_1, 0);

		/*
		 * The sequencer will notify us when a command
		 * has an error that would be of interest to
		 * the kernel.
This allows us to leave the sequencer * running in the common case of command completes * without error. The sequencer will already have * dma'd the SCB back up to us, so we can reference * the in kernel copy directly. */ scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { ahc_print_devinfo(ahc, &devinfo); printk("ahc_intr - referenced scb " "not valid during seqint 0x%x scb(%d)\n", intstat, scb_index); ahc_dump_card_state(ahc); panic("for safety"); goto unpause; } hscb = scb->hscb; /* Don't want to clobber the original sense code */ if ((scb->flags & SCB_SENSE) != 0) { /* * Clear the SCB_SENSE Flag and have * the sequencer do a normal command * complete. */ scb->flags &= ~SCB_SENSE; ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); break; } ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); /* Freeze the queue until the client sees the error. */ ahc_freeze_devq(ahc, scb); ahc_freeze_scb(scb); ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); switch (hscb->shared_data.status.scsi_status) { case SCSI_STATUS_OK: printk("%s: Interrupted for staus of 0???\n", ahc_name(ahc)); break; case SCSI_STATUS_CMD_TERMINATED: case SCSI_STATUS_CHECK_COND: { struct ahc_dma_seg *sg; struct scsi_sense *sc; struct ahc_initiator_tinfo *targ_info; struct ahc_tmode_tstate *tstate; struct ahc_transinfo *tinfo; #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_SENSE) { ahc_print_path(ahc, scb); printk("SCB %d: requests Check Status\n", scb->hscb->tag); } #endif if (ahc_perform_autosense(scb) == 0) break; targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo = &targ_info->curr; sg = scb->sg_list; sc = (struct scsi_sense *)(&hscb->shared_data.cdb); /* * Save off the residual if there is one. 
			 */
			ahc_update_residual(ahc, scb);
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOW_SENSE) {
				ahc_print_path(ahc, scb);
				printk("Sending Sense\n");
			}
#endif
			/* Single SG element pointing at the per-SCB sense buffer. */
			sg->addr = ahc_get_sense_bufaddr(ahc, scb);
			sg->len = ahc_get_sense_bufsize(ahc, scb);
			sg->len |= AHC_DMA_LAST_SEG;

			/* Fixup byte order */
			sg->addr = ahc_htole32(sg->addr);
			sg->len = ahc_htole32(sg->len);

			sc->opcode = REQUEST_SENSE;
			sc->byte2 = 0;
			/* Pre-SCSI-3 devices encode the LUN in the CDB. */
			if (tinfo->protocol_version <= SCSI_REV_2
			 && SCB_GET_LUN(scb) < 8)
				sc->byte2 = SCB_GET_LUN(scb) << 5;
			sc->unused[0] = 0;
			sc->unused[1] = 0;
			sc->length = sg->len;
			sc->control = 0;

			/*
			 * We can't allow the target to disconnect.
			 * This will be an untagged transaction and
			 * having the target disconnect will make this
			 * transaction indistinguishable from outstanding
			 * tagged transactions.
			 */
			hscb->control = 0;

			/*
			 * This request sense could be because the
			 * device lost power or in some other
			 * way has lost our transfer negotiations.
			 * Renegotiate if appropriate.  Unit attention
			 * errors will be reported before any data
			 * phases occur.
			 */
			if (ahc_get_residual(scb) == ahc_get_transfer_length(scb)) {
				ahc_update_neg_request(ahc, &devinfo,
						       tstate, targ_info,
						       AHC_NEG_IF_NON_ASYNC);
			}
			if (tstate->auto_negotiate & devinfo.target_mask) {
				hscb->control |= MK_MESSAGE;
				scb->flags &= ~SCB_NEGOTIATE;
				scb->flags |= SCB_AUTO_NEGOTIATE;
			}
			hscb->cdb_len = sizeof(*sc);
			hscb->dataptr = sg->addr;
			hscb->datacnt = sg->len;
			hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
			hscb->sgptr = ahc_htole32(hscb->sgptr);
			scb->sg_count = 1;
			scb->flags |= SCB_SENSE;
			ahc_qinfifo_requeue_tail(ahc, scb);
			ahc_outb(ahc, RETURN_1, SEND_SENSE);
			/*
			 * Ensure we have enough time to actually
			 * retrieve the sense.
			 */
			ahc_scb_timer_reset(scb, 5 * 1000000);
			break;
		}
		default:
			break;
		}
		break;
	}
	case NO_MATCH:
	{
		/* Ensure we don't leave the selection hardware on */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

		printk("%s:%c:%d: no active SCB for reconnecting "
		       "target - issuing BUS DEVICE RESET\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target);
		printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
		printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n",
		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
		       ahc_index_busy_tcl(ahc,
			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
				      ahc_inb(ahc, SAVED_LUN))),
		       ahc_inb(ahc, SINDEX));
		printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
		       ahc_inb(ahc, SCB_CONTROL));
		printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
		printk("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
		printk("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
		ahc_dump_card_state(ahc);
		/* Queue a BUS DEVICE RESET message for the confused target. */
		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
		ahc->msgout_len = 1;
		ahc->msgout_index = 0;
		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		ahc_assert_atn(ahc);
		break;
	}
	case SEND_REJECT:
	{
		u_int rejbyte = ahc_inb(ahc, ACCUM);
		printk("%s:%c:%d: Warning - unknown message received from "
		       "target (0x%x).  Rejecting\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
		break;
	}
	case PROTO_VIOLATION:
	{
		ahc_handle_proto_violation(ahc);
		break;
	}
	case IGN_WIDE_RES:
		ahc_handle_ign_wide_residue(ahc, &devinfo);
		break;
	case PDATA_REINIT:
		ahc_reinitialize_dataptrs(ahc);
		break;
	case BAD_PHASE:
	{
		u_int lastphase;

		lastphase = ahc_inb(ahc, LASTPHASE);
		printk("%s:%c:%d: unknown scsi bus phase %x, "
		       "lastphase = 0x%x.  Attempting to continue\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
		break;
	}
	case MISSED_BUSFREE:
	{
		u_int lastphase;

		lastphase = ahc_inb(ahc, LASTPHASE);
		printk("%s:%c:%d: Missed busfree. "
		       "Lastphase = 0x%x, Curphase = 0x%x\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
		/* Restart resets sequencer state; skip the final unpause path. */
		ahc_restart(ahc);
		return;
	}
	case HOST_MSG_LOOP:
	{
		/*
		 * The sequencer has encountered a message phase
		 * that requires host assistance for completion.
		 * While handling the message phase(s), we will be
		 * notified by the sequencer after each byte is
		 * transferred so we can track bus phase changes.
		 *
		 * If this is the first time we've seen a HOST_MSG_LOOP
		 * interrupt, initialize the state of the host message
		 * loop.
		 */
		if (ahc->msg_type == MSG_TYPE_NONE) {
			struct scb *scb;
			u_int scb_index;
			u_int bus_phase;

			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
			if (bus_phase != P_MESGIN
			 && bus_phase != P_MESGOUT) {
				printk("ahc_intr: HOST_MSG_LOOP bad "
				       "phase 0x%x\n",
				      bus_phase);
				/*
				 * Probably transitioned to bus free before
				 * we got here.  Just punt the message.
				 */
				ahc_clear_intstat(ahc);
				ahc_restart(ahc);
				return;
			}

			scb_index = ahc_inb(ahc, SCB_TAG);
			scb = ahc_lookup_scb(ahc, scb_index);
			if (devinfo.role == ROLE_INITIATOR) {
				if (bus_phase == P_MESGOUT) {
					if (scb == NULL)
						panic("HOST_MSG_LOOP with "
						      "invalid SCB %x\n",
						      scb_index);

					ahc_setup_initiator_msgout(ahc,
								   &devinfo,
								   scb);
				} else {
					ahc->msg_type =
					    MSG_TYPE_INITIATOR_MSGIN;
					ahc->msgin_index = 0;
				}
			}
#ifdef AHC_TARGET_MODE
			else {
				if (bus_phase == P_MESGOUT) {
					ahc->msg_type =
					    MSG_TYPE_TARGET_MSGOUT;
					ahc->msgin_index = 0;
				}
				else
					ahc_setup_target_msgin(ahc,
							       &devinfo,
							       scb);
			}
#endif
		}

		ahc_handle_message_phase(ahc);
		break;
	}
	case PERR_DETECTED:
	{
		/*
		 * If we've cleared the parity error interrupt
		 * but the sequencer still believes that SCSIPERR
		 * is true, it must be that the parity error is
		 * for the currently presented byte on the bus,
		 * and we are not in a phase (data-in) where we will
		 * eventually ack this byte.  Ack the byte and
		 * throw it away in the hope that the target will
		 * take us to message out to deliver the appropriate
		 * error message.
		 */
		if ((intstat & SCSIINT) == 0
		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {

			if ((ahc->features & AHC_DT) == 0) {
				u_int curphase;

				/*
				 * The hardware will only let you ack bytes
				 * if the expected phase in SCSISIGO matches
				 * the current phase.  Make sure this is
				 * currently the case.
				 */
				curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
				ahc_outb(ahc, LASTPHASE, curphase);
				ahc_outb(ahc, SCSISIGO, curphase);
			}
			if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) {
				int wait;

				/*
				 * In a data phase.  Faster to bitbucket
				 * the data than to individually ack each
				 * byte.  This is also the only strategy
				 * that will work with AUTOACK enabled.
				 */
				ahc_outb(ahc, SXFRCTL1,
					 ahc_inb(ahc, SXFRCTL1) | BITBUCKET);
				wait = 5000;
				while (--wait != 0) {
					if ((ahc_inb(ahc, SCSISIGI)
					  & (CDI|MSGI)) != 0)
						break;
					ahc_delay(100);
				}
				ahc_outb(ahc, SXFRCTL1,
					 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
				if (wait == 0) {
					/* Target never left data phase: give up and reset. */
					struct	scb *scb;
					u_int	scb_index;

					ahc_print_devinfo(ahc, &devinfo);
					printk("Unable to clear parity error.  "
					       "Resetting bus.\n");
					scb_index = ahc_inb(ahc, SCB_TAG);
					scb = ahc_lookup_scb(ahc, scb_index);
					if (scb != NULL)
						ahc_set_transaction_status(scb,
						    CAM_UNCOR_PARITY);
					ahc_reset_channel(ahc, devinfo.channel,
							  /*init reset*/TRUE);
				}
			} else {
				/* Not a data phase: ack and discard the single bad byte. */
				ahc_inb(ahc, SCSIDATL);
			}
		}
		break;
	}
	case DATA_OVERRUN:
	{
		/*
		 * When the sequencer detects an overrun, it
		 * places the controller in "BITBUCKET" mode
		 * and allows the target to complete its transfer.
		 * Unfortunately, none of the counters get updated
		 * when the controller is in this mode, so we have
		 * no way of knowing how large the overrun was.
		 */
		u_int scbindex = ahc_inb(ahc, SCB_TAG);
		u_int lastphase = ahc_inb(ahc, LASTPHASE);
		u_int i;

		scb = ahc_lookup_scb(ahc, scbindex);
		for (i = 0; i < num_phases; i++) {
			if (lastphase == ahc_phase_table[i].phase)
				break;
		}
		ahc_print_path(ahc, scb);
		printk("data overrun detected %s."
		       "  Tag == 0x%x.\n",
		       ahc_phase_table[i].phasemsg,
  		       scb->hscb->tag);
		ahc_print_path(ahc, scb);
		printk("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
		       ahc_get_transfer_length(scb), scb->sg_count);
		if (scb->sg_count > 0) {
			for (i = 0; i < scb->sg_count; i++) {

				printk("sg[%d] - Addr 0x%x%x : Length %d\n",
				       i,
				       (ahc_le32toh(scb->sg_list[i].len) >> 24
				        & SG_HIGH_ADDR_BITS),
				       ahc_le32toh(scb->sg_list[i].addr),
				       ahc_le32toh(scb->sg_list[i].len)
				       & AHC_SG_LEN_MASK);
			}
		}
		/*
		 * Set this and it will take effect when the
		 * target does a command complete.
		 */
		ahc_freeze_devq(ahc, scb);
		if ((scb->flags & SCB_SENSE) == 0) {
			ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		} else {
			/* Overrun happened during autosense; report that instead. */
			scb->flags &= ~SCB_SENSE;
			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
		}
		ahc_freeze_scb(scb);

		if ((ahc->features & AHC_ULTRA2) != 0) {
			/*
			 * Clear the channel in case we return
			 * to data phase later.
			 */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
		}
		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
			u_int dscommand1;

			/* Ensure HHADDR is 0 for future DMA operations. */
			dscommand1 = ahc_inb(ahc, DSCOMMAND1);
			ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
			ahc_outb(ahc, HADDR, 0);
			ahc_outb(ahc, DSCOMMAND1, dscommand1);
		}
		break;
	}
	case MKMSG_FAILED:
	{
		u_int scbindex;

		printk("%s:%c:%d:%d: Attempt to issue message failed\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       devinfo.lun);
		scbindex = ahc_inb(ahc, SCB_TAG);
		scb = ahc_lookup_scb(ahc, scbindex);
		if (scb != NULL
		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
			/*
			 * Ensure that we didn't put a second instance of this
			 * SCB into the QINFIFO.
			 */
			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
					   SCB_GET_CHANNEL(ahc, scb),
					   SCB_GET_LUN(scb), scb->hscb->tag,
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		break;
	}
	case NO_FREE_SCB:
	{
		printk("%s: No free or disconnected SCBs\n", ahc_name(ahc));
		ahc_dump_card_state(ahc);
		panic("for safety");
		break;
	}
	case SCB_MISMATCH:
	{
		u_int scbptr;

		scbptr = ahc_inb(ahc, SCBPTR);
		printk("Bogus TAG after DMA.  SCBPTR %d, tag %d, our tag %d\n",
		       scbptr, ahc_inb(ahc, ARG_1),
		       ahc->scb_data->hscbs[scbptr].tag);
		ahc_dump_card_state(ahc);
		panic("for safety");
		break;
	}
	case OUT_OF_RANGE:
	{
		printk("%s: BTT calculation out of range\n", ahc_name(ahc));
		printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
		printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n, A == 0x%x\n",
		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
		       ahc_index_busy_tcl(ahc,
			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
				      ahc_inb(ahc, SAVED_LUN))),
		       ahc_inb(ahc, SINDEX),
		       ahc_inb(ahc, ACCUM));
		printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
		       ahc_inb(ahc, SCB_CONTROL));
		printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
		ahc_dump_card_state(ahc);
		panic("for safety");
		break;
	}
	default:
		printk("ahc_intr: seqint, "
		       "intstat == 0x%x, scsisigi = 0x%x\n",
		       intstat, ahc_inb(ahc, SCSISIGI));
		break;
	}
unpause:
	/*
	 * The sequencer is paused immediately on
	 * a SEQINT, so we should restart it when
	 * we're done.
 */
	ahc_unpause(ahc);
}

/*
 * Service a SCSI bus interrupt (SELTO, SCSIRSTI, BUSFREE, SCSIPERR or,
 * on Ultra2 parts, an IOERR transceiver change).  Decodes the SSTAT
 * registers, identifies the interrupting channel on twin-channel
 * adapters, and dispatches to the appropriate recovery action.
 * Called with the sequencer paused; each branch is responsible for
 * clearing CLRSCSIINT and unpausing or restarting the sequencer.
 */
static void
ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
{
	u_int	scb_index;
	u_int	status0;
	u_int	status;
	struct	scb *scb;
	char	cur_channel;
	char	intr_channel;

	/* On twin-channel adapters, SBLKCTL tells us which bus is selected. */
	if ((ahc->features & AHC_TWIN) != 0
	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
		cur_channel = 'B';
	else
		cur_channel = 'A';
	intr_channel = cur_channel;

	if ((ahc->features & AHC_ULTRA2) != 0)
		status0 = ahc_inb(ahc, SSTAT0) & IOERR;
	else
		status0 = 0;
	status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
	if (status == 0 && status0 == 0) {
		if ((ahc->features & AHC_TWIN) != 0) {
			/* Try the other channel */
			ahc_outb(ahc, SBLKCTL,
				 ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
			status = ahc_inb(ahc, SSTAT1)
			       & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
		}
		if (status == 0) {
			printk("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_unpause(ahc);
			return;
		}
	}

	/* Make sure the sequencer is in a safe location. */
	ahc_clear_critical_section(ahc);

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/* Ignore the SCB if the connection has not been fully identified. */
	if (scb != NULL
	 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
		scb = NULL;

	if ((ahc->features & AHC_ULTRA2) != 0
	 && (status0 & IOERR) != 0) {
		int now_lvd;

		now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
		printk("%s: Transceiver State Has Changed to %s mode\n",
		       ahc_name(ahc), now_lvd ? "LVD" : "SE");
		ahc_outb(ahc, CLRSINT0, CLRIOERR);
		/*
		 * When transitioning to SE mode, the reset line
		 * glitches, triggering an arbitration bug in some
		 * Ultra2 controllers.  This bug is cleared when we
		 * assert the reset line.  Since a reset glitch has
		 * already occurred with this transition and a
		 * transceiver state change is handled just like
		 * a bus reset anyway, asserting the reset line
		 * ourselves is safe.
		 */
		ahc_reset_channel(ahc, intr_channel,
				  /*Initiate Reset*/now_lvd == 0);
	} else if ((status & SCSIRSTI) != 0) {
		printk("%s: Someone reset channel %c\n",
		       ahc_name(ahc), intr_channel);
		if (intr_channel != cur_channel)
			ahc_outb(ahc, SBLKCTL,
				 ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
		ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
	} else if ((status & SCSIPERR) != 0) {
		/*
		 * Determine the bus phase and queue an appropriate message.
		 * SCSIPERR is latched true as soon as a parity error
		 * occurs.  If the sequencer acked the transfer that
		 * caused the parity error and the currently presented
		 * transfer on the bus has correct parity, SCSIPERR will
		 * be cleared by CLRSCSIPERR.  Use this to determine if
		 * we should look at the last phase the sequencer recorded,
		 * or the current phase presented on the bus.
		 */
		struct	ahc_devinfo devinfo;
		u_int	mesg_out;
		u_int	curphase;
		u_int	errorphase;
		u_int	lastphase;
		u_int	scsirate;
		u_int	i;
		u_int	sstat2;
		int	silent;

		lastphase = ahc_inb(ahc, LASTPHASE);
		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
		sstat2 = ahc_inb(ahc, SSTAT2);
		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
		/*
		 * For all phases save DATA, the sequencer won't
		 * automatically ack a byte that has a parity error
		 * in it.  So the only way that the current phase
		 * could be 'data-in' is if the parity error is for
		 * an already acked byte in the data phase.  During
		 * synchronous data-in transfers, we may actually
		 * ack bytes before latching the current phase in
		 * LASTPHASE, leading to the discrepancy between
		 * curphase and lastphase.
		 */
		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
		 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
			errorphase = curphase;
		else
			errorphase = lastphase;

		for (i = 0; i < num_phases; i++) {
			if (errorphase == ahc_phase_table[i].phase)
				break;
		}
		mesg_out = ahc_phase_table[i].mesg_out;
		silent = FALSE;
		if (scb != NULL) {
			if (SCB_IS_SILENT(scb))
				silent = TRUE;
			else
				ahc_print_path(ahc, scb);
			scb->flags |= SCB_TRANSMISSION_ERROR;
		} else
			printk("%s:%c:%d: ", ahc_name(ahc), intr_channel,
			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
		scsirate = ahc_inb(ahc, SCSIRATE);
		if (silent == FALSE) {
			printk("parity error detected %s. "
			       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
			       ahc_phase_table[i].phasemsg,
			       ahc_inw(ahc, SEQADDR0),
			       scsirate);
			if ((ahc->features & AHC_DT) != 0) {
				if ((sstat2 & CRCVALERR) != 0)
					printk("\tCRC Value Mismatch\n");
				if ((sstat2 & CRCENDERR) != 0)
					printk("\tNo terminal CRC packet "
					       "recevied\n");
				if ((sstat2 & CRCREQERR) != 0)
					printk("\tIllegal CRC packet "
					       "request\n");
				if ((sstat2 & DUAL_EDGE_ERR) != 0)
					printk("\tUnexpected %sDT Data Phase\n",
					       (scsirate & SINGLE_EDGE)
					     ? "" : "non-");
			}
		}

		if ((ahc->features & AHC_DT) != 0
		 && (sstat2 & DUAL_EDGE_ERR) != 0) {
			/*
			 * This error applies regardless of
			 * data direction, so ignore the value
			 * in the phase table.
			 */
			mesg_out = MSG_INITIATOR_DET_ERR;
		}

		/*
		 * We've set the hardware to assert ATN if we
		 * get a parity error on "in" phases, so all we
		 * need to do is stuff the message buffer with
		 * the appropriate message.  "In" phases have set
		 * mesg_out to something other than MSG_NOP.
		 */
		if (mesg_out != MSG_NOOP) {
			if (ahc->msg_type != MSG_TYPE_NONE)
				ahc->send_msg_perror = TRUE;
			else
				ahc_outb(ahc, MSG_OUT, mesg_out);
		}
		/*
		 * Force a renegotiation with this target just in
		 * case we are out of sync for some external reason
		 * unknown (or unreported) by the target.
		 */
		ahc_fetch_devinfo(ahc, &devinfo);
		ahc_force_renegotiation(ahc, &devinfo);

		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_unpause(ahc);
	} else if ((status & SELTO) != 0) {
		u_int	scbptr;

		/* Stop the selection */
		ahc_outb(ahc, SCSISEQ, 0);

		/* No more pending messages */
		ahc_clear_msg_state(ahc);

		/* Clear interrupt state */
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to insure
		 * the LED turns off just incase no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahc_outb(ahc, CLRSINT0, CLRSELINGO);

		scbptr = ahc_inb(ahc, WAITING_SCBH);
		ahc_outb(ahc, SCBPTR, scbptr);
		scb_index = ahc_inb(ahc, SCB_TAG);

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printk("%s: ahc_intr - referenced scb not "
			       "valid during SELTO scb(%d, %d)\n",
			       ahc_name(ahc), scbptr, scb_index);
			ahc_dump_card_state(ahc);
		} else {
			struct ahc_devinfo devinfo;
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
				ahc_print_path(ahc, scb);
				printk("Saw Selection Timeout for SCB 0x%x\n",
				       scb_index);
			}
#endif
			ahc_scb_devinfo(ahc, &devinfo, scb);
			ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahc_freeze_devq(ahc, scb);

			/*
			 * Cancel any pending transactions on the device
			 * now that it seems to be missing.  This will
			 * also revert us to async/narrow transfers until
			 * we can renegotiate with the device.
			 */
			ahc_handle_devreset(ahc, &devinfo,
					    CAM_SEL_TIMEOUT,
					    "Selection Timeout",
					    /*verbose_level*/1);
		}
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_restart(ahc);
	} else if ((status & BUSFREE) != 0
		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
		struct	ahc_devinfo devinfo;
		u_int	lastphase;
		u_int	saved_scsiid;
		u_int	saved_lun;
		u_int	target;
		u_int	initiator_role_id;
		char	channel;
		int	printerror;

		/*
		 * Clear our selection hardware as soon as possible.
		 * We may have an entry in the waiting Q for this target,
		 * that is affected by this busfree and we don't want to
		 * go about selecting the target while we handle the event.
		 */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

		/*
		 * Disable busfree interrupts and clear the busfree
		 * interrupt status.  We do this here so that several
		 * bus transactions occur prior to clearing the SCSIINT
		 * latch.  It can take a bit for the clearing to take effect.
		 */
		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Look at what phase we were last in.
		 * If its message out, chances are pretty good
		 * that the busfree was in response to one of
		 * our abort requests.
		 */
		lastphase = ahc_inb(ahc, LASTPHASE);
		saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
		saved_lun = ahc_inb(ahc, SAVED_LUN);
		target = SCSIID_TARGET(ahc, saved_scsiid);
		initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
		channel = SCSIID_CHANNEL(ahc, saved_scsiid);
		ahc_compile_devinfo(&devinfo, initiator_role_id,
				    target, saved_lun, channel, ROLE_INITIATOR);
		printerror = 1;

		if (lastphase == P_MESGOUT) {
			u_int tag;

			tag = SCB_LIST_NULL;
			if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
			 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
				if (ahc->msgout_buf[ahc->msgout_index - 1]
				 == MSG_ABORT_TAG)
					tag = scb->hscb->tag;
				ahc_print_path(ahc, scb);
				printk("SCB %d - Abort%s Completed.\n",
				       scb->hscb->tag, tag == SCB_LIST_NULL ?
				       "" : " Tag");
				ahc_abort_scbs(ahc, target, channel,
					       saved_lun, tag,
					       ROLE_INITIATOR,
					       CAM_REQ_ABORTED);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_1B,
						MSG_BUS_DEV_RESET, TRUE)) {
#ifdef __FreeBSD__
				/*
				 * Don't mark the user's request for this BDR
				 * as completing with CAM_BDR_SENT.  CAM3
				 * specifies CAM_REQ_CMP.
				 */
				if (scb != NULL
				 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
				 && ahc_match_scb(ahc, scb, target, channel,
						  CAM_LUN_WILDCARD,
						  SCB_LIST_NULL,
						  ROLE_INITIATOR)) {
					ahc_set_transaction_status(scb, CAM_REQ_CMP);
				}
#endif
				ahc_compile_devinfo(&devinfo,
						    initiator_role_id,
						    target,
						    CAM_LUN_WILDCARD,
						    channel,
						    ROLE_INITIATOR);
				ahc_handle_devreset(ahc, &devinfo,
						    CAM_BDR_SENT,
						    "Bus Device Reset",
						    /*verbose_level*/0);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_PPR, FALSE)) {
				struct ahc_initiator_tinfo *tinfo;
				struct ahc_tmode_tstate *tstate;

				/*
				 * PPR Rejected.  Try non-ppr negotiation
				 * and retry command.
				 */
				tinfo = ahc_fetch_transinfo(ahc,
							    devinfo.channel,
							    devinfo.our_scsiid,
							    devinfo.target,
							    &tstate);
				tinfo->curr.transport_version = 2;
				tinfo->goal.transport_version = 2;
				tinfo->goal.ppr_options = 0;
				ahc_qinfifo_requeue_tail(ahc, scb);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_WDTR, FALSE)) {
				/*
				 * Negotiation Rejected.  Go-narrow and
				 * retry command.
				 */
				ahc_set_width(ahc, &devinfo,
					      MSG_EXT_WDTR_BUS_8_BIT,
					      AHC_TRANS_CUR|AHC_TRANS_GOAL,
					      /*paused*/TRUE);
				ahc_qinfifo_requeue_tail(ahc, scb);
				printerror = 0;
			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
						MSG_EXT_SDTR, FALSE)) {
				/*
				 * Negotiation Rejected.  Go-async and
				 * retry command.
				 */
				ahc_set_syncrate(ahc, &devinfo,
						/*syncrate*/NULL,
						/*period*/0, /*offset*/0,
						/*ppr_options*/0,
						AHC_TRANS_CUR|AHC_TRANS_GOAL,
						/*paused*/TRUE);
				ahc_qinfifo_requeue_tail(ahc, scb);
				printerror = 0;
			}
		}
		if (printerror != 0) {
			u_int i;

			if (scb != NULL) {
				u_int tag;

				if ((scb->hscb->control & TAG_ENB) != 0)
					tag = scb->hscb->tag;
				else
					tag = SCB_LIST_NULL;
				ahc_print_path(ahc, scb);
				ahc_abort_scbs(ahc, target, channel,
					       SCB_GET_LUN(scb), tag,
					       ROLE_INITIATOR,
					       CAM_UNEXP_BUSFREE);
			} else {
				/*
				 * We had not fully identified this connection,
				 * so we cannot abort anything.
				 */
				printk("%s: ", ahc_name(ahc));
			}
			for (i = 0; i < num_phases; i++) {
				if (lastphase == ahc_phase_table[i].phase)
					break;
			}
			if (lastphase != P_BUSFREE) {
				/*
				 * Renegotiate with this device at the
				 * next opportunity just in case this busfree
				 * is due to a negotiation mismatch with the
				 * device.
				 */
				ahc_force_renegotiation(ahc, &devinfo);
			}
			printk("Unexpected busfree %s\n"
			       "SEQADDR == 0x%x\n",
			       ahc_phase_table[i].phasemsg,
			       ahc_inb(ahc, SEQADDR0)
				| (ahc_inb(ahc, SEQADDR1) << 8));
		}
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_restart(ahc);
	} else {
		printk("%s: Missing case in ahc_handle_scsiint. status = %x\n",
		       ahc_name(ahc), status);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
	}
}

/*
 * Force renegotiation to occur the next time we initiate
 * a command to the current device.
 */
static void
ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;

	targ_info = ahc_fetch_transinfo(ahc,
					devinfo->channel,
					devinfo->our_scsiid,
					devinfo->target,
					&tstate);
	ahc_update_neg_request(ahc, devinfo, tstate,
			       targ_info, AHC_NEG_IF_NON_ASYNC);
}

/* Upper bound on single-step iterations before we declare a hang. */
#define AHC_MAX_STEPS 2000
/*
 * Single-step the sequencer until its program counter is outside of
 * any firmware critical section, so that the driver can safely modify
 * shared controller state.  Panics if stepping exceeds AHC_MAX_STEPS.
 */
static void
ahc_clear_critical_section(struct ahc_softc *ahc)
{
	int	stepping;
	int	steps;
	u_int	simode0;
	u_int	simode1;

	if (ahc->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	simode0 = 0;
	simode1 = 0;
	for (;;) {
		struct	cs *cs;
		u_int	seqaddr;
		u_int	i;

		seqaddr = ahc_inb(ahc, SEQADDR0)
			| (ahc_inb(ahc, SEQADDR1) << 8);

		/*
		 * Seqaddr represents the next instruction to execute,
		 * so we are really executing the instruction just
		 * before it.
		 */
		if (seqaddr != 0)
			seqaddr -= 1;

		cs = ahc->critical_sections;
		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {

			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		if (i == ahc->num_critical_sections)
			break;

		if (steps > AHC_MAX_STEPS) {
			printk("%s: Infinite loop in critical section\n",
			       ahc_name(ahc));
			ahc_dump_card_state(ahc);
			panic("critical section loop");
		}

		steps++;
		if (stepping == FALSE) {

			/*
			 * Disable all interrupt sources so that the
			 * sequencer will not be stuck by a pausing
			 * interrupt condition while we attempt to
			 * leave a critical section.
			 */
			simode0 = ahc_inb(ahc, SIMODE0);
			ahc_outb(ahc, SIMODE0, 0);
			simode1 = ahc_inb(ahc, SIMODE1);
			if ((ahc->features & AHC_DT) != 0)
				/*
				 * On DT class controllers, we
				 * use the enhanced busfree logic.
				 * Unfortunately we cannot re-enable
				 * busfree detection within the
				 * current connection, so we must
				 * leave it on while single stepping.
				 */
				ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE);
			else
				ahc_outb(ahc, SIMODE1, 0);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP);
			stepping = TRUE;
		}
		if ((ahc->features & AHC_DT) != 0) {
			ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
		}
		ahc_outb(ahc, HCNTRL, ahc->unpause);
		while (!ahc_is_paused(ahc))
			ahc_delay(200);
	}
	if (stepping) {
		/* Restore the interrupt masks we saved before stepping. */
		ahc_outb(ahc, SIMODE0, simode0);
		ahc_outb(ahc, SIMODE1, simode1);
		ahc_outb(ahc, SEQCTL, ahc->seqctl);
	}
}

/*
 * Clear any pending interrupt status.
 */
static void
ahc_clear_intstat(struct ahc_softc *ahc)
{
	/* Clear any interrupt conditions this may have caused */
	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
				CLRREQINIT);
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRINT, CLRSCSIINT);
	ahc_flush_device_writes(ahc);
}

/**************************** Debugging Routines ******************************/
#ifdef AHC_DEBUG
uint32_t ahc_debug = AHC_DEBUG_OPTS;
#endif

#if 0 /* unused */
/* Dump the contents of an SCB (control bytes, CDB, S/G list) for debugging. */
static void
ahc_print_scb(struct scb *scb)
{
	int i;

	struct hardware_scb *hscb = scb->hscb;
	printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
	       (void *)scb,
	       hscb->control,
	       hscb->scsiid,
	       hscb->lun,
	       hscb->cdb_len);
	printk("Shared Data: ");
	for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
		printk("%#02x", hscb->shared_data.cdb[i]);
	printk("        dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
		ahc_le32toh(hscb->dataptr),
		ahc_le32toh(hscb->datacnt),
		ahc_le32toh(hscb->sgptr),
		hscb->tag);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printk("sg[%d] - Addr 0x%x%x : Length %d\n",
			       i,
			       (ahc_le32toh(scb->sg_list[i].len) >> 24
			        & SG_HIGH_ADDR_BITS),
			       ahc_le32toh(scb->sg_list[i].addr),
			       ahc_le32toh(scb->sg_list[i].len));
		}
	}
}
#endif

/************************* Transfer Negotiation *******************************/
/*
 * Allocate per target mode instance (ID we respond to as a target)
 * 
transfer negotiation data structures.
 */
static struct ahc_tmode_tstate *
ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
{
	struct ahc_tmode_tstate *master_tstate;
	struct ahc_tmode_tstate *tstate;
	int i;

	master_tstate = ahc->enabled_targets[ahc->our_id];
	if (channel == 'B') {
		/* Channel B targets live in the upper half of the array. */
		scsi_id += 8;
		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
	}
	if (ahc->enabled_targets[scsi_id] != NULL
	 && ahc->enabled_targets[scsi_id] != master_tstate)
		panic("%s: ahc_alloc_tstate - Target already allocated",
		      ahc_name(ahc));
	/* GFP_ATOMIC: may be called from interrupt context. */
	tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC);
	if (tstate == NULL)
		return (NULL);

	/*
	 * If we have allocated a master tstate, copy user settings from
	 * the master tstate (taken from SRAM or the EEPROM) for this
	 * channel, but reset our current and goal settings to async/narrow
	 * until an initiator talks to us.
	 */
	if (master_tstate != NULL) {
		memcpy(tstate, master_tstate, sizeof(*tstate));
		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
		tstate->ultraenb = 0;
		for (i = 0; i < AHC_NUM_TARGETS; i++) {
			memset(&tstate->transinfo[i].curr, 0,
			       sizeof(tstate->transinfo[i].curr));
			memset(&tstate->transinfo[i].goal, 0,
			       sizeof(tstate->transinfo[i].goal));
		}
	} else
		memset(tstate, 0, sizeof(*tstate));
	ahc->enabled_targets[scsi_id] = tstate;
	return (tstate);
}

#ifdef AHC_TARGET_MODE
/*
 * Free per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static void
ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
{
	struct ahc_tmode_tstate *tstate;

	/*
	 * Don't clean up our "master" tstate.
	 * It has our default user settings.
	 */
	if (((channel == 'B' && scsi_id == ahc->our_id_b)
	  || (channel == 'A' && scsi_id == ahc->our_id))
	 && force == FALSE)
		return;

	if (channel == 'B')
		scsi_id += 8;
	tstate = ahc->enabled_targets[scsi_id];
	if (tstate != NULL)
		kfree(tstate);
	ahc->enabled_targets[scsi_id] = NULL;
}
#endif

/*
 * Called when we have an active connection to a target on the bus,
 * this function finds the nearest syncrate to the input period limited
 * by the capabilities of the bus connectivity of and sync settings for
 * the target.
 */
const struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc,
			struct ahc_initiator_tinfo *tinfo,
			u_int *period, u_int *ppr_options, role_t role)
{
	struct	ahc_transinfo *transinfo;
	u_int	maxsync;

	if ((ahc->features & AHC_ULTRA2) != 0) {
		/* DT is only possible on an LVD bus with no expander. */
		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
			maxsync = AHC_SYNCRATE_DT;
		} else {
			maxsync = AHC_SYNCRATE_ULTRA;
			/* Can't do DT on an SE bus */
			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		}
	} else if ((ahc->features & AHC_ULTRA) != 0) {
		maxsync = AHC_SYNCRATE_ULTRA;
	} else {
		maxsync = AHC_SYNCRATE_FAST;
	}
	/*
	 * Never allow a value higher than our current goal
	 * period otherwise we may allow a target initiated
	 * negotiation to go above the limit as set by the
	 * user.  In the case of an initiator initiated
	 * sync negotiation, we limit based on the user
	 * setting.  This allows the system to still accept
	 * incoming negotiations even if target initiated
	 * negotiation is not performed.
	 */
	if (role == ROLE_TARGET)
		transinfo = &tinfo->user;
	else
		transinfo = &tinfo->goal;
	*ppr_options &= transinfo->ppr_options;
	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
		maxsync = max(maxsync, (u_int)AHC_SYNCRATE_ULTRA2);
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	if (transinfo->period == 0) {
		/* Period 0 means async-only; no syncrate entry applies. */
		*period = 0;
		*ppr_options = 0;
		return (NULL);
	}
	*period = max(*period, (u_int)transinfo->period);
	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
}

/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 */
const struct ahc_syncrate *
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
		  u_int *ppr_options, u_int maxsync)
{
	const struct ahc_syncrate *syncrate;

	if ((ahc->features & AHC_DT) == 0)
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;

	/* Skip all DT only entries if DT is not available */
	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
	 && maxsync < AHC_SYNCRATE_ULTRA2)
		maxsync = AHC_SYNCRATE_ULTRA2;

	/* Now set the maxsync based on the card capabilities
	 * DT is already done above */
	if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
	 && maxsync < AHC_SYNCRATE_ULTRA)
		maxsync = AHC_SYNCRATE_ULTRA;
	if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
	 && maxsync < AHC_SYNCRATE_FAST)
		maxsync = AHC_SYNCRATE_FAST;

	for (syncrate = &ahc_syncrates[maxsync];
	     syncrate->rate != NULL;
	     syncrate++) {

		/*
		 * The Ultra2 table doesn't go as low
		 * as for the Fast/Ultra cards.
		 */
		if ((ahc->features & AHC_ULTRA2) != 0
		 && (syncrate->sxfr_u2 == 0))
			break;

		if (*period <= syncrate->period) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (syncrate == &ahc_syncrates[maxsync])
				*period = syncrate->period;

			/*
			 * At some speeds, we only support
			 * ST transfers.
			 */
			if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
			break;
		}
	}

	if ((*period == 0)
	 || (syncrate->rate == NULL)
	 || ((ahc->features & AHC_ULTRA2) != 0
	  && (syncrate->sxfr_u2 == 0))) {
		/* Use asynchronous transfers. */
		*period = 0;
		syncrate = NULL;
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	return (syncrate);
}

/*
 * Convert from an entry in our syncrate table to the SCSI equivalent
 * sync "period" factor.
 */
u_int
ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
{
	const struct ahc_syncrate *syncrate;

	if ((ahc->features & AHC_ULTRA2) != 0)
		scsirate &= SXFR_ULTRA2;
	else
		scsirate &= SXFR;

	/* now set maxsync based on card capabilities */
	if ((ahc->features & AHC_DT) == 0 && maxsync < AHC_SYNCRATE_ULTRA2)
		maxsync = AHC_SYNCRATE_ULTRA2;
	if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
	 && maxsync < AHC_SYNCRATE_ULTRA)
		maxsync = AHC_SYNCRATE_ULTRA;
	if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
	 && maxsync < AHC_SYNCRATE_FAST)
		maxsync = AHC_SYNCRATE_FAST;

	syncrate = &ahc_syncrates[maxsync];
	while (syncrate->rate != NULL) {

		if ((ahc->features & AHC_ULTRA2) != 0) {
			if (syncrate->sxfr_u2 == 0)
				break;
			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
				return (syncrate->period);
		} else if (scsirate == (syncrate->sxfr & SXFR)) {
			return (syncrate->period);
		}
		syncrate++;
	}
	return (0); /* async */
}

/*
 * Truncate the given synchronous offset to a value the
 * current adapter type and syncrate are capable of.
 */
static void
ahc_validate_offset(struct ahc_softc *ahc,
		    struct ahc_initiator_tinfo *tinfo,
		    const struct ahc_syncrate *syncrate,
		    u_int *offset, int wide, role_t role)
{
	u_int maxoffset;

	/* Limit offset to what we can do */
	if (syncrate == NULL) {
		/* Asynchronous transfers use no offset. */
		maxoffset = 0;
	} else if ((ahc->features & AHC_ULTRA2) != 0) {
		maxoffset = MAX_OFFSET_ULTRA2;
	} else {
		if (wide)
			maxoffset = MAX_OFFSET_16BIT;
		else
			maxoffset = MAX_OFFSET_8BIT;
	}
	*offset = min(*offset, maxoffset);
	if (tinfo != NULL) {
		if (role == ROLE_TARGET)
			*offset = min(*offset, (u_int)tinfo->user.offset);
		else
			*offset = min(*offset, (u_int)tinfo->goal.offset);
	}
}

/*
 * Truncate the given transfer width parameter to a value the
 * current adapter type is capable of.
 */
static void
ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
		   u_int *bus_width, role_t role)
{
	switch (*bus_width) {
	default:
		if (ahc->features & AHC_WIDE) {
			/* Respond Wide */
			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			break;
		}
		/* FALLTHROUGH */
	case MSG_EXT_WDTR_BUS_8_BIT:
		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		break;
	}
	if (tinfo != NULL) {
		if (role == ROLE_TARGET)
			*bus_width = min((u_int)tinfo->user.width, *bus_width);
		else
			*bus_width = min((u_int)tinfo->goal.width, *bus_width);
	}
}

/*
 * Update the bitmask of targets for which the controller should
 * negotiate with at the next convenient opportunity.  This currently
 * means the next time we send the initial identify messages for
 * a new transaction.
 *
 * Returns non-zero if the auto-negotiation mask actually changed.
 */
int
ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct ahc_tmode_tstate *tstate,
		       struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type)
{
	u_int auto_negotiate_orig;

	auto_negotiate_orig = tstate->auto_negotiate;
	if (neg_type == AHC_NEG_ALWAYS) {
		/*
		 * Force our "current" settings to be
		 * unknown so that unless a bus reset
		 * occurs the need to renegotiate is
		 * recorded persistently.
		 */
		if ((ahc->features & AHC_WIDE) != 0)
			tinfo->curr.width = AHC_WIDTH_UNKNOWN;
		tinfo->curr.period = AHC_PERIOD_UNKNOWN;
		tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
	}
	if (tinfo->curr.period != tinfo->goal.period
	 || tinfo->curr.width != tinfo->goal.width
	 || tinfo->curr.offset != tinfo->goal.offset
	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
	 || (neg_type == AHC_NEG_IF_NON_ASYNC
	  && (tinfo->goal.offset != 0
	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
	   || tinfo->goal.ppr_options != 0)))
		tstate->auto_negotiate |= devinfo->target_mask;
	else
		tstate->auto_negotiate &= ~devinfo->target_mask;

	return (auto_negotiate_orig != tstate->auto_negotiate);
}

/*
 * Update the user/goal/curr tables of synchronous negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		 const struct ahc_syncrate *syncrate, u_int period,
		 u_int offset, u_int ppr_options, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	u_int	old_ppr;
	int	active;
	int	update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;

	if (syncrate == NULL) {
		/* No syncrate entry means asynchronous transfers. */
		period = 0;
		offset = 0;
	}

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	if ((type & AHC_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	old_period = tinfo->curr.period;
	old_offset = tinfo->curr.offset;
	old_ppr	   = tinfo->curr.ppr_options;

	if ((type & AHC_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {
		u_int	scsirate;

		update_needed++;
		scsirate = tinfo->scsirate;
		if ((ahc->features & AHC_ULTRA2) != 0) {

			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
			if (syncrate != NULL) {
				scsirate |= syncrate->sxfr_u2;
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
					scsirate |= ENABLE_CRC;
				else
					scsirate |= SINGLE_EDGE;
			}
		} else {

			scsirate &= ~(SXFR|SOFS);
			/*
			 * Ensure Ultra mode is set properly for
			 * this target.
			 */
			tstate->ultraenb &= ~devinfo->target_mask;
			if (syncrate != NULL) {
				if (syncrate->sxfr & ULTRA_SXFR) {
					tstate->ultraenb |=
						devinfo->target_mask;
				}
				scsirate |= syncrate->sxfr & SXFR;
				scsirate |= offset & SOFS;
			}
			if (active) {
				u_int sxfrctl0;

				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
				sxfrctl0 &= ~FAST20;
				if (tstate->ultraenb & devinfo->target_mask)
					sxfrctl0 |= FAST20;
				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
			}
		}
		if (active) {
			/* Target is on the bus; update hardware immediately. */
			ahc_outb(ahc, SCSIRATE, scsirate);
			if ((ahc->features & AHC_ULTRA2) != 0)
				ahc_outb(ahc, SCSIOFFSET, offset);
		}

		tinfo->scsirate = scsirate;
		tinfo->curr.period = period;
		tinfo->curr.offset = offset;
		tinfo->curr.ppr_options = ppr_options;

		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			if (offset != 0) {
				printk("%s: target %d synchronous at %sMHz%s, "
				       "offset = 0x%x\n", ahc_name(ahc),
				       devinfo->target, syncrate->rate,
				       (ppr_options & MSG_EXT_PPR_DT_REQ)
				       ? " DT" : "", offset);
			} else {
				printk("%s: target %d using "
				       "asynchronous transfers\n",
				       ahc_name(ahc), devinfo->target);
			}
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);

	if (update_needed)
		ahc_update_pending_scbs(ahc);
}

/*
 * Update the user/goal/curr tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
*/ void ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int width, u_int type, int paused) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int oldwidth; int active; int update_needed; active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; update_needed = 0; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHC_TRANS_USER) != 0) tinfo->user.width = width; if ((type & AHC_TRANS_GOAL) != 0) tinfo->goal.width = width; oldwidth = tinfo->curr.width; if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { u_int scsirate; update_needed++; scsirate = tinfo->scsirate; scsirate &= ~WIDEXFER; if (width == MSG_EXT_WDTR_BUS_16_BIT) scsirate |= WIDEXFER; tinfo->scsirate = scsirate; if (active) ahc_outb(ahc, SCSIRATE, scsirate); tinfo->curr.width = width; ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG); if (bootverbose) { printk("%s: target %d using %dbit transfers\n", ahc_name(ahc), devinfo->target, 8 * (0x01 << width)); } } update_needed += ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_TO_GOAL); if (update_needed) ahc_update_pending_scbs(ahc); } /* * Update the current state of tagged queuing for a given target. */ static void ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo, ahc_queue_alg alg) { struct scsi_device *sdev = cmd->device; ahc_platform_set_tags(ahc, sdev, devinfo, alg); ahc_send_async(ahc, devinfo->channel, devinfo->target, devinfo->lun, AC_TRANSFER_NEG); } /* * When the transfer settings for a connection change, update any * in-transit SCBs to contain the new data so the hardware will * be set correctly during future (re)selections. 
*/ static void ahc_update_pending_scbs(struct ahc_softc *ahc) { struct scb *pending_scb; int pending_scb_count; int i; int paused; u_int saved_scbptr; /* * Traverse the pending SCB list and ensure that all of the * SCBs there have the proper settings. */ pending_scb_count = 0; LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { struct ahc_devinfo devinfo; struct hardware_scb *pending_hscb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; ahc_scb_devinfo(ahc, &devinfo, pending_scb); tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); pending_hscb = pending_scb->hscb; pending_hscb->control &= ~ULTRAENB; if ((tstate->ultraenb & devinfo.target_mask) != 0) pending_hscb->control |= ULTRAENB; pending_hscb->scsirate = tinfo->scsirate; pending_hscb->scsioffset = tinfo->curr.offset; if ((tstate->auto_negotiate & devinfo.target_mask) == 0 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; pending_hscb->control &= ~MK_MESSAGE; } ahc_sync_scb(ahc, pending_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); pending_scb_count++; } if (pending_scb_count == 0) return; if (ahc_is_paused(ahc)) { paused = 1; } else { paused = 0; ahc_pause(ahc); } saved_scbptr = ahc_inb(ahc, SCBPTR); /* Ensure that the hscbs down on the card match the new information */ for (i = 0; i < ahc->scb_data->maxhscbs; i++) { struct hardware_scb *pending_hscb; u_int control; u_int scb_tag; ahc_outb(ahc, SCBPTR, i); scb_tag = ahc_inb(ahc, SCB_TAG); pending_scb = ahc_lookup_scb(ahc, scb_tag); if (pending_scb == NULL) continue; pending_hscb = pending_scb->hscb; control = ahc_inb(ahc, SCB_CONTROL); control &= ~(ULTRAENB|MK_MESSAGE); control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); ahc_outb(ahc, SCB_CONTROL, control); ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); } ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) 
ahc_unpause(ahc); } /**************************** Pathing Information *****************************/ static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { u_int saved_scsiid; role_t role; int our_id; if (ahc_inb(ahc, SSTAT0) & TARGET) role = ROLE_TARGET; else role = ROLE_INITIATOR; if (role == ROLE_TARGET && (ahc->features & AHC_MULTI_TID) != 0 && (ahc_inb(ahc, SEQ_FLAGS) & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) { /* We were selected, so pull our id from TARGIDIN */ our_id = ahc_inb(ahc, TARGIDIN) & OID; } else if ((ahc->features & AHC_ULTRA2) != 0) our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; else our_id = ahc_inb(ahc, SCSIID) & OID; saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); ahc_compile_devinfo(devinfo, our_id, SCSIID_TARGET(ahc, saved_scsiid), ahc_inb(ahc, SAVED_LUN), SCSIID_CHANNEL(ahc, saved_scsiid), role); } static const struct ahc_phase_table_entry* ahc_lookup_phase_entry(int phase) { const struct ahc_phase_table_entry *entry; const struct ahc_phase_table_entry *last_entry; /* * num_phases doesn't include the default entry which * will be returned if the phase doesn't match. 
*/ last_entry = &ahc_phase_table[num_phases]; for (entry = ahc_phase_table; entry < last_entry; entry++) { if (phase == entry->phase) break; } return (entry); } void ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role) { devinfo->our_scsiid = our_id; devinfo->target = target; devinfo->lun = lun; devinfo->target_offset = target; devinfo->channel = channel; devinfo->role = role; if (channel == 'B') devinfo->target_offset += 8; devinfo->target_mask = (0x01 << devinfo->target_offset); } void ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { printk("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } static void ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { role_t role; int our_id; our_id = SCSIID_OUR_ID(scb->hscb->scsiid); role = ROLE_INITIATOR; if ((scb->flags & SCB_TARGET_SCB) != 0) role = ROLE_TARGET; ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); } /************************ Message Phase Processing ****************************/ static void ahc_assert_atn(struct ahc_softc *ahc) { u_int scsisigo; scsisigo = ATNO; if ((ahc->features & AHC_DT) == 0) scsisigo |= ahc_inb(ahc, SCSISIGI); ahc_outb(ahc, SCSISIGO, scsisigo); } /* * When an initiator transaction with the MK_MESSAGE flag either reconnects * or enters the initial message out phase, we are interrupted. Fill our * outgoing message buffer with the appropriate message and beging handing * the message phase(s) manually. */ static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. 
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	/* Lead with IDENTIFY (plus tag bytes) unless this is a reset SCB. */
	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printk("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printk("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		/* MK_MESSAGE was set with nothing to send -- fatal. */
		printk("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	const struct ahc_syncrate *rate;
	int	dowide;
	int	dosync;
	int	doppr;
	u_int	period;
	u_int	ppr_options;
	u_int	offset;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	offset = tinfo->goal.offset;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
				       &ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
	/*
	 * Only use PPR if we have options that need it, even if the device
	 * claims to support it.  There might be an expander in the way
	 * that doesn't.
	 */
	doppr = ppr_options != 0;

	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.offset != 0;
	}

	if (!dowide && !dosync && !doppr) {
		/*
		 * Force async with a WDTR message if we have a wide bus,
		 * or just issue an SDTR with a 0 offset.
		 */
		if ((ahc->features & AHC_WIDE) != 0)
			dowide = 1;
		else
			dosync = 1;

		if (bootverbose) {
			ahc_print_devinfo(ahc, devinfo);
			printk("Ensuring async\n");
		}
	}

	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		doppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (doppr || (dosync && !dowide)) {

		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, tinfo, rate, &offset,
				    doppr ? tinfo->goal.width
					  : tinfo->curr.width,
				    devinfo->role);
		if (doppr) {
			ahc_construct_ppr(ahc, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahc_construct_sdtr(ahc, devinfo, period, offset);
		}
	} else {
		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int period, u_int offset)
{
	/* An offset of zero means async; report the async period code. */
	if (offset == 0)
		period = AHC_ASYNC_XFER_PERIOD;
	ahc->msgout_index += spi_populate_sync_msg(
			ahc->msgout_buf + ahc->msgout_index, period, offset);
	ahc->msgout_len += 5;
	if (bootverbose) {
		printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int bus_width)
{
	ahc->msgout_index += spi_populate_width_msg(
			ahc->msgout_buf + ahc->msgout_index, bus_width);
	ahc->msgout_len += 4;
	if (bootverbose) {
		printk("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	/* An offset of zero means async; report the async period code. */
	if (offset == 0)
		period = AHC_ASYNC_XFER_PERIOD;
	ahc->msgout_index += spi_populate_ppr_msg(
			ahc->msgout_buf + ahc->msgout_index, period, offset,
			bus_width, ppr_options);
	ahc->msgout_len += 8;
	if (bootverbose) {
		printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}

/*
 * Clear any active message state.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahc_outb(ahc, CLRSINT1, CLRATNO);
	}
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
	ahc_outb(ahc, SEQ_FLAGS2,
		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
}

/*
 * Diagnose a protocol violation flagged by the sequencer and recover:
 * either abort the offending transaction or, when no safe recovery
 * exists (no SCB, or the target skipped straight to data/command
 * phase), reset the bus.
 */
static void
ahc_handle_proto_violation(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	struct	scb *scb;
	u_int	scbid;
	u_int	seq_flags;
	u_int	curphase;
	u_int	lastphase;
	int	found;

	ahc_fetch_devinfo(ahc, &devinfo);
	scbid = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scbid);
	seq_flags = ahc_inb(ahc, SEQ_FLAGS);
	curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
	lastphase = ahc_inb(ahc, LASTPHASE);
	if ((seq_flags & NOT_IDENTIFIED) != 0) {

		/*
		 * The reconnecting target either did not send an
		 * identify message, or did, but we didn't find an SCB
		 * to match.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printk("Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x.\n", lastphase);
		scb = NULL;
	} else if (scb == NULL) {
		/*
		 * We don't seem to have an SCB active for this
		 * transaction.  Print an error and reset the bus.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printk("No SCB found during protocol violation\n");
		goto proto_violation_reset;
	} else {
		ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
		if ((seq_flags & NO_CDB_SENT) != 0) {
			ahc_print_path(ahc, scb);
			printk("No or incomplete CDB sent to device.\n");
		} else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
			/*
			 * The target never bothered to provide status to
			 * us prior to completing the command.  Since we don't
			 * know the disposition of this command, we must attempt
			 * to abort it.  Assert ATN and prepare to send an abort
			 * message.
			 */
			ahc_print_path(ahc, scb);
			printk("Completed command without status.\n");
		} else {
			ahc_print_path(ahc, scb);
			printk("Unknown protocol violation.\n");
			ahc_dump_card_state(ahc);
		}
	}
	if ((lastphase & ~P_DATAIN_DT) == 0
	 || lastphase == P_COMMAND) {
proto_violation_reset:
		/*
		 * Target either went directly to data/command
		 * phase or didn't respond to our ATN.
		 * The only safe thing to do is to blow
		 * it away with a bus reset.
		 */
		found = ahc_reset_channel(ahc, 'A', TRUE);
		printk("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
	} else {
		/*
		 * Leave the selection hardware off in case
		 * this abort attempt will affect yet to
		 * be sent commands.
		 */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & ~ENSELO);
		ahc_assert_atn(ahc);
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		if (scb == NULL) {
			ahc_print_devinfo(ahc, &devinfo);
			ahc->msgout_buf[0] = MSG_ABORT_TASK;
			ahc->msgout_len = 1;
			ahc->msgout_index = 0;
			ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		} else {
			ahc_print_path(ahc, scb);
			scb->flags |= SCB_ABORT;
		}
		printk("Protocol violation %s. Attempting to abort.\n",
		       ahc_lookup_phase_entry(curphase)->phasemsg);
	}
}

/*
 * Manual message loop handler.
 */
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	u_int	bus_phase;
	int	end_session;

	ahc_fetch_devinfo(ahc, &devinfo);
	end_session = FALSE;
	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;

reswitch:
	switch (ahc->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printk("INITIATOR_MSG_OUT");
		}
#endif
		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printk(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this message is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahc_outb(ahc, CLRSINT1, CLRATNO);
				ahc->send_msg_perror = FALSE;
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahc->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		if (ahc->send_msg_perror) {
			ahc_outb(ahc, CLRSINT1, CLRATNO);
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
				printk(" byte 0x%x\n", ahc->send_msg_perror);
#endif
			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
			break;
		}

		msgdone	= ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahc->msgout_index = 0;
			ahc_assert_atn(ahc);
		}

		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printk(" byte 0x%x\n",
			       ahc->msgout_buf[ahc->msgout_index]);
#endif
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		int phasemis;
		int message_done;

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printk("INITIATOR_MSG_IN");
		}
#endif
		phasemis = bus_phase != P_MESGIN;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printk(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			ahc->msgin_index = 0;
			if (bus_phase == P_MESGOUT
			 && (ahc->send_msg_perror == TRUE
			  || (ahc->msgout_len != 0
			   && ahc->msgout_index == 0))) {
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printk(" byte 0x%x\n",
			       ahc->msgin_buf[ahc->msgin_index]);
#endif

		message_done = ahc_parse_msg(ahc, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahc->msgout_len != 0) {
#ifdef AHC_DEBUG
				if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
					ahc_print_devinfo(ahc, &devinfo);
					printk("Asserting ATN for response\n");
				}
#endif
				ahc_assert_atn(ahc);
			}
		} else
			ahc->msgin_index++;

		if (message_done == MSGLOOP_TERMINATED) {
			end_session = TRUE;
		} else {
			/* Ack the byte */
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
			ahc_inb(ahc, SCSIDATL);
		}
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		int msgdone;
		int msgout_request;

		if (ahc->msgout_len == 0)
			panic("Target MSGIN with no active message");

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ.  So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
		 && ahc->msgout_index > 0)
			msgout_request = TRUE;
		else
			msgout_request = FALSE;

		if (msgout_request) {

			/*
			 * Change gears and see if
			 * this message is of interest to
			 * us or should be passed back to
			 * the sequencer.
			 */
			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
			ahc->msgin_index = 0;
			/* Dummy read to REQ for first byte */
			ahc_inb(ahc, SCSIDATL);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		int lastbyte;
		int msgdone;

		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
		msgdone = ahc_parse_msg(ahc, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free.  The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}

		ahc->msgin_index++;

		/*
		 * XXX Read spec about initiator dropping ATN too soon
		 *     and use msgdone to detect it.
		 */
		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahc->msgout_len != 0) {
				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
				ahc_outb(ahc, SXFRCTL0,
					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahc->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	if (end_session) {
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
	} else
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message.  If "full" is false, return true if the target saw at
 * least the first byte of the message.
 */
static int
ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	/* Walk the outgoing message buffer, message by message. */
	while (index < ahc->msgout_len) {
		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/* Extended message: byte 1 is its length. */
			end_index = index + 1 + ahc->msgout_buf[index + 1];
			if (ahc->msgout_buf[index+2] == msgval
			 && type == AHCMSG_EXT) {

				if (full) {
					if (ahc->msgout_index > end_index)
						found = TRUE;
				} else if (ahc->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHCMSG_1B
			 && ahc->msgout_buf[index] == msgval
			 && ahc->msgout_index > index)
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}

/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
 */
static int
ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	int	reject;
	int	done;
	int	response;
	u_int	targ_scsirate;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	targ_scsirate = tinfo->scsirate;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahc->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
		 */
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_MESSAGE_REJECT:
		response = ahc_handle_msg_reject(ahc, devinfo);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_EXTENDED:
	{
		/* Wait for enough of the message to begin validation */
		if (ahc->msgin_index < 2)
			break;
		switch (ahc->msgin_buf[2]) {
		case MSG_EXT_SDTR:
		{
			const struct ahc_syncrate *syncrate;
			u_int	 period;
			u_int	 ppr_options;
			u_int	 offset;
			u_int	 saved_offset;

			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have both args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_SDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			ppr_options = 0;
			saved_offset = offset = ahc->msgin_buf[4];
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
					    targ_scsirate & WIDEXFER,
					    devinfo->role);
			if (bootverbose) {
				printk("(%s:%c:%d:%d): Received "
				       "SDTR period %x, offset %x\n\t"
				       "Filtered to period %x, offset %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       ahc->msgin_buf[3], saved_offset,
				       period, offset);
			}
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);

			/*
			 * See if we initiated Sync Negotiation
			 * and didn't have to fall down to async
			 * transfers.
			 */
			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
				/* We started it */
				if (saved_offset != offset) {
					/* Went too low - force async */
					reject = TRUE;
				}
			} else {
				/*
				 * Send our own SDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printk("(%s:%c:%d:%d): Target "
					       "Initiated SDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_sdtr(ahc, devinfo,
						   period, offset);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_WDTR:
		{
			u_int bus_width;
			u_int saved_width;
			u_int sending_reply;

			sending_reply = FALSE;
			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have our arg before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_WDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
				break;

			bus_width = ahc->msgin_buf[3];
			saved_width = bus_width;
			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			if (bootverbose) {
				printk("(%s:%c:%d:%d): Received WDTR "
				       "%x filtered to %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, bus_width);
			}

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
				/*
				 * Don't send a WDTR back to the
				 * target, since we asked first.
				 * If the width went higher than our
				 * request, reject it.
				 */
				if (saved_width > bus_width) {
					reject = TRUE;
					printk("(%s:%c:%d:%d): requested %dBit "
					       "transfers. Rejecting...\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun,
					       8 * (0x01 << bus_width));
					bus_width = 0;
				}
			} else {
				/*
				 * Send our own WDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printk("(%s:%c:%d:%d): Target "
					       "Initiated WDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_wdtr(ahc, devinfo, bus_width);
				ahc->msgout_index = 0;
				response = TRUE;
				sending_reply = TRUE;
			}
			/*
			 * After a wide message, we are async, but
			 * some devices don't seem to honor this portion
			 * of the spec.  Force a renegotiation of the
			 * sync component of our transfer agreement even
			 * if our goal is async.  By updating our width
			 * after forcing the negotiation, we avoid
			 * renegotiating for width.
			 */
			ahc_update_neg_request(ahc, devinfo, tstate,
					       tinfo, AHC_NEG_ALWAYS);
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			if (sending_reply == FALSE && reject == FALSE) {

				/*
				 * We will always have an SDTR to send.
				 */
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_build_transfer_msg(ahc, devinfo);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_PPR:
		{
			const struct ahc_syncrate *syncrate;
			u_int	period;
			u_int	offset;
			u_int	bus_width;
			u_int	ppr_options;
			u_int	saved_width;
			u_int	saved_offset;
			u_int	saved_ppr_options;

			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have all args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_PPR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			offset = ahc->msgin_buf[5];
			bus_width = ahc->msgin_buf[6];
			saved_width = bus_width;
			ppr_options = ahc->msgin_buf[7];
			/*
			 * According to the spec, a DT only
			 * period factor with no DT option
			 * set implies async.
			 */
			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
			 && period == 9)
				offset = 0;
			saved_ppr_options = ppr_options;
			saved_offset = offset;

			/*
			 * Mask out any options we don't support
			 * on any controller.  Transfer options are
			 * only available if we are negotiating wide.
			 */
			ppr_options &= MSG_EXT_PPR_DT_REQ;
			if (bus_width == 0)
				ppr_options = 0;

			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate,
					    &offset, bus_width,
					    devinfo->role);

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
				/*
				 * If we are unable to do any of the
				 * requested options (we went too low),
				 * then we'll have to reject the message.
				 */
				if (saved_width > bus_width
				 || saved_offset != offset
				 || saved_ppr_options != ppr_options) {
					reject = TRUE;
					period = 0;
					offset = 0;
					bus_width = 0;
					ppr_options = 0;
					syncrate = NULL;
				}
			} else {
				if (devinfo->role != ROLE_TARGET)
					printk("(%s:%c:%d:%d): Target "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				else
					printk("(%s:%c:%d:%d): Initiator "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_ppr(ahc, devinfo, period, offset,
						  bus_width, ppr_options);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			if (bootverbose) {
				printk("(%s:%c:%d:%d): Received PPR width %x, "
				       "period %x, offset %x,options %x\n"
				       "\tFiltered to width %x, period %x, "
				       "offset %x, options %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, ahc->msgin_buf[3],
				       saved_offset, saved_ppr_options,
				       bus_width, period, offset, ppr_options);
			}
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		default:
			/*
			 * Unknown extended message.  Reject it.
			 */
			reject = TRUE;
			break;
		}
		break;
	}
#ifdef AHC_TARGET_MODE
	case MSG_BUS_DEV_RESET:
		ahc_handle_devreset(ahc, devinfo,
				    CAM_BDR_SENT,
				    "Bus Device Reset Received",
				    /*verbose_level*/0);
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
	{
		int tag;

		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		tag = SCB_LIST_NULL;
		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
			tag = ahc_inb(ahc, INITIATOR_TAG);
		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       devinfo->lun, tag, ROLE_TARGET,
			       CAM_REQ_ABORTED);

		tstate = ahc->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				ahc_queue_lstate_event(ahc, lstate,
						       devinfo->our_scsiid,
						       ahc->msgin_buf[0],
						       /*arg*/tag);
				ahc_send_lstate_events(ahc, lstate);
			}
		}
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	}
#endif
	case MSG_TERM_IO_PROC:
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
		 */
		ahc->msgout_index = 0;
		ahc->msgout_len = 1;
		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = MSGLOOP_MSGCOMPLETE;
		response = TRUE;
	}

	if (done != MSGLOOP_IN_PROG && !response)
		/* Clear the outgoing message buffer */
		ahc->msgout_len = 0;

	return (done);
}

/*
 * Process a message reject message.
 */
static int
ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * What we care about here is if we had an
	 * outstanding SDTR or WDTR message for this
	 * target.  If we did, this is a signal that
	 * the target is refusing negotiation.
	 */
	struct scb *scb;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	u_int scb_index;
	u_int last_msg;
	int   response = 0;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
				    devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/* Might be necessary */
	last_msg = ahc_inb(ahc, LAST_MSG);

	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
		/*
		 * Target does not support the PPR message.
		 * Attempt to negotiate SPI-2 style.
		 */
		if (bootverbose) {
			printk("(%s:%c:%d:%d): PPR Rejected. "
			       "Trying WDTR/SDTR\n",
			       ahc_name(ahc), devinfo->channel,
			       devinfo->target, devinfo->lun);
		}
		tinfo->goal.ppr_options = 0;
		tinfo->curr.transport_version = 2;
		tinfo->goal.transport_version = 2;
		ahc->msgout_index = 0;
		ahc->msgout_len = 0;
		ahc_build_transfer_msg(ahc, devinfo);
		ahc->msgout_index = 0;
		response = 1;
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {

		/* note 8bit xfers */
		printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
		       "8bit transfers\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun);
		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
			      /*paused*/TRUE);
		/*
		 * No need to clear the sync rate.  If the target
		 * did not accept the command, our syncrate is
		 * unaffected.  If the target started the negotiation,
		 * but rejected our response, we already cleared the
		 * sync rate before sending our WDTR.
		 */
		if (tinfo->goal.offset != tinfo->curr.offset) {

			/* Start the sync negotiation */
			ahc->msgout_index = 0;
			ahc->msgout_len = 0;
			ahc_build_transfer_msg(ahc, devinfo);
			ahc->msgout_index = 0;
			response = 1;
		}
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
				 /*offset*/0, /*ppr_options*/0,
				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				 /*paused*/TRUE);
		printk("(%s:%c:%d:%d): refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       ahc_name(ahc), devinfo->channel,
		       devinfo->target, devinfo->lun);
	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
		int tag_type;
		int mask;

		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);

		if (tag_type == MSG_SIMPLE_TASK) {
			printk("(%s:%c:%d:%d): refuses tagged commands. "
			       "Performing non-tagged I/O\n", ahc_name(ahc),
			       devinfo->channel, devinfo->target, devinfo->lun);
			ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_NONE);
			mask = ~0x23;
		} else {
			printk("(%s:%c:%d:%d): refuses %s tagged commands. "
			       "Performing simple queue tagged I/O only\n",
			       ahc_name(ahc), devinfo->channel, devinfo->target,
			       devinfo->lun, tag_type == MSG_ORDERED_TASK
			       ? "ordered" : "head of queue");
			ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_BASIC);
			mask = ~0x03;
		}

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		ahc_outb(ahc, SCB_CONTROL,
			 ahc_inb(ahc, SCB_CONTROL) & mask);
		scb->hscb->control &= mask;
		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
					/*type*/MSG_SIMPLE_TASK);
		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
		ahc_assert_atn(ahc);

		/*
		 * This transaction is now at the head of
		 * the untagged queue for this target.
		 */
		if ((ahc->flags & AHC_SCB_BTT) == 0) {
			struct scb_tailq *untagged_q;

			untagged_q =
			    &(ahc->untagged_queues[devinfo->target_offset]);
			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
			scb->flags |= SCB_UNTAGGEDQ;
		}
		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     scb->hscb->tag);

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
				   SCB_GET_CHANNEL(ahc, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else {
		/*
		 * Otherwise, we ignore it.
*/
	printk("%s:%c:%d: Message reject for %x -- ignored\n",
	       ahc_name(ahc), devinfo->channel, devinfo->target,
	       last_msg);
	}
	return (response);
}

/*
 * Process an ignore wide residue message.
 */
static void
ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	/* The sequencer leaves the active SCB's tag in SCB_TAG. */
	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing. Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
		if ((sgptr & SG_LIST_NULL) != 0
		 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			struct ahc_dma_seg *sg;
			uint32_t data_cnt;
			uint32_t data_addr;
			uint32_t sglen;

			/* Pull in all of the sgptr */
			sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR);
			data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT);

			if ((sgptr & SG_LIST_NULL) != 0) {
				/*
				 * The residual data count is not updated
				 * for the command run to completion case.
				 * Explicitly zero the count.
				 */
				data_cnt &= ~AHC_SG_LEN_MASK;
			}

			data_addr = ahc_inl(ahc, SHADDR);
			/* Back the transfer up by the one ignored byte. */
			data_cnt += 1;
			data_addr -= 1;
			sgptr &= SG_PTR_MASK;

			sg = ahc_sg_bus_to_virt(scb, sgptr);

			/*
			 * The residual sg ptr points to the next S/G
			 * to load so we must go back one.
			 */
			sg--;
			sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
			if (sg != scb->sg_list
			 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
				/* The ignored byte lives in the prior segment. */
				sg--;
				sglen = ahc_le32toh(sg->len);
				/*
				 * Preserve High Address and SG_LIST bits
				 * while setting the count to 1.
				 */
				data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
				data_addr = ahc_le32toh(sg->addr)
					  + (sglen & AHC_SG_LEN_MASK) - 1;

				/*
				 * Increment sg so it points to the
				 * "next" sg.
				 */
				sg++;
				sgptr = ahc_sg_virt_to_bus(scb, sg);
			}
			ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr);
			ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
			/*
			 * Toggle the "oddness" of the transfer length
			 * to handle this mid-transfer ignore wide
			 * residue. This ensures that the oddness is
			 * correct for subsequent data transfers.
			 */
			ahc_outb(ahc, SCB_LUN,
				 ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD);
		}
	}
}

/*
 * Reinitialize the data pointers for the active transfer
 * based on its current residual.
 */
static void
ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
{
	struct scb *scb;
	struct ahc_dma_seg *sg;
	u_int scb_index;
	uint32_t sgptr;
	uint32_t resid;
	uint32_t dataptr;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/* Assemble the 32-bit residual S/G pointer byte by byte. */
	sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
	      | ahc_inb(ahc, SCB_RESIDUAL_SGPTR);

	sgptr &= SG_PTR_MASK;
	sg = ahc_sg_bus_to_virt(scb, sgptr);

	/* The residual sg_ptr always points to the next sg */
	sg--;

	/* Residual byte count is kept as a 24-bit quantity. */
	resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
	      | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
	      | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);

	/* Host address resumes at (segment end - residual). */
	dataptr = ahc_le32toh(sg->addr)
		+ (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
		- resid;
	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
		u_int dscommand1;

		/* Load the high address bits via HADDLDSEL0. */
		dscommand1 = ahc_inb(ahc, DSCOMMAND1);
		ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
		ahc_outb(ahc, HADDR,
			 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
		ahc_outb(ahc, DSCOMMAND1, dscommand1);
	}
	ahc_outb(ahc, HADDR + 3, dataptr >> 24);
	ahc_outb(ahc, HADDR + 2, dataptr
>> 16);
	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
	ahc_outb(ahc, HADDR, dataptr);
	ahc_outb(ahc, HCNT + 2, resid >> 16);
	ahc_outb(ahc, HCNT + 1, resid >> 8);
	ahc_outb(ahc, HCNT, resid);
	if ((ahc->features & AHC_ULTRA2) == 0) {
		/* Pre-Ultra2 chips also need the SCSI transfer count set. */
		ahc_outb(ahc, STCNT + 2, resid >> 16);
		ahc_outb(ahc, STCNT + 1, resid >> 8);
		ahc_outb(ahc, STCNT, resid);
	}
}

/*
 * Handle the effects of issuing a bus device reset message.
 */
static void
ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		    cam_status status, char *message, int verbose_level)
{
#ifdef AHC_TARGET_MODE
	struct ahc_tmode_tstate* tstate;
	u_int lun;
#endif
	int found;

	/* Abort all SCBs outstanding for this target on this channel. */
	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
			       status);

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	tstate = ahc->enabled_targets[devinfo->our_scsiid];
	if (tstate != NULL) {
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
					       MSG_BUS_DEV_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif

	/*
	 * Go back to async/narrow transfers and renegotiate.
	 */
	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHC_TRANS_CUR, /*paused*/TRUE);
	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
			 /*period*/0, /*offset*/0, /*ppr_options*/0,
			 AHC_TRANS_CUR, /*paused*/TRUE);

	if (status != CAM_SEL_TIMEOUT)
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_SENT_BDR);

	if (message != NULL
	 && (verbose_level <= bootverbose))
		printk("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
		       message, devinfo->channel, devinfo->target, found);
}

#ifdef AHC_TARGET_MODE
/*
 * Prepare an outgoing message for a target-role MSGIN phase.
 */
static void
ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
		ahc_build_transfer_msg(ahc, devinfo);
	else
		panic("ahc_intr: AWAITING target message with no message");

	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
}
#endif
/**************************** Initialization **********************************/
/*
 * Allocate a controller structure for a new device
 * and perform initial initialization.
 */
struct ahc_softc *
ahc_alloc(void *platform_arg, char *name)
{
	struct ahc_softc *ahc;
	int i;

#ifndef __FreeBSD__
	ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
	if (!ahc) {
		printk("aic7xxx: cannot malloc softc!\n");
		/* We own "name"; release it on every failure path. */
		kfree(name);
		return NULL;
	}
#else
	ahc = device_get_softc((device_t)platform_arg);
#endif
	memset(ahc, 0, sizeof(*ahc));
	ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
	if (ahc->seep_config == NULL) {
#ifndef __FreeBSD__
		kfree(ahc);
#endif
		kfree(name);
		return (NULL);
	}
	LIST_INIT(&ahc->pending_scbs);
	/* We don't know our unit number until the OSM sets it */
	ahc->name = name;
	ahc->unit = -1;
	ahc->description = NULL;
	ahc->channel = 'A';
	ahc->channel_b = 'B';
	ahc->chip = AHC_NONE;
	ahc->features = AHC_FENONE;
	ahc->bugs = AHC_BUGNONE;
	ahc->flags = AHC_FNONE;
	/*
	 * Default to all error reporting enabled with the
	 * sequencer operating at its fastest speed.
	 * The bus attach code may modify this.
*/ ahc->seqctl = FASTMODE; for (i = 0; i < AHC_NUM_TARGETS; i++) TAILQ_INIT(&ahc->untagged_queues[i]); if (ahc_platform_alloc(ahc, platform_arg) != 0) { ahc_free(ahc); ahc = NULL; } return (ahc); } int ahc_softc_init(struct ahc_softc *ahc) { /* The IRQMS bit is only valid on VL and EISA chips */ if ((ahc->chip & AHC_PCI) == 0) ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; else ahc->unpause = 0; ahc->pause = ahc->unpause | PAUSE; /* XXX The shared scb data stuff should be deprecated */ if (ahc->scb_data == NULL) { ahc->scb_data = kmalloc(sizeof(*ahc->scb_data), GFP_ATOMIC); if (ahc->scb_data == NULL) return (ENOMEM); memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); } return (0); } void ahc_set_unit(struct ahc_softc *ahc, int unit) { ahc->unit = unit; } void ahc_set_name(struct ahc_softc *ahc, char *name) { if (ahc->name != NULL) kfree(ahc->name); ahc->name = name; } void ahc_free(struct ahc_softc *ahc) { int i; switch (ahc->init_level) { default: case 5: ahc_shutdown(ahc); /* FALLTHROUGH */ case 4: ahc_dmamap_unload(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); /* FALLTHROUGH */ case 3: ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, ahc->shared_data_dmamap); ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); /* FALLTHROUGH */ case 2: ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); case 1: #ifndef __linux__ ahc_dma_tag_destroy(ahc, ahc->buffer_dmat); #endif break; case 0: break; } #ifndef __linux__ ahc_dma_tag_destroy(ahc, ahc->parent_dmat); #endif ahc_platform_free(ahc); ahc_fini_scbdata(ahc); for (i = 0; i < AHC_NUM_TARGETS; i++) { struct ahc_tmode_tstate *tstate; tstate = ahc->enabled_targets[i]; if (tstate != NULL) { #ifdef AHC_TARGET_MODE int j; for (j = 0; j < AHC_NUM_LUNS; j++) { struct ahc_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); kfree(lstate); } } #endif kfree(tstate); } } #ifdef AHC_TARGET_MODE if (ahc->black_hole != NULL) { 
xpt_free_path(ahc->black_hole->path);
		kfree(ahc->black_hole);
	}
#endif
	if (ahc->name != NULL)
		kfree(ahc->name);
	if (ahc->seep_config != NULL)
		kfree(ahc->seep_config);
#ifndef __FreeBSD__
	kfree(ahc);
#endif
	return;
}

/* Quiesce the chip: reset it and zero the SCSI control registers. */
static void
ahc_shutdown(void *arg)
{
	struct	ahc_softc *ahc;
	int	i;

	ahc = (struct ahc_softc *)arg;

	/* This will reset most registers to 0, but not all */
	ahc_reset(ahc, /*reinit*/FALSE);
	ahc_outb(ahc, SCSISEQ, 0);
	ahc_outb(ahc, SXFRCTL0, 0);
	ahc_outb(ahc, DSPCISTATUS, 0);

	for (i = TARG_SCSIRATE; i < SCSICONF; i++)
		ahc_outb(ahc, i, 0);
}

/*
 * Reset the controller and record some information about it
 * that is only available just after a reset. If "reinit" is
 * non-zero, this reset occurred after initial configuration
 * and the caller requests that the chip be fully reinitialized
 * to a runable state. Chip interrupts are *not* enabled after
 * a reinitialization. The caller must enable interrupts via
 * ahc_intr_enable().
 */
int
ahc_reset(struct ahc_softc *ahc, int reinit)
{
	u_int	sblkctl;
	u_int	sxfrctl1_a, sxfrctl1_b;
	int	error;
	int	wait;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahc_pause(ahc);
	sxfrctl1_b = 0;
	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
		u_int sblkctl;

		/*
		 * Save channel B's settings in case this chip
		 * is setup for TWIN channel operation.
		 */
		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);

	/* Kick off the chip reset. */
	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);

	/*
	 * Ensure that the reset has finished. We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		ahc_delay(1000);
	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printk("%s: WARNING - Failed chip reset! "
		       "Trying to initialize anyway.\n", ahc_name(ahc));
	}
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/* Determine channel configuration */
	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
	/* No Twin Channel PCI cards */
	if ((ahc->chip & AHC_PCI) != 0)
		sblkctl &= ~SELBUSB;
	switch (sblkctl) {
	case 0:
		/* Single Narrow Channel */
		break;
	case 2:
		/* Wide Channel */
		ahc->features |= AHC_WIDE;
		break;
	case 8:
		/* Twin Channel */
		ahc->features |= AHC_TWIN;
		break;
	default:
		printk(" Unsupported adapter type. Ignoring\n");
		return(-1);
	}

	/*
	 * Reload sxfrctl1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values. STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	if ((ahc->features & AHC_TWIN) != 0) {
		u_int sblkctl;

		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);

	error = 0;
	if (reinit != 0)
		/*
		 * If a recovery action has forced a chip reset,
		 * re-initialize the chip to our liking.
*/ error = ahc->bus_chip_init(ahc); #ifdef AHC_DUMP_SEQ else ahc_dumpseq(ahc); #endif return (error); } /* * Determine the number of SCBs available on the controller */ int ahc_probe_scbs(struct ahc_softc *ahc) { int i; for (i = 0; i < AHC_SCB_MAX; i++) { ahc_outb(ahc, SCBPTR, i); ahc_outb(ahc, SCB_BASE, i); if (ahc_inb(ahc, SCB_BASE) != i) break; ahc_outb(ahc, SCBPTR, 0); if (ahc_inb(ahc, SCB_BASE) != 0) break; } return (i); } static void ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { dma_addr_t *baddr; baddr = (dma_addr_t *)arg; *baddr = segs->ds_addr; } static void ahc_build_free_scb_list(struct ahc_softc *ahc) { int scbsize; int i; scbsize = 32; if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) scbsize = 64; for (i = 0; i < ahc->scb_data->maxhscbs; i++) { int j; ahc_outb(ahc, SCBPTR, i); /* * Touch all SCB bytes to avoid parity errors * should one of our debugging routines read * an otherwise uninitiatlized byte. */ for (j = 0; j < scbsize; j++) ahc_outb(ahc, SCB_BASE+j, 0xFF); /* Clear the control byte. */ ahc_outb(ahc, SCB_CONTROL, 0); /* Set the next pointer */ if ((ahc->flags & AHC_PAGESCBS) != 0) ahc_outb(ahc, SCB_NEXT, i+1); else ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); /* Make the tag number, SCSIID, and lun invalid */ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); ahc_outb(ahc, SCB_SCSIID, 0xFF); ahc_outb(ahc, SCB_LUN, 0xFF); } if ((ahc->flags & AHC_PAGESCBS) != 0) { /* SCB 0 heads the free list. */ ahc_outb(ahc, FREE_SCBH, 0); } else { /* No free list. 
*/
		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
	}

	/* Make sure that the last SCB terminates the free list */
	ahc_outb(ahc, SCBPTR, i-1);
	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
}

/*
 * Allocate kernel and DMA-visible resources for the SCB pool.
 * Each successful step bumps scb_data->init_level so that
 * ahc_fini_scbdata() can unwind exactly what was set up.
 * Returns 0 on success or an errno-style code on failure.
 */
static int
ahc_init_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources */
	scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
				     GFP_ATOMIC);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);

	/* Determine the number of hardware SCBs and initialize them */
	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	if (ahc->scb_data->maxhscbs == 0) {
		printk("%s: No SCB space found\n", ahc_name(ahc));
		return (ENXIO);
	}

	/*
	 * Create our DMA tags. These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our hscbs */
	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
			     (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them */
	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
			     (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our S/G structures.
We allocate in page sized chunks */ if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sg_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Perform initial CCB allocation */ memset(scb_data->hscbs, 0, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); ahc_alloc_scbs(ahc); if (scb_data->numscbs == 0) { printk("%s: ahc_init_scbdata - " "Unable to allocate initial scbs\n", ahc_name(ahc)); goto error_exit; } /* * Reserve the next queued SCB. */ ahc->next_queued_scb = ahc_get_scb(ahc); /* * Note that we were successful */ return (0); error_exit: return (ENOMEM); } static void ahc_fini_scbdata(struct ahc_softc *ahc) { struct scb_data *scb_data; scb_data = ahc->scb_data; if (scb_data == NULL) return; switch (scb_data->init_level) { default: case 7: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); ahc_dmamap_unload(ahc, scb_data->sg_dmat, sg_map->sg_dmamap); ahc_dmamem_free(ahc, scb_data->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); kfree(sg_map); } ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); } case 6: ahc_dmamap_unload(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); case 5: ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, scb_data->sense_dmamap); ahc_dmamap_destroy(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); case 4: ahc_dma_tag_destroy(ahc, scb_data->sense_dmat); case 3: ahc_dmamap_unload(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); case 2: ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, scb_data->hscb_dmamap); ahc_dmamap_destroy(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); case 1: ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat); break; case 0: break; } if (scb_data->scbarray != NULL) 
kfree(scb_data->scbarray); } static void ahc_alloc_scbs(struct ahc_softc *ahc) { struct scb_data *scb_data; struct scb *next_scb; struct sg_map_node *sg_map; dma_addr_t physaddr; struct ahc_dma_seg *segs; int newcount; int i; scb_data = ahc->scb_data; if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC) /* Can't allocate any more */ return; next_scb = &scb_data->scbarray[scb_data->numscbs]; sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC); if (sg_map == NULL) return; /* Allocate S/G space for the next batch of SCBS */ if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { kfree(sg_map); return; } SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb, &sg_map->sg_physaddr, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); for (i = 0; i < newcount; i++) { struct scb_platform_data *pdata; #ifndef __linux__ int error; #endif pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); if (pdata == NULL) break; next_scb->platform_data = pdata; next_scb->sg_map = sg_map; next_scb->sg_list = segs; /* * The sequencer always starts with the second entry. * The first entry is embedded in the scb. 
*/ next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); next_scb->ahc_softc = ahc; next_scb->flags = SCB_FREE; #ifndef __linux__ error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0, &next_scb->dmamap); if (error != 0) break; #endif next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; next_scb->hscb->tag = ahc->scb_data->numscbs; SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, next_scb, links.sle); segs += AHC_NSEG; physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); next_scb++; ahc->scb_data->numscbs++; } } void ahc_controller_info(struct ahc_softc *ahc, char *buf) { int len; len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); buf += len; if ((ahc->features & AHC_TWIN) != 0) len = sprintf(buf, "Twin Channel, A SCSI Id=%d, " "B SCSI Id=%d, primary %c, ", ahc->our_id, ahc->our_id_b, (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); else { const char *speed; const char *type; speed = ""; if ((ahc->features & AHC_ULTRA) != 0) { speed = "Ultra "; } else if ((ahc->features & AHC_DT) != 0) { speed = "Ultra160 "; } else if ((ahc->features & AHC_ULTRA2) != 0) { speed = "Ultra2 "; } if ((ahc->features & AHC_WIDE) != 0) { type = "Wide"; } else { type = "Single"; } len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ", speed, type, ahc->channel, ahc->our_id); } buf += len; if ((ahc->flags & AHC_PAGESCBS) != 0) sprintf(buf, "%d/%d SCBs", ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); else sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); } int ahc_chip_init(struct ahc_softc *ahc) { int term; int error; u_int i; u_int scsi_conf; u_int scsiseq_template; uint32_t physaddr; ahc_outb(ahc, SEQ_FLAGS, 0); ahc_outb(ahc, SEQ_FLAGS2, 0); /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ if (ahc->features & AHC_TWIN) { /* * Setup Channel B first. */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? 
STPWEN : 0;
		ahc_outb(ahc, SCSIID, ahc->our_id_b);
		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
		if ((ahc->features & AHC_ULTRA2) != 0)
			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);

		/* Select Channel A */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
	}
	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
	else
		ahc_outb(ahc, SCSIID, ahc->our_id);
	scsi_conf = ahc_inb(ahc, SCSICONF);
	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
				|term|ahc->seltime
				|ENSTIMER|ACTNEGEN);
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);

	/* There are no untagged SCBs active yet. */
	for (i = 0; i < 16; i++) {
		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
		if ((ahc->flags & AHC_SCB_BTT) != 0) {
			int lun;

			/*
			 * The SCB based BTT allows an entry per
			 * target and lun pair.
			 */
			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
		}
	}

	/* All of our queues are empty */
	for (i = 0; i < 256; i++)
		ahc->qoutfifo[i] = SCB_LIST_NULL;
	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);

	for (i = 0; i < 256; i++)
		ahc->qinfifo[i] = SCB_LIST_NULL;

	if ((ahc->features & AHC_MULTI_TID) != 0) {
		ahc_outb(ahc, TARGID, 0);
		ahc_outb(ahc, TARGID + 1, 0);
	}

	/*
	 * Tell the sequencer where it can find our arrays in memory.
	 */
	physaddr = ahc->scb_data->hscb_busaddr;
	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);

	physaddr = ahc->shared_data_busaddr;
	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);

	/*
	 * Initialize the group code to command length table.
	 * This overrides the values in TARG_SCSIRATE, so only
	 * setup the table after we have processed that information.
	 */
	ahc_outb(ahc, CMDSIZE_TABLE, 5);
	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);

	if ((ahc->features & AHC_HS_MAILBOX) != 0)
		ahc_outb(ahc, HS_MAILBOX, 0);

	/* Tell the sequencer of our initial queue positions */
	if ((ahc->features & AHC_TARGETMODE) != 0) {
		ahc->tqinfifonext = 1;
		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
	}
	ahc->qinfifonext = 0;
	ahc->qoutfifonext = 0;
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
		ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext);
		ahc_outb(ahc, SDSCB_QOFF, 0);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		ahc_outb(ahc, QINPOS, ahc->qinfifonext);
		ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext);
	}

	/* We don't have any waiting selections */
	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);

	/* Our disconnection list is empty too */
	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);

	/* Message out buffer starts empty */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);

	/*
	 * Setup the allowed SCSI Sequences based on operational mode.
* If we are a target, we'll enable select in operations once
	 * we've had a lun enabled.
	 */
	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
	if ((ahc->flags & AHC_INITIATORROLE) != 0)
		scsiseq_template |= ENRSELI;
	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);

	/* Initialize our list of free SCBs. */
	ahc_build_free_scb_list(ahc);

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	/*
	 * Load the Sequencer program and Enable the adapter
	 * in "fast" mode.
	 */
	if (bootverbose)
		printk("%s: Downloading Sequencer Program...",
		       ahc_name(ahc));

	error = ahc_loadseq(ahc);
	if (error != 0)
		return (error);

	if ((ahc->features & AHC_ULTRA2) != 0) {
		int wait;

		/*
		 * Wait for up to 500ms for our transceivers
		 * to settle. If the adapter does not have
		 * a cable attached, the transceivers may
		 * never settle, so don't complain if we
		 * fail here.
		 */
		for (wait = 5000;
		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
		     wait--)
			ahc_delay(100);
	}
	ahc_restart(ahc);
	return (0);
}

/*
 * Start the board, ready for normal operation
 */
int
ahc_init(struct ahc_softc *ahc)
{
	int	 max_targ;
	u_int	 i;
	u_int	 scsi_conf;
	u_int	 ultraenb;
	u_int	 discenable;
	u_int	 tagenable;
	size_t	 driver_data_size;

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0)
		ahc->flags |= AHC_SEQUENCER_DEBUG;
#endif

#ifdef AHC_PRINT_SRAM
	/* Dump scratch RAM for debugging before it is rewritten. */
	printk("Scratch Ram:");
	for (i = 0x20; i < 0x5f; i++) {
		if (((i % 8) == 0) && (i != 0)) {
			printk ("\n ");
		}
		printk (" 0x%x", ahc_inb(ahc, i));
	}
	if ((ahc->features & AHC_MORE_SRAM) != 0) {
		for (i = 0x70; i < 0x7f; i++) {
			if (((i % 8) == 0) && (i != 0)) {
				printk ("\n ");
			}
			printk (" 0x%x", ahc_inb(ahc, i));
		}
	}
	printk ("\n");
	/*
	 * Reading uninitialized scratch ram may
	 * generate parity errors.
	 */
	ahc_outb(ahc, CLRINT, CLRPARERR);
	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
#endif
	max_targ = 15;

	/*
	 * Assume we have a board at this stage and it has been reset.
	 */
	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
		ahc->our_id = ahc->our_id_b = 7;

	/*
	 * Default to allowing initiator operations.
	 */
	ahc->flags |= AHC_INITIATORROLE;

	/*
	 * Only allow target mode features if this unit has them enabled.
	 */
	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
		ahc->features &= ~AHC_TARGETMODE;

#ifndef __linux__
	/* DMA tag for mapping buffers into device visible space. */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
					? (dma_addr_t)0x7FFFFFFFFFULL
					: BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
			       /*nsegments*/AHC_NSEG,
			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &ahc->buffer_dmat) != 0) {
		return (ENOMEM);
	}
#endif

	ahc->init_level++;

	/*
	 * DMA tag for our command fifos and other data in system memory
	 * the card's sequencer must be able to access. For initiator
	 * roles, we need to allocate space for the qinfifo and qoutfifo.
	 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
	 * When providing for the target mode role, we must additionally
	 * provide space for the incoming target command fifo and an extra
	 * byte to deal with a dma bug in some chip versions.
*/
	driver_data_size = 2 * 256 * sizeof(uint8_t);
	if ((ahc->features & AHC_TARGETMODE) != 0)
		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
				 + /*DMA WideOdd Bug Buffer*/1;
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       driver_data_size,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
		return (ENOMEM);
	}

	ahc->init_level++;

	/* Allocation of driver data */
	if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
			     (void **)&ahc->qoutfifo,
			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
		return (ENOMEM);
	}

	ahc->init_level++;

	/* And permanently map it in */
	ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
			&ahc->shared_data_busaddr, /*flags*/0);

	if ((ahc->features & AHC_TARGETMODE) != 0) {
		/* Carve the target command fifo out of the shared area. */
		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
		ahc->dma_bug_buf = ahc->shared_data_busaddr
				 + driver_data_size - 1;
		/* All target command blocks start out invalid. */
		for (i = 0; i < AHC_TMODE_CMDS; i++)
			ahc->targetcmds[i].cmd_valid = 0;
		ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
	}
	ahc->qinfifo = &ahc->qoutfifo[256];

	ahc->init_level++;

	/* Allocate SCB data now that buffer_dmat is initialized */
	if (ahc->scb_data->maxhscbs == 0)
		if (ahc_init_scbdata(ahc) != 0)
			return (ENOMEM);

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
		printk("%s: unable to allocate ahc_tmode_tstate. "
		       "Failing attach\n", ahc_name(ahc));
		return (ENOMEM);
	}

	if ((ahc->features & AHC_TWIN) != 0) {
		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
			printk("%s: unable to allocate ahc_tmode_tstate. "
			       "Failing attach\n", ahc_name(ahc));
			return (ENOMEM);
		}
	}

	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) {
		ahc->flags |= AHC_PAGESCBS;
	} else {
		ahc->flags &= ~AHC_PAGESCBS;
	}

#ifdef AHC_DEBUG
	if (ahc_debug & AHC_SHOW_MISC) {
		printk("%s: hardware scb %u bytes; kernel scb %u bytes; "
		       "ahc_dma %u bytes\n", ahc_name(ahc),
		       (u_int)sizeof(struct hardware_scb),
		       (u_int)sizeof(struct scb),
		       (u_int)sizeof(struct ahc_dma_seg));
	}
#endif /* AHC_DEBUG */

	/*
	 * Look at the information that board initialization or
	 * the board bios has left us.
	 */
	if (ahc->features & AHC_TWIN) {
		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
		if ((scsi_conf & RESET_SCSI) != 0
		 && (ahc->flags & AHC_INITIATORROLE) != 0)
			ahc->flags |= AHC_RESET_BUS_B;
	}

	scsi_conf = ahc_inb(ahc, SCSICONF);
	if ((scsi_conf & RESET_SCSI) != 0
	 && (ahc->flags & AHC_INITIATORROLE) != 0)
		ahc->flags |= AHC_RESET_BUS_A;

	ultraenb = 0;
	tagenable = ALL_TARGETS_MASK;

	/* Grab the disconnection disable table and invert it for our needs */
	if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
		printk("%s: Host Adapter Bios disabled. Using default SCSI "
		       "device parameters\n", ahc_name(ahc));
		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
		discenable = ALL_TARGETS_MASK;
		if ((ahc->features & AHC_ULTRA) != 0)
			ultraenb = ALL_TARGETS_MASK;
	} else {
		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
			   | ahc_inb(ahc, DISC_DSB));
		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
				 | ahc_inb(ahc, ULTRA_ENB);
	}

	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
		max_targ = 7;

	for (i = 0; i <= max_targ; i++) {
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;
		u_int our_id;
		u_int target_id;
		char channel;

		channel = 'A';
		our_id = ahc->our_id;
		target_id = i;
		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
			/* Targets 8-15 live on channel B of twin chips. */
			channel = 'B';
			our_id = ahc->our_id_b;
			target_id = i % 8;
		}
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
					    target_id, &tstate);
		/* Default to async narrow across the board */
		memset(tinfo, 0, sizeof(*tinfo));
		if (ahc->flags & AHC_USEDEFAULTS) {
			if ((ahc->features & AHC_WIDE) != 0)
				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;

			/*
			 * These will be truncated when we determine the
			 * connection type we have with the target.
			 */
			tinfo->user.period = ahc_syncrates->period;
			tinfo->user.offset = MAX_OFFSET;
		} else {
			u_int scsirate;
			uint16_t mask;

			/* Take the settings leftover in scratch RAM. */
			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
			mask = (0x01 << i);
			if ((ahc->features & AHC_ULTRA2) != 0) {
				u_int offset;
				u_int maxsync;

				if ((scsirate & SOFS) == 0x0F) {
					/*
					 * Haven't negotiated yet,
					 * so the format is different.
					 *
					 * NOTE(review): '|' binds tighter
					 * than '?:', so the ?: condition
					 * here is the entire OR expression;
					 * likely meant
					 * (ultraenb & mask) ? 0x08 : 0x0.
					 * Confirm against hardware docs
					 * before changing.
					 */
					scsirate = (scsirate & SXFR) >> 4
						 | (ultraenb & mask) ?
0x08 : 0x0 | (scsirate & WIDEXFER); offset = MAX_OFFSET_ULTRA2; } else offset = ahc_inb(ahc, TARG_OFFSET + i); if ((scsirate & ~WIDEXFER) == 0 && offset != 0) /* Set to the lowest sync rate, 5MHz */ scsirate |= 0x1c; maxsync = AHC_SYNCRATE_ULTRA2; if ((ahc->features & AHC_DT) != 0) maxsync = AHC_SYNCRATE_DT; tinfo->user.period = ahc_find_period(ahc, scsirate, maxsync); if (offset == 0) tinfo->user.period = 0; else tinfo->user.offset = MAX_OFFSET; if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ && (ahc->features & AHC_DT) != 0) tinfo->user.ppr_options = MSG_EXT_PPR_DT_REQ; } else if ((scsirate & SOFS) != 0) { if ((scsirate & SXFR) == 0x40 && (ultraenb & mask) != 0) { /* Treat 10MHz as a non-ultra speed */ scsirate &= ~SXFR; ultraenb &= ~mask; } tinfo->user.period = ahc_find_period(ahc, scsirate, (ultraenb & mask) ? AHC_SYNCRATE_ULTRA : AHC_SYNCRATE_FAST); if (tinfo->user.period != 0) tinfo->user.offset = MAX_OFFSET; } if (tinfo->user.period == 0) tinfo->user.offset = 0; if ((scsirate & WIDEXFER) != 0 && (ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; tinfo->user.protocol_version = 4; if ((ahc->features & AHC_DT) != 0) tinfo->user.transport_version = 3; else tinfo->user.transport_version = 2; tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; } tstate->ultraenb = 0; } ahc->user_discenable = discenable; ahc->user_tagenable = tagenable; return (ahc->bus_chip_init(ahc)); } void ahc_intr_enable(struct ahc_softc *ahc, int enable) { u_int hcntrl; hcntrl = ahc_inb(ahc, HCNTRL); hcntrl &= ~INTEN; ahc->pause &= ~INTEN; ahc->unpause &= ~INTEN; if (enable) { hcntrl |= INTEN; ahc->pause |= INTEN; ahc->unpause |= INTEN; } ahc_outb(ahc, HCNTRL, hcntrl); } /* * Ensure that the card is paused in a location * outside of all critical sections and that all * pending work is completed prior to returning. * This routine should only be called from outside * an interrupt context. 
 */
void
ahc_pause_and_flushwork(struct ahc_softc *ahc)
{
	int intstat;
	int maxloops;
	int paused;

	maxloops = 1000;
	ahc->flags |= AHC_ALL_INTERRUPTS;
	paused = FALSE;
	do {
		if (paused) {
			ahc_unpause(ahc);
			/*
			 * Give the sequencer some time to service
			 * any active selections.
			 */
			ahc_delay(500);
		}
		ahc_intr(ahc);
		ahc_pause(ahc);
		paused = TRUE;
		/* Disable outgoing selections while we drain. */
		ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
		intstat = ahc_inb(ahc, INTSTAT);
		if ((intstat & INT_PEND) == 0) {
			ahc_clear_critical_section(ahc);
			intstat = ahc_inb(ahc, INTSTAT);
		}
		/*
		 * Loop until no interrupt is pending and no selection is
		 * in progress.  0xFF from INTSTAT on a removable card is
		 * treated as "card gone" and also terminates the loop.
		 */
	} while (--maxloops
	      && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
	      && ((intstat & INT_PEND) != 0
	       || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
	if (maxloops == 0) {
		printk("Infinite interrupt loop, INTSTAT = %x",
		       ahc_inb(ahc, INTSTAT));
	}
	ahc_platform_flushwork(ahc);
	ahc->flags &= ~AHC_ALL_INTERRUPTS;
}

#ifdef CONFIG_PM
/*
 * Power-management suspend hook.  Refuses (EBUSY) while transactions
 * are still outstanding; otherwise shuts the controller down.
 */
int
ahc_suspend(struct ahc_softc *ahc)
{

	ahc_pause_and_flushwork(ahc);

	if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
		ahc_unpause(ahc);
		return (EBUSY);
	}

#ifdef AHC_TARGET_MODE
	/*
	 * XXX What about ATIOs that have not yet been serviced?
	 * Perhaps we should just refuse to be suspended if we
	 * are acting in a target role.
	 */
	if (ahc->pending_device != NULL) {
		ahc_unpause(ahc);
		return (EBUSY);
	}
#endif
	ahc_shutdown(ahc);
	return (0);
}

/*
 * Power-management resume hook: full chip reset, re-enable
 * interrupts and restart the sequencer.
 */
int
ahc_resume(struct ahc_softc *ahc)
{

	ahc_reset(ahc, /*reinit*/TRUE);
	ahc_intr_enable(ahc, TRUE);
	ahc_restart(ahc);
	return (0);
}
#endif
/************************** Busy Target Table *********************************/
/*
 * Return the untagged transaction id for a given target/channel lun.
 * Optionally, clear the entry.
 */
static u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int scbid;
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		/* Per-lun table lives in SCB ram; preserve SCBPTR. */
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
	}

	return (scbid);
}

/*
 * Clear the busy-target-table entry for the given tcl
 * (marks it SCB_LIST_NULL, i.e. no untagged transaction active).
 */
static void
ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
	}
}

/*
 * Record scbid as the active untagged transaction for the given tcl.
 */
static void
ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
	}
}

/************************** SCB and SCB queue management **********************/
/*
 * Test whether an SCB matches a target/channel/lun/tag description.
 * Each of channel/target/lun/tag may be a wildcard (ALL_CHANNELS,
 * CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, SCB_LIST_NULL respectively).
 * Returns non-zero on match.
 */
int
ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahc, scb);
	char chan = SCB_GET_CHANNEL(ahc, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#ifdef AHC_TARGET_MODE
		int group;

		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group != XPT_FC_GROUP_TMODE)
			      && ((tag == scb->hscb->tag)
			       || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			      && ((tag == scb->io_ctx->csio.tag_id)
			       || (tag == SCB_LIST_NULL));
		}
#else /* !AHC_TARGET_MODE */
		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
#endif /* AHC_TARGET_MODE */
	}

	return match;
}

/*
 * Freeze the device queue for the device addressed by scb, requeueing
 * any transactions for it that are still in the qinfifo.
 */
static void
ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	int	target;
	char	channel;
	int	lun;

	target = SCB_GET_TARGET(ahc, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahc, scb);

	ahc_search_qinfifo(ahc, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	ahc_platform_freeze_devq(ahc, scb);
}

/*
 * Append scb to the tail of the qinfifo and notify the sequencer
 * of the new queue position.
 */
void
ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
{
	struct scb *prev_scb;

	prev_scb = NULL;
	if (ahc_qinfifo_count(ahc) != 0) {
		u_int prev_tag;
		uint8_t prev_pos;

		prev_pos = ahc->qinfifonext - 1;
		prev_tag = ahc->qinfifo[prev_pos];
		prev_scb = ahc_lookup_scb(ahc, prev_tag);
	}
	ahc_qinfifo_requeue(ahc, prev_scb, scb);
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}
}

/*
 * Link scb into the qinfifo after prev_scb (or at the head when
 * prev_scb is NULL), fixing up the hardware SCB "next" chain and
 * syncing the affected SCBs for DMA.
 */
static void
ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
	} else {
		prev_scb->hscb->next = scb->hscb->tag;
		ahc_sync_scb(ahc, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Number of entries currently in the qinfifo (host producer index
 * minus the sequencer's consumer index; uint8_t wraps naturally).
 */
static int
ahc_qinfifo_count(struct ahc_softc *ahc)
{
	uint8_t qinpos;
	uint8_t diff;

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		/* Write-back latches the snapshot register. */
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	diff = ahc->qinfifonext - qinpos;
	return (diff);
}

/*
 * Scan the qinfifo (and, via ahc_search_untagged_queues, the untagged
 * queues) for SCBs matching the description, applying the given
 * search action (count, remove, or complete).  Returns the number of
 * matches found.
 */
int
ahc_search_qinfifo(struct ahc_softc *ahc, int target,
		   char channel, int lun, u_int tag, role_t role,
		   uint32_t status, ahc_search_action action)
{
	struct	scb *scb;
	struct	scb *prev_scb;
	uint8_t qinstart;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t next;
	uint8_t prev;
	uint8_t curscbptr;
	int	found;
	int	have_qregs;

	qintail = ahc->qinfifonext;
	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
	if (have_qregs) {
		qinstart = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinstart);
	} else
		qinstart = ahc_inb(ahc, QINPOS);
	qinpos = qinstart;
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahc->qinfifonext = qinpos;
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	while (qinpos != qintail) {
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
		if (scb == NULL) {
			printk("qinpos = %d, SCB index = %d\n",
				qinpos, ahc->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb, status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printk("Inactive SCB in qinfifo\n");
				ahc_done(ahc, scb);

				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				break;
			case SEARCH_COUNT:
				/* Counting only: keep the entry queued. */
				ahc_qinfifo_requeue(ahc, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			ahc_qinfifo_requeue(ahc, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos++;
	}

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}

	if (action != SEARCH_COUNT
	 && (found != 0)
	 && (qinstart != ahc->qinfifonext)) {
		/*
		 * The sequencer may be in the process of dmaing
		 * down the SCB at the beginning of the queue.
		 * This could be problematic if either the first,
		 * or the second SCB is removed from the queue
		 * (the first SCB includes a pointer to the "next"
		 * SCB to dma).  If we have removed any entries, swap
		 * the first element in the queue with the next HSCB
		 * so the sequencer will notice that NEXT_QUEUED_SCB
		 * has changed during its dma attempt and will retry
		 * the DMA.
		 */
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);

		if (scb == NULL) {
			printk("found = %d, qinstart = %d, qinfifionext = %d\n",
				found, qinstart, ahc->qinfifonext);
			panic("First/Second Qinfifo fixup\n");
		}
		/*
		 * ahc_swap_with_next_hscb forces our next pointer to
		 * point to the reserved SCB for future commands.  Save
		 * and restore our original next pointer to maintain
		 * queue integrity.
		 */
		next = scb->hscb->next;
		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
		ahc_swap_with_next_hscb(ahc, scb);
		scb->hscb->next = next;
		ahc->qinfifo[qinstart] = scb->hscb->tag;

		/* Tell the card about the new head of the qinfifo. */
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);

		/* Fixup the tail "next" pointer.
		 */
		qintail = ahc->qinfifonext - 1;
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	}

	/*
	 * Search waiting for selection list.
	 */
	curscbptr = ahc_inb(ahc, SCBPTR);
	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
	prev = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		uint8_t scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printk("Waiting List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}
		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printk("scb_index = %d, next = %d\n",
				scb_index, next);
			panic("Waiting List traversal\n");
		}
		if (ahc_match_scb(ahc, scb, target, channel,
				  lun, SCB_LIST_NULL, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb,
								   status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printk("Inactive SCB in Waiting List\n");
				ahc_done(ahc, scb);
				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				next = ahc_rem_wscb(ahc, next, prev);
				break;
			case SEARCH_COUNT:
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
				break;
			}
		} else {
			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	/* Restore the SCB pointer we clobbered while walking the list. */
	ahc_outb(ahc, SCBPTR, curscbptr);

	found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL,
					    target, channel, lun, status,
					    action);

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}

/*
 * Scan the per-device untagged transaction queues for SCBs matching
 * the description, applying the given search action.  When ctx is
 * non-NULL only SCBs bound to that I/O context match.  Returns the
 * number of matches found.
 */
int
ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx,
			   int target, char channel, int lun, uint32_t status,
			   ahc_search_action action)
{
	struct	scb *scb;
	int	maxtarget;
	int	found;
	int	i;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged
transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	found = 0;
	i = 0;
	if ((ahc->flags & AHC_SCB_BTT) == 0) {
		/* One untagged queue per target (B channel offset by 8). */
		maxtarget = 16;
		if (target != CAM_TARGET_WILDCARD) {

			i = target;
			if (channel == 'B')
				i += 8;
			maxtarget = i + 1;
		}
	} else {
		maxtarget = 0;
	}

	for (; i < maxtarget; i++) {
		struct scb_tailq *untagged_q;
		struct scb *next_scb;

		untagged_q = &(ahc->untagged_queues[i]);
		next_scb = TAILQ_FIRST(untagged_q);
		while (next_scb != NULL) {

			scb = next_scb;
			next_scb = TAILQ_NEXT(scb, links.tqe);

			/*
			 * The head of the list may be the currently
			 * active untagged command for a device.
			 * We're only searching for commands that
			 * have not been started.  A transaction
			 * marked active but still in the qinfifo
			 * is removed by the qinfifo scanning code
			 * above.
			 */
			if ((scb->flags & SCB_ACTIVE) != 0)
				continue;

			if (ahc_match_scb(ahc, scb, target, channel, lun,
					  SCB_LIST_NULL, ROLE_INITIATOR) == 0
			 || (ctx != NULL && ctx != scb->io_ctx))
				continue;

			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb, status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printk("Inactive SCB in untaggedQ\n");
				ahc_done(ahc, scb);
				break;
			}
			case SEARCH_REMOVE:
				scb->flags &= ~SCB_UNTAGGEDQ;
				TAILQ_REMOVE(untagged_q, scb, links.tqe);
				break;
			case SEARCH_COUNT:
				break;
			}
		}
	}

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}

/*
 * Walk the on-chip disconnected SCB list looking for matches.
 * Optionally removes each match (remove), stops at the first match
 * (stop_on_first), and saves/restores SCBPTR (save_state).  Returns
 * the number of matches found.
 */
int
ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
		     int lun, u_int tag, int stop_on_first, int remove,
		     int save_state)
{
	struct	scb *scbp;
	u_int	next;
	u_int	prev;
	u_int	count;
	u_int	active_scb;

	count = 0;
	next = ahc_inb(ahc, DISCONNECTED_SCBH);
	prev = SCB_LIST_NULL;

	if (save_state) {
		/* restore this when we're done */
		active_scb = ahc_inb(ahc, SCBPTR);
	} else
		/* Silence compiler */
		active_scb = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		u_int scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printk("Disconnected List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}

		if (next == prev) {
			panic("Disconnected List Loop. "
			      "cur SCBPTR == %x, prev SCBPTR == %x.",
			      next, prev);
		}
		scbp = ahc_lookup_scb(ahc, scb_index);
		if (ahc_match_scb(ahc, scbp, target, channel, lun,
				  tag, ROLE_INITIATOR)) {
			count++;
			if (remove) {
				next =
				    ahc_rem_scb_from_disc_list(ahc, prev, next);
			} else {
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
			}
			if (stop_on_first)
				break;
		} else {
			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	if (save_state)
		ahc_outb(ahc, SCBPTR, active_scb);
	return (count);
}

/*
 * Remove an SCB from the on chip list of disconnected transactions.
 * This is empty/unused if we are not performing SCB paging.
 */
static u_int
ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
{
	u_int next;

	ahc_outb(ahc, SCBPTR, scbptr);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Zero the control byte so the card treats the SCB as idle. */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	if (prev != SCB_LIST_NULL) {
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	} else
		ahc_outb(ahc, DISCONNECTED_SCBH, next);

	return (next);
}

/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
{
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	 */
	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);

	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
	}
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
{
	u_int curscb, next;

	/*
	 * Select the SCB we want to abort and
	 * pull the next pointer out of it.
	 */
	curscb = ahc_inb(ahc, SCBPTR);
	ahc_outb(ahc, SCBPTR, scbpos);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the necessary fields */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* update the waiting list */
	if (prev == SCB_LIST_NULL) {
		/* First in the list */
		ahc_outb(ahc, WAITING_SCBH, next);

		/*
		 * Ensure we aren't attempting to perform
		 * selection for this entry.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else {
		/*
		 * Select the scb that pointed to us
		 * and update its next pointer.
		 */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	}

	/*
	 * Point us back at the original scb position.
	 */
	ahc_outb(ahc, SCBPTR, curscb);
	return next;
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
static int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct	scb *scbp;
	struct	scb *scbp_next;
	u_int	active_scb;
	int	i, j;
	int	maxtarget;
	int	minlun;
	int	maxlun;

	int	found;

	/*
	 * Don't attempt to run any queued untagged transactions
	 * until we are done with the abort process.
	 */
	ahc_freeze_untagged_queues(ahc);

	/* restore this when we're done */
	active_scb = ahc_inb(ahc, SCBPTR);

	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {
		/*
		 * Unless we are using an SCB based
		 * busy targets table, there is only
		 * one table entry for all luns of
		 * a target.
		 */
		minlun = 0;
		maxlun = 1;
		if ((ahc->flags & AHC_SCB_BTT) != 0)
			maxlun = AHC_NUM_LUNS;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				scbid = ahc_index_busy_tcl(ahc, tcl);
				scbp = ahc_lookup_scb(ahc, scbid);
				if (scbp == NULL
				 || ahc_match_scb(ahc, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
			}
		}

		/*
		 * Go through the disconnected list and remove any entries we
		 * have queued for completion, 0'ing their control byte too.
		 * We save the active SCB and restore it ourselves, so there
		 * is no reason for this search to restore it too.
		 */
		ahc_search_disc_list(ahc, target, channel, lun, tag,
				     /*stop_on_first*/FALSE, /*remove*/TRUE,
				     /*save_state*/FALSE);
	}

	/*
	 * Go through the hardware SCB array looking for commands that
	 * were active but not on any list.  In some cases, these remnants
	 * might not still have mappings in the scbindex array (e.g. unexpected
	 * bus free with the same scb queued for an abort).  Don't hold this
	 * against them.
	 */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		u_int scbid;

		ahc_outb(ahc, SCBPTR, i);
		scbid = ahc_inb(ahc, SCB_TAG);
		scbp = ahc_lookup_scb(ahc, scbid);
		if ((scbp == NULL && scbid != SCB_LIST_NULL)
		 || (scbp != NULL
		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
			ahc_add_curscb_to_free_list(ahc);
	}

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahc->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* Fetch the successor first: ahc_done unlinks scbp. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = ahc_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahc_set_transaction_status(scbp, status);
			if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahc_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printk("Inactive SCB on pending list\n");
			ahc_done(ahc, scbp);
			found++;
		}
	}
	ahc_outb(ahc, SCBPTR, active_scb);
	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
	ahc_release_untagged_queues(ahc);
	return found;
}

/*
 * Pulse SCSIRSTO to reset the currently selected SCSI bus, with bus
 * reset interrupts masked for the duration of the pulse.
 */
static void
ahc_reset_current_bus(struct ahc_softc *ahc)
{
	uint8_t scsiseq;

	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
	ahc_flush_device_writes(ahc);
	ahc_delay(AHC_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);

	ahc_clear_intstat(ahc);

	/* Re-enable reset interrupts */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
}

/*
 * Reset the given channel: optionally pulse the bus reset line, abort
 * all pending transactions on that bus with CAM_SCSI_BUS_RESET, notify
 * the transport layer, and revert every target to async/narrow until
 * renegotiation.  Returns the number of SCBs aborted.
 */
int
ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
{
	struct	ahc_devinfo devinfo;
	u_int	initiator, target, max_scsiid;
	u_int	sblkctl;
	u_int	scsiseq;
	u_int	simode1;
	int	found;
	int	restart_needed;
	char	cur_channel;

	ahc->pending_device = NULL;

	ahc_compile_devinfo(&devinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahc_pause(ahc);

	/* Make sure the sequencer is in a safe location. */
	ahc_clear_critical_section(ahc);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
	/*
	 * XXX - In Twin mode, the tqinfifo may have commands
	 *	 for an unaffected channel in it.
However, if
	 *	 we have run out of ATIO resources to drain that
	 *	 queue, we may not get them all out here.  Further,
	 *	 the blocked transactions for the reset channel
	 *	 should just be killed off, irrespecitve of whether
	 *	 we are blocked on ATIO resources.  Write a routine
	 *	 to compact the tqinfifo appropriately.
	 */
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
	}
#endif

	/*
	 * Reset the bus if we are initiating this reset
	 */
	sblkctl = ahc_inb(ahc, SBLKCTL);
	cur_channel = 'A';
	if ((ahc->features & AHC_TWIN) != 0
	 && ((sblkctl & SELBUSB) != 0))
	    cur_channel = 'B';
	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
	if (cur_channel != channel) {
		/* Case 1: Command for another bus is active
		 * Stealthily reset the other bus without
		 * upsetting the current bus.
		 */
		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#ifdef AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		ahc_outb(ahc, SBLKCTL, sblkctl);
		restart_needed = FALSE;
	} else {
		/* Case 2: A command from this bus is active or we're idle */
		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
#ifdef AHC_TARGET_MODE
		/*
		 * Bus resets clear ENSELI, so we cannot
		 * defer re-enabling bus reset interrupts
		 * if we are in target mode.
		 */
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			simode1 |= ENSCSIRST;
#endif
		ahc_outb(ahc, SIMODE1, simode1);
		if (initiate_reset)
			ahc_reset_current_bus(ahc);
		ahc_clear_intstat(ahc);
		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
		restart_needed = TRUE;
	}

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahc_tmode_tstate* tstate;
		u_int lun;

		tstate = ahc->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif
	/* Notify the XPT that a bus reset occurred */
	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {

		if (ahc->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahc_devinfo devinfo;

			ahc_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    channel, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHC_TRANS_CUR, /*paused*/TRUE);
			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
					 /*period*/0, /*offset*/0,
					 /*ppr_options*/0, AHC_TRANS_CUR,
					 /*paused*/TRUE);
		}
	}

	if (restart_needed)
		ahc_restart(ahc);
	else
		ahc_unpause(ahc);
	return found;
}


/***************************** Residual Processing ****************************/
/*
 * Calculate the residual for a just completed SCB.
 */
static void
ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct status_pkt *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_RESID_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */

	hscb = scb->hscb;
	sgptr = ahc_le32toh(hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_RESID_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	spkt = &hscb->shared_data.status;
	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = ahc_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
	} else {
		struct ahc_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
			sg++;
			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
		}
	}
	if ((scb->flags & SCB_SENSE) == 0)
		ahc_set_residual(scb, resid);
	else
		ahc_set_sense_residual(scb, resid);

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_SHOW_MISC) != 0) {
		ahc_print_path(ahc, scb);
		printk("Handled %sResidual of %d bytes\n",
		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
	}
#endif
}

/******************************* Target Mode **********************************/
#ifdef AHC_TARGET_MODE
/*
 * Add a target mode event to this lun's queue
 */
static void
ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahc_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	/* Number of events currently buffered (circular indices). */
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
		/* Buffer full: drop the oldest event to make room. */
		xpt_print_path(lstate->path);
		printk("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}

/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 */
void
ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/* Drain buffered events while immediate-notify CCBs are available. */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahc_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHC_DUMP_SEQ
/*
 * Debug helper: read back and print the sequencer instruction RAM.
 */
void
ahc_dumpseq(struct ahc_softc* ahc)
{
	int i;

	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	for (i = 0; i < ahc->instruction_ram_size; i++) {
		uint8_t ins_bytes[4];

		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
		printk("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the sequencer program into the chip's instruction RAM,
 * skipping instructions excluded by feature patches and recording the
 * (post-patch) critical-section boundaries for interrupt handling.
 * Returns 0 on success or ENOMEM if the program exceeds the chip's
 * instruction RAM.
 */
static int
ahc_loadseq(struct ahc_softc *ahc)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	const struct patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	int	downloaded;
	uint8_t	download_consts[7];

	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {
		cs_count *= sizeof(struct cs);
		ahc->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	/* Leave LOADRAM mode; sequencer may now execute the program. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);

	if (bootverbose) {
		printk(" %d instructions downloaded\n", downloaded);
		printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
		       ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
	}
	return (0);
}

/*
 * Consult the patch table for the patch(es) rooted at start_instr.
 * Advances *start_patch past patches that begin here and, for any
 * rejected patch, records in *skip_addr the first instruction past
 * the skipped region.  Returns 0 if start_instr should be skipped,
 * 1 if it should be downloaded.
 */
static int
ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	const struct patch *cur_patch;
	const struct patch *last_patch;
	u_int	num_patches;

	num_patches = ARRAY_SIZE(patches);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {

		if (cur_patch->patch_func(ahc) == 0) {

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our intruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Relocate and emit one sequencer instruction into SEQRAM.  Jump
 * targets are adjusted for instructions removed by patches, downloaded
 * constants are substituted for immediates flagged by the assembler,
 * and parity/format fix-ups are applied per chip generation.
 */
static void
ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		const struct patch *cur_patch;
		int address_offset;
		u_int address;
		u_int skip_addr;
		u_int i;

		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		/*
		 * Count how many instructions before the jump target
		 * were removed by patches; the target shifts back by
		 * that amount.
		 */
		for (i = 0; i < address;) {

			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				end_addr = min(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		if (fmt1_ins->parity != 0) {
			/* Parity bit doubles as "use downloaded constant". */
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		if ((ahc->features & AHC_CMD_CHAN) == 0
		 && opcode == AIC_OP_BMOV) {
			/*
			 * Block move was added at the same time
			 * as the command channel.  Verify that
			 * this is only a move of a single element
			 * and convert the BMOV to a MOV
			 * (AND with an immediate of FF).
 */
			if (fmt1_ins->immediate != 1)
				panic("%s: BMOV not supported\n",
				      ahc_name(ahc));
			fmt1_ins->opcode = AIC_OP_AND;
			fmt1_ins->immediate = 0xff;
		}
		/* FALLTHROUGH */
	case AIC_OP_ROL:
		if ((ahc->features & AHC_ULTRA2) != 0) {
			int i, count;

			/* Calculate odd parity for the instruction */
			for (i = 0, count = 0; i < 31; i++) {
				uint32_t mask;

				mask = 0x01 << i;
				if ((instr.integer & mask) != 0)
					count++;
			}
			if ((count & 0x01) == 0)
				instr.format1.parity = 1;
		} else {
			/* Compress the instruction for older sequencers */
			if (fmt3_ins != NULL) {
				instr.integer =
					fmt3_ins->immediate
				      | (fmt3_ins->source << 8)
				      | (fmt3_ins->address << 16)
				      | (fmt3_ins->opcode << 25);
			} else {
				instr.integer =
					fmt1_ins->immediate
				      | (fmt1_ins->source << 8)
				      | (fmt1_ins->destination << 16)
				      | (fmt1_ins->ret << 24)
				      | (fmt1_ins->opcode << 25);
			}
		}
		/* The sequencer is a little endian cpu */
		instr.integer = ahc_htole32(instr.integer);
		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
		break;
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}

/*
 * Print "name[0xvalue]" followed by the symbolic decode of any bit
 * fields that match entries in table.  cur_column tracks the output
 * column for wrapping at wrap_point; it may be NULL in some paths.
 * Returns the number of characters printed.
 */
int
ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
		   const char *name, u_int address, u_int value,
		   u_int *cur_column, u_int wrap_point)
{
	int	printed;
	u_int	printed_mask;

	if (cur_column != NULL && *cur_column >= wrap_point) {
		printk("\n");
		*cur_column = 0;
	}
	printed = printk("%s[0x%x]", name, value);
	if (table == NULL) {
		printed += printk(" ");
		/*
		 * NOTE(review): cur_column is dereferenced here without
		 * the NULL guard that the end of this function applies —
		 * confirm that every caller passing table == NULL also
		 * passes a non-NULL cur_column.
		 */
		*cur_column += printed;
		return (printed);
	}
	printed_mask = 0;
	/* Keep matching table entries until every bit has been decoded. */
	while (printed_mask != 0xFF) {
		int entry;

		for (entry = 0; entry < num_entries; entry++) {
			if (((value & table[entry].mask)
			  != table[entry].value)
			 || ((printed_mask & table[entry].mask)
			  == table[entry].mask))
				continue;

			printed += printk("%s%s",
					  printed_mask == 0 ? ":(" : "|",
					  table[entry].name);
			printed_mask |= table[entry].mask;
			break;
		}
		if (entry >= num_entries)
			break;
	}
	if (printed_mask != 0)
		printed += printk(") ");
	else
		printed += printk(" ");
	if (cur_column != NULL)
		*cur_column += printed;
	return (printed);
}

/*
 * Debugging aid: dump controller registers, SCB queues and driver
 * queues to the console.  Pauses the chip if it was running and
 * restores the pause state and SCBPTR before returning.
 */
void
ahc_dump_card_state(struct ahc_softc *ahc)
{
	struct	scb *scb;
	struct	scb_tailq *untagged_q;
	u_int	cur_col;
	int	paused;
	int	target;
	int	maxtarget;
	int	i;
	uint8_t last_phase;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t qoutpos;
	uint8_t scb_index;
	uint8_t saved_scbptr;

	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	last_phase = ahc_inb(ahc, LASTPHASE);
	printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
	       "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
	if (paused)
		printk("Card was paused\n");
	printk("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX),
	       ahc_inb(ahc, DINDEX), ahc_inb(ahc, ARG_2));
	printk("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
	       ahc_inb(ahc, SCBPTR));
	cur_col = 0;
	if ((ahc->features & AHC_DT) != 0)
		ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50);
	ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50);
	ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50);
	ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50);
	ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50);
	ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50);
	ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50);
	ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50);
	ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50);
	ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50);
	ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50);
	ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50);
	ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50);
ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); if (cur_col != 0) printk("\n"); printk("STACK:"); for (i = 0; i < STACK_SIZE; i++) printk(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); printk("\nSCB count = %d\n", ahc->scb_data->numscbs); printk("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); printk("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); /* QINFIFO */ printk("QINFIFO entries: "); if ((ahc->features & AHC_QUEUE_REGS) != 0) { qinpos = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinpos); } else qinpos = ahc_inb(ahc, QINPOS); qintail = ahc->qinfifonext; while (qinpos != qintail) { printk("%d ", ahc->qinfifo[qinpos]); qinpos++; } printk("\n"); printk("Waiting Queue entries: "); scb_index = ahc_inb(ahc, WAITING_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); scb_index = ahc_inb(ahc, SCB_NEXT); } printk("\n"); printk("Disconnected Queue entries: "); scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); scb_index = ahc_inb(ahc, SCB_NEXT); } printk("\n"); ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); printk("QOUTFIFO entries: "); qoutpos = ahc->qoutfifonext; i = 0; while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { printk("%d ", ahc->qoutfifo[qoutpos]); qoutpos++; } printk("\n"); printk("Sequencer Free SCB List: "); scb_index = ahc_inb(ahc, FREE_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printk("%d ", scb_index); scb_index = ahc_inb(ahc, 
SCB_NEXT); } printk("\n"); printk("Sequencer SCB Info: "); for (i = 0; i < ahc->scb_data->maxhscbs; i++) { ahc_outb(ahc, SCBPTR, i); cur_col = printk("\n%3d ", i); ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); } printk("\n"); printk("Pending list: "); i = 0; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { if (i++ > 256) break; cur_col = printk("\n%3d ", scb->hscb->tag); ahc_scb_control_print(scb->hscb->control, &cur_col, 60); ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); if ((ahc->flags & AHC_PAGESCBS) == 0) { ahc_outb(ahc, SCBPTR, scb->hscb->tag); printk("("); ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); printk(")"); } } printk("\n"); printk("Kernel Free SCB list: "); i = 0; SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { if (i++ > 256) break; printk("%d ", scb->hscb->tag); } printk("\n"); maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 
15 : 7; for (target = 0; target <= maxtarget; target++) { untagged_q = &ahc->untagged_queues[target]; if (TAILQ_FIRST(untagged_q) == NULL) continue; printk("Untagged Q(%d): ", target); i = 0; TAILQ_FOREACH(scb, untagged_q, links.tqe) { if (i++ > 256) break; printk("%d ", scb->hscb->tag); } printk("\n"); } ahc_platform_dump_card_state(ahc); printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) ahc_unpause(ahc); } /************************* Target Mode ****************************************/ #ifdef AHC_TARGET_MODE cam_status ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, struct ahc_tmode_tstate **tstate, struct ahc_tmode_lstate **lstate, int notfound_failure) { if ((ahc->features & AHC_TARGETMODE) == 0) return (CAM_REQ_INVALID); /* * Handle the 'black hole' device that sucks up * requests to unattached luns on enabled targets. */ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *tstate = NULL; *lstate = ahc->black_hole; } else { u_int max_id; max_id = (ahc->features & AHC_WIDE) ? 
16 : 8; if (ccb->ccb_h.target_id >= max_id) return (CAM_TID_INVALID); if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) return (CAM_LUN_INVALID); *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; *lstate = NULL; if (*tstate != NULL) *lstate = (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; } if (notfound_failure != 0 && *lstate == NULL) return (CAM_PATH_INVALID); return (CAM_REQ_CMP); } void ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_en_lun *cel; cam_status status; u_long s; u_int target; u_int lun; u_int target_mask; u_int our_id; int error; char channel; status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, /*notfound_failure*/FALSE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if (cam_sim_bus(sim) == 0) our_id = ahc->our_id; else our_id = ahc->our_id_b; if (ccb->ccb_h.target_id != our_id) { /* * our_id represents our initiator ID, or * the ID of the first target to have an * enabled lun in target mode. There are * two cases that may preclude enabling a * target id other than our_id. * * o our_id is for an active initiator role. * Since the hardware does not support * reselections to the initiator role at * anything other than our_id, and our_id * is used by the hardware to indicate the * ID to use for both select-out and * reselect-out operations, the only target * ID we can support in this mode is our_id. * * o The MULTARGID feature is not available and * a previous target mode ID has been enabled. */ if ((ahc->features & AHC_MULTIROLE) != 0) { if ((ahc->features & AHC_MULTI_TID) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) { /* * Only allow additional targets if * the initiator role is disabled. * The hardware cannot handle a re-select-in * on the initiator id during a re-select-out * on a different target id. 
*/ status = CAM_TID_INVALID; } else if ((ahc->flags & AHC_INITIATORROLE) != 0 || ahc->enabled_luns > 0) { /* * Only allow our target id to change * if the initiator role is not configured * and there are no enabled luns which * are attached to the currently registered * scsi id. */ status = CAM_TID_INVALID; } } else if ((ahc->features & AHC_MULTI_TID) == 0 && ahc->enabled_luns > 0) { status = CAM_TID_INVALID; } } if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } /* * We now have an id that is valid. * If we aren't in target mode, switch modes. */ if ((ahc->flags & AHC_TARGETROLE) == 0 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { u_long s; ahc_flag saved_flags; printk("Configuring Target Mode\n"); ahc_lock(ahc, &s); if (LIST_FIRST(&ahc->pending_scbs) != NULL) { ccb->ccb_h.status = CAM_BUSY; ahc_unlock(ahc, &s); return; } saved_flags = ahc->flags; ahc->flags |= AHC_TARGETROLE; if ((ahc->features & AHC_MULTIROLE) == 0) ahc->flags &= ~AHC_INITIATORROLE; ahc_pause(ahc); error = ahc_loadseq(ahc); if (error != 0) { /* * Restore original configuration and notify * the caller that we cannot support target mode. * Since the adapter started out in this * configuration, the firmware load will succeed, * so there is no point in checking ahc_loadseq's * return value. */ ahc->flags = saved_flags; (void)ahc_loadseq(ahc); ahc_restart(ahc); ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; return; } ahc_restart(ahc); ahc_unlock(ahc, &s); } cel = &ccb->cel; target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; channel = SIM_CHANNEL(ahc, sim); target_mask = 0x01 << target; if (channel == 'B') target_mask <<= 8; if (cel->enable != 0) { u_int scsiseq; /* Are we already enabled?? */ if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printk("Lun already enabled\n"); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { /* * Don't (yet?) support vendor * specific commands. 
*/ ccb->ccb_h.status = CAM_REQ_INVALID; printk("Non-zero Group Codes\n"); return; } /* * Seems to be okay. * Setup our data structures. */ if (target != CAM_TARGET_WILDCARD && tstate == NULL) { tstate = ahc_alloc_tstate(ahc, target, channel); if (tstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate tstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } } lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } memset(lstate, 0, sizeof(*lstate)); status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { kfree(lstate); xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); ahc_lock(ahc, &s); ahc_pause(ahc); if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = lstate; ahc->enabled_luns++; if ((ahc->features & AHC_MULTI_TID) != 0) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask |= target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } else { u_int our_id; char channel; channel = SIM_CHANNEL(ahc, sim); our_id = SIM_SCSI_ID(ahc, sim); /* * This can only happen if selections * are not enabled */ if (target != our_id) { u_int sblkctl; char cur_channel; int swap; sblkctl = ahc_inb(ahc, SBLKCTL); cur_channel = (sblkctl & SELBUSB) ? 
'B' : 'A'; if ((ahc->features & AHC_TWIN) == 0) cur_channel = 'A'; swap = cur_channel != channel; if (channel == 'A') ahc->our_id = target; else ahc->our_id_b = target; if (swap) ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); ahc_outb(ahc, SCSIID, target); if (swap) ahc_outb(ahc, SBLKCTL, sblkctl); } } } else ahc->black_hole = lstate; /* Allow select-in operations */ if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); } ahc_unpause(ahc); ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printk("Lun now enabled for target mode\n"); } else { struct scb *scb; int i, empty; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } ahc_lock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { struct ccb_hdr *ccbh; ccbh = &scb->io_ctx->ccb_h; if (ccbh->func_code == XPT_CONT_TARGET_IO && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ printk("CTIO pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; ahc_unlock(ahc, &s); return; } } if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printk("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printk("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { ahc_unlock(ahc, &s); return; } xpt_print_path(ccb->ccb_h.path); printk("Target mode disabled\n"); xpt_free_path(lstate->path); kfree(lstate); ahc_pause(ahc); /* Can we clean up the target too? 
 */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahc->enabled_luns--;
			/* Is this target left with any enabled luns? */
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahc_free_tstate(ahc, target, channel,
						/*force*/FALSE);
				if (ahc->features & AHC_MULTI_TID) {
					u_int targid_mask;

					targid_mask = ahc_inb(ahc, TARGID)
						    | (ahc_inb(ahc, TARGID + 1)
						       << 8);

					/* Stop answering selections for
					 * this target id. */
					targid_mask &= ~target_mask;
					ahc_outb(ahc, TARGID, targid_mask);
					ahc_outb(ahc, TARGID+1,
						 (targid_mask >> 8));
					ahc_update_scsiid(ahc, targid_mask);
				}
			}
		} else {

			ahc->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahc->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq;

			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);

			if ((ahc->features & AHC_MULTIROLE) == 0) {
				printk("Configuring Initiator Mode\n");
				ahc->flags &= ~AHC_TARGETROLE;
				ahc->flags |= AHC_INITIATORROLE;
				/*
				 * Returning to a configuration that
				 * fit previously will always succeed.
				 */
				(void)ahc_loadseq(ahc);
				ahc_restart(ahc);
				/*
				 * Unpaused.  The extra unpause
				 * that follows is harmless.
				 */
			}
		}
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
	}
}

/*
 * Keep the OID field of SCSIID consistent with the TARGID
 * selection-enable mask: if the current OID is no longer enabled in
 * targid_mask, switch it to the lowest enabled id (or our_id when the
 * mask is empty).  Only valid on multi-TID capable chips.
 */
static void
ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
{
	u_int scsiid_mask;
	u_int scsiid;

	if ((ahc->features & AHC_MULTI_TID) == 0)
		panic("ahc_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
	 */
	if ((ahc->features & AHC_ULTRA2) != 0)
		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
	else
		scsiid = ahc_inb(ahc, SCSIID);
	scsiid_mask = 0x1 << (scsiid & OID);
	if ((targid_mask & scsiid_mask) == 0) {
		u_int our_id;

		/* ffs counts from 1 */
		our_id = ffs(targid_mask);
		if (our_id == 0)
			our_id = ahc->our_id;
		else
			our_id--;
		scsiid &= TID;
		scsiid |= our_id;
	}
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
	else
		ahc_outb(ahc, SCSIID, scsiid);
}

/*
 * Drain incoming target-mode commands from the target command FIFO,
 * handing each to ahc_handle_target_cmd() until one cannot be
 * processed (no ATIO available) or the FIFO is empty.
 */
static void
ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
{
	struct target_cmd *cmd;

	/*
	 * If the card supports auto-access pause,
	 * we can access the card directly regardless
	 * of whether it is paused or not.
	 */
	if ((ahc->features & AHC_AUTOPAUSE) != 0)
		paused = TRUE;

	ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {

		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
		 */
		if (ahc_handle_target_cmd(ahc, cmd) != 0)
			break;

		cmd->cmd_valid = 0;
		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
				sizeof(struct target_cmd),
				BUS_DMASYNC_PREREAD);
		ahc->tqinfifonext++;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
 */
		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {

			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
				u_int hs_mailbox;

				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
				hs_mailbox &= ~HOST_TQINPOS;
				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
			} else {
				/* KERNEL_TQINPOS lives in sequencer space;
				 * pause the chip around the update unless
				 * already paused. */
				if (!paused)
					ahc_pause(ahc);
				ahc_outb(ahc, KERNEL_TQINPOS,
					 ahc->tqinfifonext & HOST_TQINPOS);
				if (!paused)
					ahc_unpause(ahc);
			}
		}
	}
}

/*
 * Convert one incoming target-mode command into an ATIO for the
 * peripheral driver that has this lun enabled (or the black hole
 * device for disabled luns).  Returns non-zero when no ATIO is
 * available, in which case the FIFO is marked blocked and the
 * command is left in place.
 */
static int
ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
{
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_tmode_lstate *lstate;
	struct	ccb_accept_tio *atio;
	uint8_t *byte;
	int	initiator;
	int	target;
	int	lun;

	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
	target = SCSIID_OUR_ID(cmd->scsiid);
	lun = (cmd->identify & MSG_IDENTIFY_LUNMASK);

	byte = cmd->bytes;
	tstate = ahc->enabled_targets[target];
	lstate = NULL;
	if (tstate != NULL)
		lstate = tstate->enabled_luns[lun];

	/*
	 * Commands for disabled luns go to the black hole driver.
	 */
	if (lstate == NULL)
		lstate = ahc->black_hole;

	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
	if (atio == NULL) {
		ahc->flags |= AHC_TQINFIFO_BLOCKED;
		/*
		 * Wait for more ATIOs from the peripheral driver for this lun.
		 */
		if (bootverbose)
			printk("%s: ATIOs exhausted\n", ahc_name(ahc));
		return (1);
	} else
		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
#if 0
	printk("Incoming command from %d for %d:%d%s\n",
	       initiator, target, lun,
	       lstate == ahc->black_hole ? "(Black Holed)" : "");
#endif
	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);

	if (lstate == ahc->black_hole) {
		/* Fill in the wildcards */
		atio->ccb_h.target_id = target;
		atio->ccb_h.target_lun = lun;
	}

	/*
	 * Package it up and send it off to
	 * whomever has this lun enabled.
	 */
	atio->sense_len = 0;
	atio->init_id = initiator;
	if (byte[0] != 0xFF) {
		/* Tag was included */
		atio->tag_action = *byte++;
		atio->tag_id = *byte++;
		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
	} else {
		atio->ccb_h.flags = 0;
	}
	byte++;

	/* Okay.  Now determine the cdb size based on the command code */
	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
	case 0:
		atio->cdb_len = 6;
		break;
	case 1:
	case 2:
		atio->cdb_len = 10;
		break;
	case 4:
		atio->cdb_len = 16;
		break;
	case 5:
		atio->cdb_len = 12;
		break;
	case 3:
	default:
		/* Only copy the opcode. */
		atio->cdb_len = 1;
		printk("Reserved or VU command code type encountered\n");
		break;
	}

	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);

	atio->ccb_h.status |= CAM_CDB_RECVD;

	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
		/*
		 * We weren't allowed to disconnect.
		 * We're hanging on the bus until a
		 * continue target I/O comes in response
		 * to this accept tio.
		 */
#if 0
		printk("Received Immediate Command %d:%d:%d - %p\n",
		       initiator, target, lun, ahc->pending_device);
#endif
		ahc->pending_device = lstate;
		ahc_freeze_ccb((union ccb *)atio);
		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
	}
	xpt_done((union ccb*)atio);
	return (0);
}
#endif
gpl-2.0
LeJay/android_kernel_samsung_jactiveltexx_stock
arch/arm/mach-mmp/pxa910.c
4719
5800
/*
 *  linux/arch/arm/mach-mmp/pxa910.c
 *
 *  Code specific to PXA910
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#include <asm/mach/time.h>
#include <mach/addr-map.h>
#include <mach/regs-apbc.h>
#include <mach/regs-apmu.h>
#include <mach/cputype.h>
#include <mach/irqs.h>
#include <mach/dma.h>
#include <mach/mfp.h>
#include <mach/devices.h>

#include "common.h"
#include "clock.h"

/* Base of the multi-function pin (MFPR) registers in the APB window. */
#define MFPR_VIRT_BASE	(APB_VIRT_BASE + 0x1e000)

/* Pin -> MFPR register-offset map used to program the PXA910 pin mux. */
static struct mfp_addr_map pxa910_mfp_addr_map[] __initdata =
{
	MFP_ADDR_X(GPIO0, GPIO54, 0xdc),
	MFP_ADDR_X(GPIO67, GPIO98, 0x1b8),
	MFP_ADDR_X(GPIO100, GPIO109, 0x238),

	MFP_ADDR(GPIO123, 0xcc),
	MFP_ADDR(GPIO124, 0xd0),

	MFP_ADDR(DF_IO0, 0x40),
	MFP_ADDR(DF_IO1, 0x3c),
	MFP_ADDR(DF_IO2, 0x38),
	MFP_ADDR(DF_IO3, 0x34),
	MFP_ADDR(DF_IO4, 0x30),
	MFP_ADDR(DF_IO5, 0x2c),
	MFP_ADDR(DF_IO6, 0x28),
	MFP_ADDR(DF_IO7, 0x24),
	MFP_ADDR(DF_IO8, 0x20),
	MFP_ADDR(DF_IO9, 0x1c),
	MFP_ADDR(DF_IO10, 0x18),
	MFP_ADDR(DF_IO11, 0x14),
	MFP_ADDR(DF_IO12, 0x10),
	MFP_ADDR(DF_IO13, 0xc),
	MFP_ADDR(DF_IO14, 0x8),
	MFP_ADDR(DF_IO15, 0x4),

	MFP_ADDR(DF_nCS0_SM_nCS2, 0x44),
	MFP_ADDR(DF_nCS1_SM_nCS3, 0x48),
	MFP_ADDR(SM_nCS0, 0x4c),
	MFP_ADDR(SM_nCS1, 0x50),
	MFP_ADDR(DF_WEn, 0x54),
	MFP_ADDR(DF_REn, 0x58),
	MFP_ADDR(DF_CLE_SM_OEn, 0x5c),
	MFP_ADDR(DF_ALE_SM_WEn, 0x60),
	MFP_ADDR(SM_SCLK, 0x64),
	MFP_ADDR(DF_RDY0, 0x68),
	MFP_ADDR(SM_BE0, 0x6c),
	MFP_ADDR(SM_BE1, 0x70),
	MFP_ADDR(SM_ADV, 0x74),
	MFP_ADDR(DF_RDY1, 0x78),
	MFP_ADDR(SM_ADVMUX, 0x7c),
	MFP_ADDR(SM_RDY, 0x80),

	MFP_ADDR_X(MMC1_DAT7, MMC1_WP, 0x84),

	MFP_ADDR_END,
};

/* Initialize the interrupt control unit (ICU) for this SoC. */
void __init pxa910_init_irq(void)
{
	icu_init_irq();
}

/* APB peripheral clocks */
static APBC_CLK(uart1, PXA910_UART0, 1, 14745600);
static APBC_CLK(uart2, PXA910_UART1, 1, 14745600);
static APBC_CLK(twsi0, PXA168_TWSI0, 1, 33000000);
static APBC_CLK(twsi1, PXA168_TWSI1, 1, 33000000);
static APBC_CLK(pwm1, PXA910_PWM1, 1, 13000000);
static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
static APBC_CLK(gpio, PXA910_GPIO, 0, 13000000);
static APBC_CLK(rtc, PXA910_RTC, 8, 32768);

static APMU_CLK(nand, NAND, 0x19b, 156000000);
static APMU_CLK(u2o, USB, 0x1b, 480000000);

/* device and clock bindings */
static struct clk_lookup pxa910_clkregs[] = {
	INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
	INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
	INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL),
	INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL),
	INIT_CLKREG(&clk_pwm1, "pxa910-pwm.0", NULL),
	INIT_CLKREG(&clk_pwm2, "pxa910-pwm.1", NULL),
	INIT_CLKREG(&clk_pwm3, "pxa910-pwm.2", NULL),
	INIT_CLKREG(&clk_pwm4, "pxa910-pwm.3", NULL),
	INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
	INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
	INIT_CLKREG(&clk_u2o, "pxa-u2o", "U2OCLK"),
	INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL),
};

/* One-time SoC setup: pin mux tables, DMA and the clock lookup table. */
static int __init pxa910_init(void)
{
	if (cpu_is_pxa910()) {
		mfp_init_base(MFPR_VIRT_BASE);
		mfp_init_addr(pxa910_mfp_addr_map);
		pxa_init_dma(IRQ_PXA910_DMA_INT0, 32);
		clkdev_add_table(ARRAY_AND_SIZE(pxa910_clkregs));
	}

	return 0;
}
postcore_initcall(pxa910_init);

/* system timer - clock enabled, 3.25MHz */
#define TIMER_CLK_RST	(APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(3))

static void __init pxa910_timer_init(void)
{
	/* reset and configure */
	__raw_writel(APBC_APBCLK | APBC_RST, APBC_PXA910_TIMERS);
	__raw_writel(TIMER_CLK_RST, APBC_PXA910_TIMERS);

	timer_init(IRQ_PXA910_AP1_TIMER1);
}

struct sys_timer pxa910_timer = {
	.init	= pxa910_timer_init,
};

/* on-chip devices */

/* NOTE: there are totally 3 UARTs on PXA910:
 *
 *   UART1   - Slow UART (can be used both by AP and CP)
 *   UART2/3 - Fast UART
 *
 * To be backward compatible with the legacy FFUART/BTUART/STUART sequence,
 * they are re-ordered as:
 *
 *   pxa910_device_uart1 - UART2 as FFUART
 *   pxa910_device_uart2 - UART3 as BTUART
 *
 * UART1 is not used by AP for the moment.
 */
PXA910_DEVICE(uart1, "pxa2xx-uart", 0, UART2, 0xd4017000, 0x30, 21, 22);
PXA910_DEVICE(uart2, "pxa2xx-uart", 1, UART3, 0xd4018000, 0x30, 23, 24);
PXA910_DEVICE(twsi0, "pxa2xx-i2c", 0, TWSI0, 0xd4011000, 0x28);
PXA910_DEVICE(twsi1, "pxa2xx-i2c", 1, TWSI1, 0xd4025000, 0x28);
PXA910_DEVICE(pwm1, "pxa910-pwm", 0, NONE, 0xd401a000, 0x10);
PXA910_DEVICE(pwm2, "pxa910-pwm", 1, NONE, 0xd401a400, 0x10);
PXA910_DEVICE(pwm3, "pxa910-pwm", 2, NONE, 0xd401a800, 0x10);
PXA910_DEVICE(pwm4, "pxa910-pwm", 3, NONE, 0xd401ac00, 0x10);
PXA910_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x80, 97, 99);

/* GPIO controller registers plus the shared mux interrupt. */
struct resource pxa910_resource_gpio[] = {
	{
		.start	= 0xd4019000,
		.end	= 0xd4019fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_PXA910_AP_GPIO,
		.end	= IRQ_PXA910_AP_GPIO,
		.name	= "gpio_mux",
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device pxa910_device_gpio = {
	.name		= "pxa-gpio",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(pxa910_resource_gpio),
	.resource	= pxa910_resource_gpio,
};

/* RTC registers plus its 1Hz tick and alarm interrupts. */
static struct resource pxa910_resource_rtc[] = {
	{
		.start	= 0xd4010000,
		.end	= 0xd401003f,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_PXA910_RTC_INT,
		.end	= IRQ_PXA910_RTC_INT,
		.name	= "rtc 1Hz",
		.flags	= IORESOURCE_IRQ,
	}, {
		.start	= IRQ_PXA910_RTC_ALARM,
		.end	= IRQ_PXA910_RTC_ALARM,
		.name	= "rtc alarm",
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device pxa910_device_rtc = {
	.name		= "sa1100-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(pxa910_resource_rtc),
	.resource	= pxa910_resource_rtc,
};
gpl-2.0
Octane70/SGH-I337M_JB_4.2.2_Kernel
arch/arm/mach-mmp/pxa910.c
4719
5800
/*
 *  linux/arch/arm/mach-mmp/pxa910.c
 *
 *  Code specific to PXA910
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#include <asm/mach/time.h>
#include <mach/addr-map.h>
#include <mach/regs-apbc.h>
#include <mach/regs-apmu.h>
#include <mach/cputype.h>
#include <mach/irqs.h>
#include <mach/dma.h>
#include <mach/mfp.h>
#include <mach/devices.h>

#include "common.h"
#include "clock.h"

/* Base of the multi-function pin (MFPR) registers in the APB window. */
#define MFPR_VIRT_BASE	(APB_VIRT_BASE + 0x1e000)

/* Pin -> MFPR register-offset map used to program the PXA910 pin mux. */
static struct mfp_addr_map pxa910_mfp_addr_map[] __initdata =
{
	MFP_ADDR_X(GPIO0, GPIO54, 0xdc),
	MFP_ADDR_X(GPIO67, GPIO98, 0x1b8),
	MFP_ADDR_X(GPIO100, GPIO109, 0x238),

	MFP_ADDR(GPIO123, 0xcc),
	MFP_ADDR(GPIO124, 0xd0),

	MFP_ADDR(DF_IO0, 0x40),
	MFP_ADDR(DF_IO1, 0x3c),
	MFP_ADDR(DF_IO2, 0x38),
	MFP_ADDR(DF_IO3, 0x34),
	MFP_ADDR(DF_IO4, 0x30),
	MFP_ADDR(DF_IO5, 0x2c),
	MFP_ADDR(DF_IO6, 0x28),
	MFP_ADDR(DF_IO7, 0x24),
	MFP_ADDR(DF_IO8, 0x20),
	MFP_ADDR(DF_IO9, 0x1c),
	MFP_ADDR(DF_IO10, 0x18),
	MFP_ADDR(DF_IO11, 0x14),
	MFP_ADDR(DF_IO12, 0x10),
	MFP_ADDR(DF_IO13, 0xc),
	MFP_ADDR(DF_IO14, 0x8),
	MFP_ADDR(DF_IO15, 0x4),

	MFP_ADDR(DF_nCS0_SM_nCS2, 0x44),
	MFP_ADDR(DF_nCS1_SM_nCS3, 0x48),
	MFP_ADDR(SM_nCS0, 0x4c),
	MFP_ADDR(SM_nCS1, 0x50),
	MFP_ADDR(DF_WEn, 0x54),
	MFP_ADDR(DF_REn, 0x58),
	MFP_ADDR(DF_CLE_SM_OEn, 0x5c),
	MFP_ADDR(DF_ALE_SM_WEn, 0x60),
	MFP_ADDR(SM_SCLK, 0x64),
	MFP_ADDR(DF_RDY0, 0x68),
	MFP_ADDR(SM_BE0, 0x6c),
	MFP_ADDR(SM_BE1, 0x70),
	MFP_ADDR(SM_ADV, 0x74),
	MFP_ADDR(DF_RDY1, 0x78),
	MFP_ADDR(SM_ADVMUX, 0x7c),
	MFP_ADDR(SM_RDY, 0x80),

	MFP_ADDR_X(MMC1_DAT7, MMC1_WP, 0x84),

	MFP_ADDR_END,
};

/* Initialize the interrupt control unit (ICU) for this SoC. */
void __init pxa910_init_irq(void)
{
	icu_init_irq();
}

/* APB peripheral clocks */
static APBC_CLK(uart1, PXA910_UART0, 1, 14745600);
static APBC_CLK(uart2, PXA910_UART1, 1, 14745600);
static APBC_CLK(twsi0, PXA168_TWSI0, 1, 33000000);
static APBC_CLK(twsi1, PXA168_TWSI1, 1, 33000000);
static APBC_CLK(pwm1, PXA910_PWM1, 1, 13000000);
static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
static APBC_CLK(gpio, PXA910_GPIO, 0, 13000000);
static APBC_CLK(rtc, PXA910_RTC, 8, 32768);

static APMU_CLK(nand, NAND, 0x19b, 156000000);
static APMU_CLK(u2o, USB, 0x1b, 480000000);

/* device and clock bindings */
static struct clk_lookup pxa910_clkregs[] = {
	INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
	INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
	INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL),
	INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL),
	INIT_CLKREG(&clk_pwm1, "pxa910-pwm.0", NULL),
	INIT_CLKREG(&clk_pwm2, "pxa910-pwm.1", NULL),
	INIT_CLKREG(&clk_pwm3, "pxa910-pwm.2", NULL),
	INIT_CLKREG(&clk_pwm4, "pxa910-pwm.3", NULL),
	INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
	INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
	INIT_CLKREG(&clk_u2o, "pxa-u2o", "U2OCLK"),
	INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL),
};

/* One-time SoC setup: pin mux tables, DMA and the clock lookup table. */
static int __init pxa910_init(void)
{
	if (cpu_is_pxa910()) {
		mfp_init_base(MFPR_VIRT_BASE);
		mfp_init_addr(pxa910_mfp_addr_map);
		pxa_init_dma(IRQ_PXA910_DMA_INT0, 32);
		clkdev_add_table(ARRAY_AND_SIZE(pxa910_clkregs));
	}

	return 0;
}
postcore_initcall(pxa910_init);

/* system timer - clock enabled, 3.25MHz */
#define TIMER_CLK_RST	(APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(3))

static void __init pxa910_timer_init(void)
{
	/* reset and configure */
	__raw_writel(APBC_APBCLK | APBC_RST, APBC_PXA910_TIMERS);
	__raw_writel(TIMER_CLK_RST, APBC_PXA910_TIMERS);

	timer_init(IRQ_PXA910_AP1_TIMER1);
}

struct sys_timer pxa910_timer = {
	.init	= pxa910_timer_init,
};

/* on-chip devices */

/* NOTE: there are totally 3 UARTs on PXA910:
 *
 *   UART1   - Slow UART (can be used both by AP and CP)
 *   UART2/3 - Fast UART
 *
 * To be backward compatible with the legacy FFUART/BTUART/STUART sequence,
 * they are re-ordered as:
 *
 *   pxa910_device_uart1 - UART2 as FFUART
 *   pxa910_device_uart2 - UART3 as BTUART
 *
 * UART1 is not used by AP for the moment.
 */
PXA910_DEVICE(uart1, "pxa2xx-uart", 0, UART2, 0xd4017000, 0x30, 21, 22);
PXA910_DEVICE(uart2, "pxa2xx-uart", 1, UART3, 0xd4018000, 0x30, 23, 24);
PXA910_DEVICE(twsi0, "pxa2xx-i2c", 0, TWSI0, 0xd4011000, 0x28);
PXA910_DEVICE(twsi1, "pxa2xx-i2c", 1, TWSI1, 0xd4025000, 0x28);
PXA910_DEVICE(pwm1, "pxa910-pwm", 0, NONE, 0xd401a000, 0x10);
PXA910_DEVICE(pwm2, "pxa910-pwm", 1, NONE, 0xd401a400, 0x10);
PXA910_DEVICE(pwm3, "pxa910-pwm", 2, NONE, 0xd401a800, 0x10);
PXA910_DEVICE(pwm4, "pxa910-pwm", 3, NONE, 0xd401ac00, 0x10);
PXA910_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x80, 97, 99);

/* GPIO controller registers plus the shared mux interrupt. */
struct resource pxa910_resource_gpio[] = {
	{
		.start	= 0xd4019000,
		.end	= 0xd4019fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_PXA910_AP_GPIO,
		.end	= IRQ_PXA910_AP_GPIO,
		.name	= "gpio_mux",
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device pxa910_device_gpio = {
	.name		= "pxa-gpio",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(pxa910_resource_gpio),
	.resource	= pxa910_resource_gpio,
};

/* RTC registers plus its 1Hz tick and alarm interrupts. */
static struct resource pxa910_resource_rtc[] = {
	{
		.start	= 0xd4010000,
		.end	= 0xd401003f,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_PXA910_RTC_INT,
		.end	= IRQ_PXA910_RTC_INT,
		.name	= "rtc 1Hz",
		.flags	= IORESOURCE_IRQ,
	}, {
		.start	= IRQ_PXA910_RTC_ALARM,
		.end	= IRQ_PXA910_RTC_ALARM,
		.name	= "rtc alarm",
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device pxa910_device_rtc = {
	.name		= "sa1100-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(pxa910_resource_rtc),
	.resource	= pxa910_resource_rtc,
};
gpl-2.0
TheFlyhalf205/android_kernel_htc_msm8960
arch/x86/platform/uv/uv_time.c
4975
10455
/* * SGI RTC clock/timer routines. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (c) 2009 Silicon Graphics, Inc. All Rights Reserved. * Copyright (c) Dimitri Sivanich */ #include <linux/clockchips.h> #include <linux/slab.h> #include <asm/uv/uv_mmrs.h> #include <asm/uv/uv_hub.h> #include <asm/uv/bios.h> #include <asm/uv/uv.h> #include <asm/apic.h> #include <asm/cpu.h> #define RTC_NAME "sgi_rtc" static cycle_t uv_read_rtc(struct clocksource *cs); static int uv_rtc_next_event(unsigned long, struct clock_event_device *); static void uv_rtc_timer_setup(enum clock_event_mode, struct clock_event_device *); static struct clocksource clocksource_uv = { .name = RTC_NAME, .rating = 299, .read = uv_read_rtc, .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static struct clock_event_device clock_event_device_uv = { .name = RTC_NAME, .features = CLOCK_EVT_FEAT_ONESHOT, .shift = 20, .rating = 400, .irq = -1, .set_next_event = uv_rtc_next_event, .set_mode = uv_rtc_timer_setup, .event_handler = NULL, }; static DEFINE_PER_CPU(struct clock_event_device, cpu_ced); /* There is one of these allocated per node */ struct uv_rtc_timer_head { spinlock_t lock; /* next cpu waiting for timer, local node relative: */ int next_cpu; /* number of cpus on this node: */ int ncpus; struct { int lcpu; /* 
systemwide logical cpu number */ u64 expires; /* next timer expiration for this cpu */ } cpu[1]; }; /* * Access to uv_rtc_timer_head via blade id. */ static struct uv_rtc_timer_head **blade_info __read_mostly; static int uv_rtc_evt_enable; /* * Hardware interface routines */ /* Send IPIs to another node */ static void uv_rtc_send_IPI(int cpu) { unsigned long apicid, val; int pnode; apicid = cpu_physical_id(cpu); pnode = uv_apicid_to_pnode(apicid); apicid |= uv_apicid_hibits; val = (1UL << UVH_IPI_INT_SEND_SHFT) | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT); uv_write_global_mmr64(pnode, UVH_IPI_INT, val); } /* Check for an RTC interrupt pending */ static int uv_intr_pending(int pnode) { if (is_uv1_hub()) return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) & UV1H_EVENT_OCCURRED0_RTC1_MASK; else return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) & UV2H_EVENT_OCCURRED2_RTC_1_MASK; } /* Setup interrupt and return non-zero if early expiration occurred. 
*/ static int uv_setup_intr(int cpu, u64 expires) { u64 val; unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits; int pnode = uv_cpu_to_pnode(cpu); uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, UVH_RTC1_INT_CONFIG_M_MASK); uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L); if (is_uv1_hub()) uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS, UV1H_EVENT_OCCURRED0_RTC1_MASK); else uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS, UV2H_EVENT_OCCURRED2_RTC_1_MASK); val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); /* Set configuration */ uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val); /* Initialize comparator value */ uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires); if (uv_read_rtc(NULL) <= expires) return 0; return !uv_intr_pending(pnode); } /* * Per-cpu timer tracking routines */ static __init void uv_rtc_deallocate_timers(void) { int bid; for_each_possible_blade(bid) { kfree(blade_info[bid]); } kfree(blade_info); } /* Allocate per-node list of cpu timer expiration times. */ static __init int uv_rtc_allocate_timers(void) { int cpu; blade_info = kmalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL); if (!blade_info) return -ENOMEM; memset(blade_info, 0, uv_possible_blades * sizeof(void *)); for_each_present_cpu(cpu) { int nid = cpu_to_node(cpu); int bid = uv_cpu_to_blade_id(cpu); int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id; struct uv_rtc_timer_head *head = blade_info[bid]; if (!head) { head = kmalloc_node(sizeof(struct uv_rtc_timer_head) + (uv_blade_nr_possible_cpus(bid) * 2 * sizeof(u64)), GFP_KERNEL, nid); if (!head) { uv_rtc_deallocate_timers(); return -ENOMEM; } spin_lock_init(&head->lock); head->ncpus = uv_blade_nr_possible_cpus(bid); head->next_cpu = -1; blade_info[bid] = head; } head->cpu[bcpu].lcpu = cpu; head->cpu[bcpu].expires = ULLONG_MAX; } return 0; } /* Find and set the next expiring timer. 
*/ static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode) { u64 lowest = ULLONG_MAX; int c, bcpu = -1; head->next_cpu = -1; for (c = 0; c < head->ncpus; c++) { u64 exp = head->cpu[c].expires; if (exp < lowest) { bcpu = c; lowest = exp; } } if (bcpu >= 0) { head->next_cpu = bcpu; c = head->cpu[bcpu].lcpu; if (uv_setup_intr(c, lowest)) /* If we didn't set it up in time, trigger */ uv_rtc_send_IPI(c); } else { uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, UVH_RTC1_INT_CONFIG_M_MASK); } } /* * Set expiration time for current cpu. * * Returns 1 if we missed the expiration time. */ static int uv_rtc_set_timer(int cpu, u64 expires) { int pnode = uv_cpu_to_pnode(cpu); int bid = uv_cpu_to_blade_id(cpu); struct uv_rtc_timer_head *head = blade_info[bid]; int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id; u64 *t = &head->cpu[bcpu].expires; unsigned long flags; int next_cpu; spin_lock_irqsave(&head->lock, flags); next_cpu = head->next_cpu; *t = expires; /* Will this one be next to go off? */ if (next_cpu < 0 || bcpu == next_cpu || expires < head->cpu[next_cpu].expires) { head->next_cpu = bcpu; if (uv_setup_intr(cpu, expires)) { *t = ULLONG_MAX; uv_rtc_find_next_timer(head, pnode); spin_unlock_irqrestore(&head->lock, flags); return -ETIME; } } spin_unlock_irqrestore(&head->lock, flags); return 0; } /* * Unset expiration time for current cpu. * * Returns 1 if this timer was pending. */ static int uv_rtc_unset_timer(int cpu, int force) { int pnode = uv_cpu_to_pnode(cpu); int bid = uv_cpu_to_blade_id(cpu); struct uv_rtc_timer_head *head = blade_info[bid]; int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id; u64 *t = &head->cpu[bcpu].expires; unsigned long flags; int rc = 0; spin_lock_irqsave(&head->lock, flags); if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) rc = 1; if (rc) { *t = ULLONG_MAX; /* Was the hardware setup for this timer? 
*/ if (head->next_cpu == bcpu) uv_rtc_find_next_timer(head, pnode); } spin_unlock_irqrestore(&head->lock, flags); return rc; } /* * Kernel interface routines. */ /* * Read the RTC. * * Starting with HUB rev 2.0, the UV RTC register is replicated across all * cachelines of it's own page. This allows faster simultaneous reads * from a given socket. */ static cycle_t uv_read_rtc(struct clocksource *cs) { unsigned long offset; if (uv_get_min_hub_revision_id() == 1) offset = 0; else offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); } /* * Program the next event, relative to now */ static int uv_rtc_next_event(unsigned long delta, struct clock_event_device *ced) { int ced_cpu = cpumask_first(ced->cpumask); return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL)); } /* * Setup the RTC timer in oneshot mode */ static void uv_rtc_timer_setup(enum clock_event_mode mode, struct clock_event_device *evt) { int ced_cpu = cpumask_first(evt->cpumask); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_RESUME: /* Nothing to do here yet */ break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: uv_rtc_unset_timer(ced_cpu, 1); break; } } static void uv_rtc_interrupt(void) { int cpu = smp_processor_id(); struct clock_event_device *ced = &per_cpu(cpu_ced, cpu); if (!ced || !ced->event_handler) return; if (uv_rtc_unset_timer(cpu, 0) != 1) return; ced->event_handler(ced); } static int __init uv_enable_evt_rtc(char *str) { uv_rtc_evt_enable = 1; return 1; } __setup("uvrtcevt", uv_enable_evt_rtc); static __init void uv_rtc_register_clockevents(struct work_struct *dummy) { struct clock_event_device *ced = &__get_cpu_var(cpu_ced); *ced = clock_event_device_uv; ced->cpumask = cpumask_of(smp_processor_id()); clockevents_register_device(ced); } static __init int uv_rtc_setup_clock(void) { int rc; if (!is_uv_system()) return -ENODEV; rc = 
clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second); if (rc) printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc); else printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n", sn_rtc_cycles_per_second/(unsigned long)1E6); if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback) return rc; /* Setup and register clockevents */ rc = uv_rtc_allocate_timers(); if (rc) goto error; x86_platform_ipi_callback = uv_rtc_interrupt; clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second, NSEC_PER_SEC, clock_event_device_uv.shift); clock_event_device_uv.min_delta_ns = NSEC_PER_SEC / sn_rtc_cycles_per_second; clock_event_device_uv.max_delta_ns = clocksource_uv.mask * (NSEC_PER_SEC / sn_rtc_cycles_per_second); rc = schedule_on_each_cpu(uv_rtc_register_clockevents); if (rc) { x86_platform_ipi_callback = NULL; uv_rtc_deallocate_timers(); goto error; } printk(KERN_INFO "UV RTC clockevents registered\n"); return 0; error: clocksource_unregister(&clocksource_uv); printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc); return rc; } arch_initcall(uv_rtc_setup_clock);
gpl-2.0
f96p/mako
drivers/usb/wusbcore/devconnect.c
5231
33439
/* * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) * Device Connect handling * * Copyright (C) 2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs * FIXME: this file needs to be broken up, it's grown too big * * * WUSB1.0[7.1, 7.5.1, ] * * WUSB device connection is kind of messy. Some background: * * When a device wants to connect it scans the UWB radio channels * looking for a WUSB Channel; a WUSB channel is defined by MMCs * (Micro Managed Commands or something like that) [see * Design-overview for more on this] . * * So, device scans the radio, finds MMCs and thus a host and checks * when the next DNTS is. It sends a Device Notification Connect * (DN_Connect); the host picks it up (through nep.c and notif.c, ends * up in wusb_devconnect_ack(), which creates a wusb_dev structure in * wusbhc->port[port_number].wusb_dev), assigns an unauth address * to the device (this means from 0x80 to 0xfe) and sends, in the MMC * a Connect Ack Information Element (ConnAck IE). * * So now the device now has a WUSB address. From now on, we use * that to talk to it in the RPipes. * * ASSUMPTIONS: * * - We use the the as device address the port number where it is * connected (port 0 doesn't exist). For unauth, it is 128 + that. 
* * ROADMAP: * * This file contains the logic for doing that--entry points: * * wusb_devconnect_ack() Ack a device until _acked() called. * Called by notif.c:wusb_handle_dn_connect() * when a DN_Connect is received. * * wusb_devconnect_acked() Ack done, release resources. * * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn() * for processing a DN_Alive pong from a device. * * wusb_handle_dn_disconnect()Called by notif.c:wusb_handle_dn() to * process a disconenct request from a * device. * * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when * disabling a port. * * wusb_devconnect_create() Called when creating the host by * lc.c:wusbhc_create(). * * wusb_devconnect_destroy() Cleanup called removing the host. Called * by lc.c:wusbhc_destroy(). * * Each Wireless USB host maintains a list of DN_Connect requests * (actually we maintain a list of pending Connect Acks, the * wusbhc->ca_list). * * LIFE CYCLE OF port->wusb_dev * * Before the @wusbhc structure put()s the reference it owns for * port->wusb_dev [and clean the wusb_dev pointer], it needs to * lock @wusbhc->mutex. 
*/ #include <linux/jiffies.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/export.h> #include "wusbhc.h" static void wusbhc_devconnect_acked_work(struct work_struct *work); static void wusb_dev_free(struct wusb_dev *wusb_dev) { if (wusb_dev) { kfree(wusb_dev->set_gtk_req); usb_free_urb(wusb_dev->set_gtk_urb); kfree(wusb_dev); } } static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) { struct wusb_dev *wusb_dev; struct urb *urb; struct usb_ctrlrequest *req; wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL); if (wusb_dev == NULL) goto err; wusb_dev->wusbhc = wusbhc; INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) goto err; wusb_dev->set_gtk_urb = urb; req = kmalloc(sizeof(*req), GFP_KERNEL); if (req == NULL) goto err; wusb_dev->set_gtk_req = req; req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE; req->bRequest = USB_REQ_SET_DESCRIPTOR; req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index); req->wIndex = 0; req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength); return wusb_dev; err: wusb_dev_free(wusb_dev); return NULL; } /* * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE * properly so that it can be added to the MMC. * * We just get the @wusbhc->ca_list and fill out the first four ones or * less (per-spec WUSB1.0[7.5, before T7-38). If the ConnectAck WUSB * IE is not allocated, we alloc it. 
* * @wusbhc->mutex must be taken */ static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc) { unsigned cnt; struct wusb_dev *dev_itr; struct wuie_connect_ack *cack_ie; cack_ie = &wusbhc->cack_ie; cnt = 0; list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) { cack_ie->blk[cnt].CDID = dev_itr->cdid; cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr; if (++cnt >= WUIE_ELT_MAX) break; } cack_ie->hdr.bLength = sizeof(cack_ie->hdr) + cnt * sizeof(cack_ie->blk[0]); } /* * Register a new device that wants to connect * * A new device wants to connect, so we add it to the Connect-Ack * list. We give it an address in the unauthorized range (bit 8 set); * user space will have to drive authorization further on. * * @dev_addr: address to use for the device (which is also the port * number). * * @wusbhc->mutex must be taken */ static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc, const char *pr_cdid, u8 port_idx) { struct device *dev = wusbhc->dev; struct wusb_dev *wusb_dev; int new_connection = wusb_dn_connect_new_connection(dnc); u8 dev_addr; int result; /* Is it registered already? */ list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node) if (!memcmp(&wusb_dev->cdid, &dnc->CDID, sizeof(wusb_dev->cdid))) return wusb_dev; /* We don't have it, create an entry, register it */ wusb_dev = wusb_dev_alloc(wusbhc); if (wusb_dev == NULL) return NULL; wusb_dev_init(wusb_dev); wusb_dev->cdid = dnc->CDID; wusb_dev->port_idx = port_idx; /* * Devices are always available within the cluster reservation * and since the hardware will take the intersection of the * per-device availability and the cluster reservation, the * per-device availability can simply be set to always * available. */ bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS); /* FIXME: handle reconnects instead of assuming connects are always new. 
*/ if (1 && new_connection == 0) new_connection = 1; if (new_connection) { dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH; dev_info(dev, "Connecting new WUSB device to address %u, " "port %u\n", dev_addr, port_idx); result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr); if (result < 0) return NULL; } wusb_dev->entry_ts = jiffies; list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list); wusbhc->cack_count++; wusbhc_fill_cack_ie(wusbhc); return wusb_dev; } /* * Remove a Connect-Ack context entry from the HCs view * * @wusbhc->mutex must be taken */ static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { list_del_init(&wusb_dev->cack_node); wusbhc->cack_count--; wusbhc_fill_cack_ie(wusbhc); } /* * @wusbhc->mutex must be taken */ static void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { wusbhc_cack_rm(wusbhc, wusb_dev); if (wusbhc->cack_count) wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); else wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr); } static void wusbhc_devconnect_acked_work(struct work_struct *work) { struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev, devconnect_acked_work); struct wusbhc *wusbhc = wusb_dev->wusbhc; mutex_lock(&wusbhc->mutex); wusbhc_devconnect_acked(wusbhc, wusb_dev); mutex_unlock(&wusbhc->mutex); wusb_dev_put(wusb_dev); } /* * Ack a device for connection * * FIXME: docs * * @pr_cdid: Printable CDID...hex Use @dnc->cdid for the real deal. * * So we get the connect ack IE (may have been allocated already), * find an empty connect block, an empty virtual port, create an * address with it (see below), make it an unauth addr [bit 7 set] and * set the MMC. * * Addresses: because WUSB hosts have no downstream hubs, we can do a * 1:1 mapping between 'port number' and device * address. 
This simplifies many things, as during this * initial connect phase the USB stack has no knoledge of * the device and hasn't assigned an address yet--we know * USB's choose_address() will use the same euristics we * use here, so we can assume which address will be assigned. * * USB stack always assigns address 1 to the root hub, so * to the port number we add 2 (thus virtual port #0 is * addr #2). * * @wusbhc shall be referenced */ static void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc, const char *pr_cdid) { int result; struct device *dev = wusbhc->dev; struct wusb_dev *wusb_dev; struct wusb_port *port; unsigned idx, devnum; mutex_lock(&wusbhc->mutex); /* Check we are not handling it already */ for (idx = 0; idx < wusbhc->ports_max; idx++) { port = wusb_port_by_idx(wusbhc, idx); if (port->wusb_dev && memcmp(&dnc->CDID, &port->wusb_dev->cdid, sizeof(dnc->CDID)) == 0) goto error_unlock; } /* Look up those fake ports we have for a free one */ for (idx = 0; idx < wusbhc->ports_max; idx++) { port = wusb_port_by_idx(wusbhc, idx); if ((port->status & USB_PORT_STAT_POWER) && !(port->status & USB_PORT_STAT_CONNECTION)) break; } if (idx >= wusbhc->ports_max) { dev_err(dev, "Host controller can't connect more devices " "(%u already connected); device %s rejected\n", wusbhc->ports_max, pr_cdid); /* NOTE: we could send a WUIE_Disconnect here, but we haven't * event acked, so the device will eventually timeout the * connection, right? 
*/ goto error_unlock; } devnum = idx + 2; /* Make sure we are using no crypto on that "virtual port" */ wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0); /* Grab a filled in Connect-Ack context, fill out the * Connect-Ack Wireless USB IE, set the MMC */ wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx); if (wusb_dev == NULL) goto error_unlock; result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); if (result < 0) goto error_unlock; /* Give the device at least 2ms (WUSB1.0[7.5.1p3]), let's do * three for a good measure */ msleep(3); port->wusb_dev = wusb_dev; port->status |= USB_PORT_STAT_CONNECTION; port->change |= USB_PORT_STAT_C_CONNECTION; /* Now the port status changed to connected; khubd will * pick the change up and try to reset the port to bring it to * the enabled state--so this process returns up to the stack * and it calls back into wusbhc_rh_port_reset(). */ error_unlock: mutex_unlock(&wusbhc->mutex); return; } /* * Disconnect a Wireless USB device from its fake port * * Marks the port as disconnected so that khubd can pick up the change * and drops our knowledge about the device. 
* * Assumes there is a device connected * * @port_index: zero based port number * * NOTE: @wusbhc->mutex is locked * * WARNING: From here it is not very safe to access anything hanging off * wusb_dev */ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, struct wusb_port *port) { struct wusb_dev *wusb_dev = port->wusb_dev; port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE; if (wusb_dev) { dev_dbg(wusbhc->dev, "disconnecting device from port %d\n", wusb_dev->port_idx); if (!list_empty(&wusb_dev->cack_node)) list_del_init(&wusb_dev->cack_node); /* For the one in cack_add() */ wusb_dev_put(wusb_dev); } port->wusb_dev = NULL; /* After a device disconnects, change the GTK (see [WUSB] * section 6.2.11.2). */ if (wusbhc->active) wusbhc_gtk_rekey(wusbhc); /* The Wireless USB part has forgotten about the device already; now * khubd's timer will pick up the disconnection and remove the USB * device from the system */ } /* * Refresh the list of keep alives to emit in the MMC * * Some devices don't respond to keep alives unless they've been * authenticated, so skip unauthenticated devices. * * We only publish the first four devices that have a coming timeout * condition. Then when we are done processing those, we go for the * next ones. We ignore the ones that have timed out already (they'll * be purged). * * This might cause the first devices to timeout the last devices in * the port array...FIXME: come up with a better algorithm? * * Note we can't do much about MMC's ops errors; we hope next refresh * will kind of handle it. 
* * NOTE: @wusbhc->mutex is locked */ static void __wusbhc_keep_alive(struct wusbhc *wusbhc) { struct device *dev = wusbhc->dev; unsigned cnt; struct wusb_dev *wusb_dev; struct wusb_port *wusb_port; struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie; unsigned keep_alives, old_keep_alives; old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); keep_alives = 0; for (cnt = 0; keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max; cnt++) { unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); wusb_port = wusb_port_by_idx(wusbhc, cnt); wusb_dev = wusb_port->wusb_dev; if (wusb_dev == NULL) continue; if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated) continue; if (time_after(jiffies, wusb_dev->entry_ts + tt)) { dev_err(dev, "KEEPALIVE: device %u timed out\n", wusb_dev->addr); __wusbhc_dev_disconnect(wusbhc, wusb_port); } else if (time_after(jiffies, wusb_dev->entry_ts + tt/2)) { /* Approaching timeout cut out, need to refresh */ ie->bDeviceAddress[keep_alives++] = wusb_dev->addr; } } if (keep_alives & 0x1) /* pad to even number ([WUSB] section 7.5.9) */ ie->bDeviceAddress[keep_alives++] = 0x7f; ie->hdr.bLength = sizeof(ie->hdr) + keep_alives*sizeof(ie->bDeviceAddress[0]); if (keep_alives > 0) wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr); else if (old_keep_alives != 0) wusbhc_mmcie_rm(wusbhc, &ie->hdr); } /* * Do a run through all devices checking for timeouts */ static void wusbhc_keep_alive_run(struct work_struct *ws) { struct delayed_work *dw = to_delayed_work(ws); struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer); mutex_lock(&wusbhc->mutex); __wusbhc_keep_alive(wusbhc); mutex_unlock(&wusbhc->mutex); queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, msecs_to_jiffies(wusbhc->trust_timeout / 2)); } /* * Find the wusb_dev from its device address. * * The device can be found directly from the address (see * wusb_cack_add() for where the device address is set to port_idx * +2), except when the address is zero. 
*/
/*
 * Look up the wusb_dev attached at WUSB device address @addr, or NULL.
 *
 * Address 0xff means "unconnected"; a positive address maps (minus the
 * unauthenticated bit 0x80 and the two reserved addresses) onto a fake
 * port index.  Address 0 is resolved by scanning all ports.
 */
static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
{
	int p;

	if (addr == 0xff) /* unconnected */
		return NULL;

	if (addr > 0) {
		/* strip the unauth bit; addresses 0/1 are reserved */
		int port = (addr & ~0x80) - 2;
		if (port < 0 || port >= wusbhc->ports_max)
			return NULL;
		return wusb_port_by_idx(wusbhc, port)->wusb_dev;
	}

	/* Look for the device with address 0. */
	for (p = 0; p < wusbhc->ports_max; p++) {
		struct wusb_dev *wusb_dev =
			wusb_port_by_idx(wusbhc, p)->wusb_dev;
		if (wusb_dev && wusb_dev->addr == addr)
			return wusb_dev;
	}
	return NULL;
}

/*
 * Handle a DN_Alive notification (WUSB1.0[7.6.1])
 *
 * This just updates the device activity timestamp and then refreshes
 * the keep alive IE.
 *
 * @wusbhc shall be referenced and unlocked
 */
static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc,
				   struct wusb_dev *wusb_dev)
{
	mutex_lock(&wusbhc->mutex);
	wusb_dev->entry_ts = jiffies;
	__wusbhc_keep_alive(wusbhc);
	mutex_unlock(&wusbhc->mutex);
}

/*
 * Handle a DN_Connect notification (WUSB1.0[7.6.1])
 *
 * @wusbhc
 * @pkt_hdr
 * @size: Size of the buffer where the notification resides; if the
 *        notification data suggests there should be more data than
 *        available, an error will be signaled and the whole buffer
 *        consumed.
 *
 * @wusbhc->mutex shall be held
 */
static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
				     struct wusb_dn_hdr *dn_hdr,
				     size_t size)
{
	struct device *dev = wusbhc->dev;
	struct wusb_dn_connect *dnc;
	char pr_cdid[WUSB_CKHDID_STRSIZE];
	static const char *beacon_behaviour[] = {
		"reserved",
		"self-beacon",
		"directed-beacon",
		"no-beacon"
	};

	/* reject truncated notifications before touching the payload */
	if (size < sizeof(*dnc)) {
		dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n",
			size, sizeof(*dnc));
		return;
	}

	dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr);
	ckhdid_printf(pr_cdid, sizeof(pr_cdid), &dnc->CDID);
	dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n",
		 pr_cdid,
		 wusb_dn_connect_prev_dev_addr(dnc),
		 beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)],
		 wusb_dn_connect_new_connection(dnc) ?
			"connect" : "reconnect");
	/* ACK the connect */
	wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid);
}

/*
 * Handle a DN_Disconnect notification (WUSB1.0[7.6.1])
 *
 * Device is going down -- do the disconnect.
 *
 * @wusbhc shall be referenced and unlocked
 */
static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc,
					struct wusb_dev *wusb_dev)
{
	struct device *dev = wusbhc->dev;

	dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n",
		 wusb_dev->addr);

	mutex_lock(&wusbhc->mutex);
	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc,
		wusb_dev->port_idx));
	mutex_unlock(&wusbhc->mutex);
}

/*
 * Handle a Device Notification coming a host
 *
 * The Device Notification comes from a host (HWA, DWA or WHCI)
 * wrapped in a set of headers. Somebody else has peeled off those
 * headers for us and we just get one Device Notifications.
 *
 * Invalid DNs (e.g., too short) are discarded.
 *
 * @wusbhc shall be referenced
 *
 * FIXMES:
 *  - implement priorities as in WUSB1.0[Table 7-55]?
 */
void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
		      struct wusb_dn_hdr *dn_hdr, size_t size)
{
	struct device *dev = wusbhc->dev;
	struct wusb_dev *wusb_dev;

	if (size < sizeof(struct wusb_dn_hdr)) {
		dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
			(int)size, (int)sizeof(struct wusb_dn_hdr));
		return;
	}

	/* Only DN_Connect is legal from a device we don't know yet. */
	wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
	if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
		dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
			dn_hdr->bType, srcaddr);
		return;
	}

	switch (dn_hdr->bType) {
	case WUSB_DN_CONNECT:
		wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
		break;
	case WUSB_DN_ALIVE:
		wusbhc_handle_dn_alive(wusbhc, wusb_dev);
		break;
	case WUSB_DN_DISCONNECT:
		wusbhc_handle_dn_disconnect(wusbhc, wusb_dev);
		break;
	case WUSB_DN_MASAVAILCHANGED:
	case WUSB_DN_RWAKE:
	case WUSB_DN_SLEEP:
		/* FIXME: handle these DNs. */
		break;
	case WUSB_DN_EPRDY:
		/* The hardware handles these. */
		break;
	default:
		dev_warn(dev, "unknown DN %u (%d octets) from %u\n",
			 dn_hdr->bType, (int)size, srcaddr);
	}
}
EXPORT_SYMBOL_GPL(wusbhc_handle_dn);

/*
 * Disconnect a WUSB device from a the cluster
 *
 * @wusbhc
 * @port     Fake port where the device is (wusbhc index, not USB port
 *           number).
 *
 * In Wireless USB, a disconnect is basically telling the device he is
 * being disconnected and forgetting about him.
 *
 * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100
 * ms and then keep going.
 *
 * We don't do much in case of error; we always pretend we disabled
 * the port and disconnected the device. If physically the request
 * didn't get there (many things can fail in the way there), the stack
 * will reject the device's communication attempts.
 *
 * @wusbhc should be refcounted and locked
 */
void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx)
{
	int result;
	struct device *dev = wusbhc->dev;
	struct wusb_dev *wusb_dev;
	struct wuie_disconnect *ie;

	wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
	if (wusb_dev == NULL) {
		/* reset no device? ignore */
		dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n",
			port_idx);
		return;
	}
	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));

	/* best effort: if the IE can't be allocated or set we still
	 * consider the device disconnected (see comment above). */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (ie == NULL)
		return;
	ie->hdr.bLength = sizeof(*ie);
	ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT;
	ie->bDeviceAddress = wusb_dev->addr;
	result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
	if (result < 0)
		dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result);
	else {
		/* At least 6 MMCs, assuming at least 1 MMC per zone. */
		msleep(7*4);
		wusbhc_mmcie_rm(wusbhc, &ie->hdr);
	}
	kfree(ie);
}

/*
 * Walk over the BOS descriptor, verify and grok it
 *
 * @usb_dev: referenced
 * @wusb_dev: referenced and unlocked
 *
 * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a
 * "flexible" way to wrap all kinds of descriptors inside an standard
 * descriptor (wonder why they didn't use normal descriptors,
 * btw). Not like they lack code.
 *
 * At the end we go to look for the WUSB Device Capabilities
 * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor
 * that is part of the BOS descriptor set. That tells us what does the
 * device support (dual role, beacon type, UWB PHY rates).
 */
static int wusb_dev_bos_grok(struct usb_device *usb_dev,
			     struct wusb_dev *wusb_dev,
			     struct usb_bos_descriptor *bos, size_t desc_size)
{
	ssize_t result;
	struct device *dev = &usb_dev->dev;
	void *itr, *top;

	/* Walk over BOS capabilities, verify them */
	itr = (void *)bos + sizeof(*bos);
	top = itr + desc_size - sizeof(*bos);
	while (itr < top) {
		struct usb_dev_cap_header *cap_hdr = itr;
		size_t cap_size;
		u8 cap_type;
		if (top - itr < sizeof(*cap_hdr)) {
			dev_err(dev, "Device BUG? premature end of BOS header "
				"data [offset 0x%02x]: only %zu bytes left\n",
				(int)(itr - (void *)bos), top - itr);
			result = -ENOSPC;
			goto error_bad_cap;
		}
		cap_size = cap_hdr->bLength;
		cap_type = cap_hdr->bDevCapabilityType;
		/* a zero-length capability would loop forever; stop here */
		if (cap_size == 0)
			break;
		if (cap_size > top - itr) {
			dev_err(dev, "Device BUG? premature end of BOS data "
				"[offset 0x%02x cap %02x %zu bytes]: "
				"only %zu bytes left\n",
				(int)(itr - (void *)bos), cap_type, cap_size,
				top - itr);
			result = -EBADF;
			goto error_bad_cap;
		}
		switch (cap_type) {
		case USB_CAP_TYPE_WIRELESS_USB:
			if (cap_size != sizeof(*wusb_dev->wusb_cap_descr))
				dev_err(dev, "Device BUG? WUSB Capability "
					"descriptor is %zu bytes vs %zu "
					"needed\n", cap_size,
					sizeof(*wusb_dev->wusb_cap_descr));
			else
				/* points into @bos; valid while bos lives */
				wusb_dev->wusb_cap_descr = itr;
			break;
		default:
			dev_err(dev, "BUG? " "Unknown BOS capability 0x%02x "
				"(%zu bytes) at offset 0x%02x\n", cap_type,
				cap_size, (int)(itr - (void *)bos));
		}
		itr += cap_size;
	}
	result = 0;
error_bad_cap:
	return result;
}

/*
 * Add information from the BOS descriptors to the device
 *
 * @usb_dev: referenced
 * @wusb_dev: referenced and unlocked
 *
 * So what we do is we alloc a space for the BOS descriptor of 32
 * bytes; read the first four bytes which include the wTotalLength
 * field (WUSB1.0[T7-26]) and if it fits in those 32 bytes, read the
 * whole thing. If not we realloc to that size.
 *
 * Then we call the groking function, that will fill up
 * wusb_dev->wusb_cap_descr, which is what we'll need later on.
 *
 * On success @wusb_dev->bos owns the allocated buffer (freed by
 * wusb_dev_bos_rm()); on error nothing is kept.
 */
static int wusb_dev_bos_add(struct usb_device *usb_dev,
			    struct wusb_dev *wusb_dev)
{
	ssize_t result;
	struct device *dev = &usb_dev->dev;
	struct usb_bos_descriptor *bos;
	size_t alloc_size = 32, desc_size = 4;

	bos = kmalloc(alloc_size, GFP_KERNEL);
	if (bos == NULL)
		return -ENOMEM;
	/* first pass: just enough to read wTotalLength */
	result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
	if (result < 4) {
		dev_err(dev, "Can't get BOS descriptor or too short: %zd\n",
			result);
		goto error_get_descriptor;
	}
	desc_size = le16_to_cpu(bos->wTotalLength);
	if (desc_size >= alloc_size) {
		kfree(bos);
		alloc_size = desc_size;
		bos = kmalloc(alloc_size, GFP_KERNEL);
		if (bos == NULL)
			return -ENOMEM;
	}
	/* second pass: the whole descriptor set */
	/* NOTE(review): result is ssize_t, desc_size is size_t; the
	 * != comparison promotes to unsigned — fine here because the
	 * result < 0 check comes first. */
	result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
	if (result < 0 || result != desc_size) {
		dev_err(dev, "Can't get BOS descriptor or too short (need "
			"%zu bytes): %zd\n", desc_size, result);
		goto error_get_descriptor;
	}
	if (result < sizeof(*bos)
	    || le16_to_cpu(bos->wTotalLength) != desc_size) {
		dev_err(dev, "Can't get BOS descriptor or too short (need "
			"%zu bytes): %zd\n", desc_size, result);
		goto error_get_descriptor;
	}
	result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result);
	if (result < 0)
		goto error_bad_bos;
	wusb_dev->bos = bos;
	return 0;

error_bad_bos:
error_get_descriptor:
	kfree(bos);
	wusb_dev->wusb_cap_descr = NULL;
	return result;
}
/* Release the BOS buffer owned by @wusb_dev and the pointer into it. */
static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev)
{
	kfree(wusb_dev->bos);
	wusb_dev->wusb_cap_descr = NULL;
};

/* Fallback WUSB capability descriptor; see WUSB1.0[7.4.1]. */
static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
	.bLength = sizeof(wusb_cap_descr_default),
	.bDescriptorType = USB_DT_DEVICE_CAPABILITY,
	.bDevCapabilityType = USB_CAP_TYPE_WIRELESS_USB,

	.bmAttributes = USB_WIRELESS_BEACON_NONE,
	.wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53),
	.bmTFITXPowerInfo = 0,
	.bmFFITXPowerInfo = 0,
	.bmBandGroup = cpu_to_le16(0x0001),	/* WUSB1.0[7.4.1] bottom */
	.bReserved = 0
};

/*
 * USB stack's device addition Notifier Callback
 *
 * Called from drivers/usb/core/hub.c when a new device is added; we
 * use this hook to perform certain WUSB specific setup work on the
 * new device. As well, it is the first time we can connect the
 * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a
 * reference that we'll drop.
 *
 * First we need to determine if the device is a WUSB device (else we
 * ignore it). For that we use the speed setting (USB_SPEED_WIRELESS)
 * [FIXME: maybe we'd need something more definitive]. If so, we track
 * it's usb_busd and from there, the WUSB HC.
 *
 * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we
 * get the wusbhc for the device.
 *
 * We have a reference on @usb_dev (as we are called at the end of its
 * enumeration).
 *
 * NOTE: @usb_dev locked
 */
static void wusb_dev_add_ncb(struct usb_device *usb_dev)
{
	int result = 0;
	struct wusb_dev *wusb_dev;
	struct wusbhc *wusbhc;
	struct device *dev = &usb_dev->dev;
	u8 port_idx;

	if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
		return;		/* skip non wusb and wusb RHs */

	usb_set_device_state(usb_dev, USB_STATE_UNAUTHENTICATED);

	wusbhc = wusbhc_get_by_usb_dev(usb_dev);
	if (wusbhc == NULL)
		goto error_nodev;
	mutex_lock(&wusbhc->mutex);
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
	port_idx = wusb_port_no_to_idx(usb_dev->portnum);
	mutex_unlock(&wusbhc->mutex);
	if (wusb_dev == NULL)
		goto error_nodev;
	/* cross-link the usb_dev and wusb_dev, each holding a ref */
	wusb_dev->usb_dev = usb_get_dev(usb_dev);
	usb_dev->wusb_dev = wusb_dev_get(wusb_dev);
	result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev);
	if (result < 0) {
		dev_err(dev, "Cannot enable security: %d\n", result);
		goto error_sec_add;
	}
	/* Now query the device for it's BOS and attach it to wusb_dev */
	result = wusb_dev_bos_add(usb_dev, wusb_dev);
	if (result < 0) {
		dev_err(dev, "Cannot get BOS descriptors: %d\n", result);
		goto error_bos_add;
	}
	result = wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev);
	if (result < 0)
		goto error_add_sysfs;
out:
	wusb_dev_put(wusb_dev);
	wusbhc_put(wusbhc);
error_nodev:
	return;

	/* NOTE(review): this statement is unreachable (it sits after the
	 * unconditional return and before the error_add_sysfs label, and
	 * nothing jumps to it) — looks like dead code; confirm upstream. */
	wusb_dev_sysfs_rm(wusb_dev);
error_add_sysfs:
	wusb_dev_bos_rm(wusb_dev);
error_bos_add:
	wusb_dev_sec_rm(wusb_dev);
error_sec_add:
	mutex_lock(&wusbhc->mutex);
	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
	mutex_unlock(&wusbhc->mutex);
	goto out;
}

/*
 * Undo all the steps done at connection by the notifier callback
 *
 * NOTE: @usb_dev locked
 */
static void wusb_dev_rm_ncb(struct usb_device *usb_dev)
{
	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;

	if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
		return;		/* skip non wusb and wusb RHs */

	wusb_dev_sysfs_rm(wusb_dev);
	wusb_dev_bos_rm(wusb_dev);
	wusb_dev_sec_rm(wusb_dev);
	/* drop the cross-links and references taken in wusb_dev_add_ncb() */
	wusb_dev->usb_dev = NULL;
	usb_dev->wusb_dev = NULL;
	wusb_dev_put(wusb_dev);
	usb_put_dev(usb_dev);
}

/*
 * Handle notifications from the USB stack (notifier call back)
 *
 * This is called when the USB stack does a
 * usb_{bus,device}_{add,remove}() so we can do WUSB specific
 * handling. It is called with [for the case of
 * USB_DEVICE_{ADD,REMOVE} with the usb_dev locked.
 */
int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
		 void *priv)
{
	int result = NOTIFY_OK;

	switch (val) {
	case USB_DEVICE_ADD:
		wusb_dev_add_ncb(priv);
		break;
	case USB_DEVICE_REMOVE:
		wusb_dev_rm_ncb(priv);
		break;
	case USB_BUS_ADD:
		/* ignore (for now) */
	case USB_BUS_REMOVE:
		break;
	default:
		WARN_ON(1);
		result = NOTIFY_BAD;
	};
	return result;
}

/*
 * Return a referenced wusb_dev given a @wusbhc and @usb_dev
 *
 * Caller must drop the reference with wusb_dev_put(); returns NULL if
 * no device sits on the corresponding fake port.
 */
struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc,
					   struct usb_device *usb_dev)
{
	struct wusb_dev *wusb_dev;
	u8 port_idx;

	port_idx = wusb_port_no_to_idx(usb_dev->portnum);
	/* NOTE(review): bound looks off-by-one — index port_idx ==
	 * ports_max would pass the check; presumably >= was meant.
	 * Confirm against wusb_port_no_to_idx()'s range. */
	BUG_ON(port_idx > wusbhc->ports_max);
	wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
	if (wusb_dev != NULL)		/* ops, device is gone */
		wusb_dev_get(wusb_dev);
	return wusb_dev;
}
EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev);

/* kref release: unlink from the cack list and free the device. */
void wusb_dev_destroy(struct kref *_wusb_dev)
{
	struct wusb_dev *wusb_dev = container_of(_wusb_dev, struct wusb_dev,
						 refcnt);

	list_del_init(&wusb_dev->cack_node);
	wusb_dev_free(wusb_dev);
}
EXPORT_SYMBOL_GPL(wusb_dev_destroy);

/*
 * Create all the device connect handling infrastructure
 *
 * This is basically the device info array, Connect Acknowledgement
 * (cack) lists, keep-alive timers (and delayed work thread).
 */
int wusbhc_devconnect_create(struct wusbhc *wusbhc)
{
	wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE;
	wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr);
	INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run);

	wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK;
	wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr);
	INIT_LIST_HEAD(&wusbhc->cack_list);

	return 0;
}

/*
 * Release all resources taken by the devconnect stuff
 */
void wusbhc_devconnect_destroy(struct wusbhc *wusbhc)
{
	/* no op */
}

/*
 * wusbhc_devconnect_start - start accepting device connections
 * @wusbhc: the WUSB HC
 *
 * Sets the Host Info IE to accept all new connections.
 *
 * FIXME: This also enables the keep alives but this is not necessary
 * until there are connected and authenticated devices.
 */
int wusbhc_devconnect_start(struct wusbhc *wusbhc)
{
	struct device *dev = wusbhc->dev;
	struct wuie_host_info *hi;
	int result;

	hi = kzalloc(sizeof(*hi), GFP_KERNEL);
	if (hi == NULL)
		return -ENOMEM;

	hi->hdr.bLength = sizeof(*hi);
	hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO;
	hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) |
				     WUIE_HI_CAP_ALL);
	hi->CHID = wusbhc->chid;
	result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr);
	if (result < 0) {
		dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result);
		goto error_mmcie_set;
	}
	/* ownership of @hi moves to wusbhc; freed in devconnect_stop() */
	wusbhc->wuie_host_info = hi;

	queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
			   (wusbhc->trust_timeout*CONFIG_HZ)/1000/2);

	return 0;

error_mmcie_set:
	kfree(hi);
	return result;
}

/*
 * wusbhc_devconnect_stop - stop managing connected devices
 * @wusbhc: the WUSB HC
 *
 * Disconnects any devices still connected, stops the keep alives and
 * removes the Host Info IE.
 *
 * NOTE(review): assumes devconnect_start() succeeded —
 * wuie_host_info is dereferenced unconditionally here; confirm the
 * callers guarantee the pairing.
 */
void wusbhc_devconnect_stop(struct wusbhc *wusbhc)
{
	int i;

	mutex_lock(&wusbhc->mutex);
	for (i = 0; i < wusbhc->ports_max; i++) {
		if (wusbhc->port[i].wusb_dev)
			__wusbhc_dev_disconnect(wusbhc, &wusbhc->port[i]);
	}
	mutex_unlock(&wusbhc->mutex);

	cancel_delayed_work_sync(&wusbhc->keep_alive_timer);
	wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr);
	kfree(wusbhc->wuie_host_info);
	wusbhc->wuie_host_info = NULL;
}

/*
 * wusb_set_dev_addr - set the WUSB device address used by the host
 * @wusbhc: the WUSB HC the device is connect to
 * @wusb_dev: the WUSB device
 * @addr: new device address
 *
 * Returns 0 on success, negative error from dev_info_set() otherwise;
 * @wusb_dev->addr is updated either way.
 */
int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
		      u8 addr)
{
	int result;

	wusb_dev->addr = addr;
	result = wusbhc->dev_info_set(wusbhc, wusb_dev);
	if (result < 0)
		dev_err(wusbhc->dev, "device %d: failed to set device "
			"address\n", wusb_dev->port_idx);
	else
		dev_info(wusbhc->dev, "device %d: %s addr %u\n",
			 wusb_dev->port_idx,
			 (addr & WUSB_DEV_ADDR_UNAUTH) ? "unauth" : "auth",
			 wusb_dev->addr);

	return result;
}
gpl-2.0
OptiPurity/kernel_lge_hammerhead
drivers/rtc/rtc-v3020.c
5231
9450
/* drivers/rtc/rtc-v3020.c
 *
 * Copyright (C) 2006 8D Technologies inc.
 * Copyright (C) 2004 Compulab Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the V3020 RTC
 *
 * Changelog:
 *
 *  10-May-2006: Raphael Assenat <raph@8d.com>
 *				- Converted to platform driver
 *				- Use the generic rtc class
 *
 *  ??-???-2004: Someone at Compulab
 *  			- Initial driver creation.
 *
 */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/types.h>
#include <linux/bcd.h>
#include <linux/rtc-v3020.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/io.h>

#undef DEBUG

struct v3020;

/*
 * The chip is talked to one bit at a time; these ops abstract the two
 * transport back-ends (single MMIO register vs. four GPIO lines).
 */
struct v3020_chip_ops {
	int (*map_io)(struct v3020 *chip, struct platform_device *pdev,
		      struct v3020_platform_data *pdata);
	void (*unmap_io)(struct v3020 *chip);
	unsigned char (*read_bit)(struct v3020 *chip);
	void (*write_bit)(struct v3020 *chip, unsigned char bit);
};

/* Indices into the v3020_gpio[] table below */
#define V3020_CS	0
#define V3020_WR	1
#define V3020_RD	2
#define V3020_IO	3

struct v3020_gpio {
	const char *name;
	unsigned int gpio;
};

struct v3020 {
	/* MMIO access */
	void __iomem *ioaddress;
	int leftshift;		/* data line bit position in the register */

	/* GPIO access */
	struct v3020_gpio *gpio;

	struct v3020_chip_ops *ops;

	struct rtc_device *rtc;
};

/* Map the single MMIO register described by the platform resource. */
static int v3020_mmio_map(struct v3020 *chip, struct platform_device *pdev,
			  struct v3020_platform_data *pdata)
{
	if (pdev->num_resources != 1)
		return -EBUSY;

	if (pdev->resource[0].flags != IORESOURCE_MEM)
		return -EBUSY;

	chip->leftshift = pdata->leftshift;
	chip->ioaddress = ioremap(pdev->resource[0].start, 1);
	if (chip->ioaddress == NULL)
		return -EBUSY;

	return 0;
}

static void v3020_mmio_unmap(struct v3020 *chip)
{
	iounmap(chip->ioaddress);
}

static void v3020_mmio_write_bit(struct v3020 *chip, unsigned char bit)
{
	writel(bit << chip->leftshift, chip->ioaddress);
}

static unsigned char v3020_mmio_read_bit(struct v3020 *chip)
{
	return !!(readl(chip->ioaddress) & (1 << chip->leftshift));
}

static struct v3020_chip_ops v3020_mmio_ops = {
	.map_io		= v3020_mmio_map,
	.unmap_io	= v3020_mmio_unmap,
	.read_bit	= v3020_mmio_read_bit,
	.write_bit	= v3020_mmio_write_bit,
};

/* GPIO numbers are filled in from platform data in v3020_gpio_map(). */
static struct v3020_gpio v3020_gpio[] = {
	{ "RTC CS", 0 },
	{ "RTC WR", 0 },
	{ "RTC RD", 0 },
	{ "RTC IO", 0 },
};

static int v3020_gpio_map(struct v3020 *chip, struct platform_device *pdev,
			  struct v3020_platform_data *pdata)
{
	int i, err;

	v3020_gpio[V3020_CS].gpio = pdata->gpio_cs;
	v3020_gpio[V3020_WR].gpio = pdata->gpio_wr;
	v3020_gpio[V3020_RD].gpio = pdata->gpio_rd;
	v3020_gpio[V3020_IO].gpio = pdata->gpio_io;

	for (i = 0; i < ARRAY_SIZE(v3020_gpio); i++) {
		err = gpio_request(v3020_gpio[i].gpio, v3020_gpio[i].name);
		if (err)
			goto err_request;

		gpio_direction_output(v3020_gpio[i].gpio, 1);
	}

	chip->gpio = v3020_gpio;

	return 0;

err_request:
	/* release the GPIOs acquired so far */
	while (--i >= 0)
		gpio_free(v3020_gpio[i].gpio);

	return err;
}

static void v3020_gpio_unmap(struct v3020 *chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(v3020_gpio); i++)
		gpio_free(v3020_gpio[i].gpio);
}

/* Clock one bit out: drive IO, pulse CS+WR low for >= 1us. */
static void v3020_gpio_write_bit(struct v3020 *chip, unsigned char bit)
{
	gpio_direction_output(chip->gpio[V3020_IO].gpio, bit);
	gpio_set_value(chip->gpio[V3020_CS].gpio, 0);
	gpio_set_value(chip->gpio[V3020_WR].gpio, 0);
	udelay(1);
	gpio_set_value(chip->gpio[V3020_WR].gpio, 1);
	gpio_set_value(chip->gpio[V3020_CS].gpio, 1);
}

/* Clock one bit in: float IO, pulse CS+RD low, sample IO. */
static unsigned char v3020_gpio_read_bit(struct v3020 *chip)
{
	int bit;

	gpio_direction_input(chip->gpio[V3020_IO].gpio);
	gpio_set_value(chip->gpio[V3020_CS].gpio, 0);
	gpio_set_value(chip->gpio[V3020_RD].gpio, 0);
	udelay(1);
	bit = !!gpio_get_value(chip->gpio[V3020_IO].gpio);
	udelay(1);
	gpio_set_value(chip->gpio[V3020_RD].gpio, 1);
	gpio_set_value(chip->gpio[V3020_CS].gpio, 1);

	return bit;
}

static struct v3020_chip_ops v3020_gpio_ops = {
	.map_io		= v3020_gpio_map,
	.unmap_io	= v3020_gpio_unmap,
	.read_bit	= v3020_gpio_read_bit,
	.write_bit	= v3020_gpio_write_bit,
};

/* Send a 4-bit register address, then (for non-commands) 8 data bits,
 * LSB first. */
static void v3020_set_reg(struct v3020 *chip, unsigned char address,
			  unsigned char data)
{
	int i;
	unsigned char tmp;

	tmp = address;
	for (i = 0; i < 4; i++) {
		chip->ops->write_bit(chip, (tmp & 1));
		tmp >>= 1;
		udelay(1);
	}

	/* Commands dont have data */
	if (!V3020_IS_COMMAND(address)) {
		for (i = 0; i < 8; i++) {
			chip->ops->write_bit(chip, (data & 1));
			data >>= 1;
			udelay(1);
		}
	}
}

/* Send a 4-bit register address, then clock in 8 data bits, LSB first. */
static unsigned char v3020_get_reg(struct v3020 *chip, unsigned char address)
{
	unsigned int data = 0;
	int i;

	for (i = 0; i < 4; i++) {
		chip->ops->write_bit(chip, (address & 1));
		address >>= 1;
		udelay(1);
	}

	for (i = 0; i < 8; i++) {
		data >>= 1;
		if (chip->ops->read_bit(chip))
			data |= 0x80;
		udelay(1);
	}

	return data;
}

static int v3020_read_time(struct device *dev, struct rtc_time *dt)
{
	struct v3020 *chip = dev_get_drvdata(dev);
	int tmp;

	/* Copy the current time to ram... */
	v3020_set_reg(chip, V3020_CMD_CLOCK2RAM, 0);

	/* ...and then read constant values. */
	tmp = v3020_get_reg(chip, V3020_SECONDS);
	dt->tm_sec	= bcd2bin(tmp);
	tmp = v3020_get_reg(chip, V3020_MINUTES);
	dt->tm_min	= bcd2bin(tmp);
	tmp = v3020_get_reg(chip, V3020_HOURS);
	dt->tm_hour	= bcd2bin(tmp);
	tmp = v3020_get_reg(chip, V3020_MONTH_DAY);
	dt->tm_mday	= bcd2bin(tmp);
	tmp = v3020_get_reg(chip, V3020_MONTH);
	dt->tm_mon    = bcd2bin(tmp) - 1;	/* chip months are 1-12 */
	tmp = v3020_get_reg(chip, V3020_WEEK_DAY);
	dt->tm_wday	= bcd2bin(tmp);
	tmp = v3020_get_reg(chip, V3020_YEAR);
	dt->tm_year = bcd2bin(tmp)+100;		/* chip year 0 == 2000 */

	dev_dbg(dev, "\n%s : Read RTC values\n", __func__);
	dev_dbg(dev, "tm_hour: %i\n", dt->tm_hour);
	dev_dbg(dev, "tm_min : %i\n", dt->tm_min);
	dev_dbg(dev, "tm_sec : %i\n", dt->tm_sec);
	dev_dbg(dev, "tm_year: %i\n", dt->tm_year);
	dev_dbg(dev, "tm_mon : %i\n", dt->tm_mon);
	dev_dbg(dev, "tm_mday: %i\n", dt->tm_mday);
	dev_dbg(dev, "tm_wday: %i\n", dt->tm_wday);

	return 0;
}

static int v3020_set_time(struct device *dev, struct rtc_time *dt)
{
	struct v3020 *chip = dev_get_drvdata(dev);

	dev_dbg(dev, "\n%s : Setting RTC values\n", __func__);
	dev_dbg(dev, "tm_sec : %i\n", dt->tm_sec);
	dev_dbg(dev, "tm_min : %i\n", dt->tm_min);
	dev_dbg(dev, "tm_hour: %i\n", dt->tm_hour);
	dev_dbg(dev, "tm_mday: %i\n", dt->tm_mday);
	dev_dbg(dev, "tm_wday: %i\n", dt->tm_wday);
	dev_dbg(dev, "tm_year: %i\n", dt->tm_year);

	/* Write all the values to ram... */
	v3020_set_reg(chip, V3020_SECONDS, 	bin2bcd(dt->tm_sec));
	v3020_set_reg(chip, V3020_MINUTES, 	bin2bcd(dt->tm_min));
	v3020_set_reg(chip, V3020_HOURS, 	bin2bcd(dt->tm_hour));
	v3020_set_reg(chip, V3020_MONTH_DAY,	bin2bcd(dt->tm_mday));
	v3020_set_reg(chip, V3020_MONTH,     bin2bcd(dt->tm_mon + 1));
	v3020_set_reg(chip, V3020_WEEK_DAY, 	bin2bcd(dt->tm_wday));
	v3020_set_reg(chip, V3020_YEAR, 	bin2bcd(dt->tm_year % 100));

	/* ...and set the clock. */
	v3020_set_reg(chip, V3020_CMD_RAM2CLOCK, 0);

	/* Compulab used this delay here. I dont know why,
	 * the datasheet does not specify a delay. */
	/*mdelay(5);*/

	return 0;
}

static const struct rtc_class_ops v3020_rtc_ops = {
	.read_time	= v3020_read_time,
	.set_time	= v3020_set_time,
};

static int rtc_probe(struct platform_device *pdev)
{
	struct v3020_platform_data *pdata = pdev->dev.platform_data;
	struct v3020 *chip;
	int retval = -EBUSY;
	int i;
	int temp;

	chip = kzalloc(sizeof *chip, GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	if (pdata->use_gpio)
		chip->ops = &v3020_gpio_ops;
	else
		chip->ops = &v3020_mmio_ops;

	retval = chip->ops->map_io(chip, pdev, pdata);
	if (retval)
		goto err_chip;

	/* Make sure the v3020 expects a communication cycle
	 * by reading 8 times */
	for (i = 0; i < 8; i++)
		temp = chip->ops->read_bit(chip);

	/* Test chip by doing a write/read sequence
	 * to the chip ram */
	v3020_set_reg(chip, V3020_SECONDS, 0x33);
	if (v3020_get_reg(chip, V3020_SECONDS) != 0x33) {
		retval = -ENODEV;
		goto err_io;
	}

	/* Make sure frequency measurement mode, test modes, and lock
	 * are all disabled */
	v3020_set_reg(chip, V3020_STATUS_0, 0x0);

	if (pdata->use_gpio)
		dev_info(&pdev->dev, "Chip available at GPIOs "
			 "%d, %d, %d, %d\n",
			 chip->gpio[V3020_CS].gpio, chip->gpio[V3020_WR].gpio,
			 chip->gpio[V3020_RD].gpio, chip->gpio[V3020_IO].gpio);
	else
		dev_info(&pdev->dev, "Chip available at "
			 "physical address 0x%llx,"
			 "data connected to D%d\n",
			 (unsigned long long)pdev->resource[0].start,
			 chip->leftshift);

	platform_set_drvdata(pdev, chip);

	chip->rtc = rtc_device_register("v3020",
		&pdev->dev, &v3020_rtc_ops, THIS_MODULE);
	if (IS_ERR(chip->rtc)) {
		retval = PTR_ERR(chip->rtc);
		goto err_io;
	}

	return 0;

err_io:
	chip->ops->unmap_io(chip);
err_chip:
	kfree(chip);

	return retval;
}

static int rtc_remove(struct platform_device *dev)
{
	struct v3020 *chip = platform_get_drvdata(dev);
	struct rtc_device *rtc = chip->rtc;

	if (rtc)
		rtc_device_unregister(rtc);

	chip->ops->unmap_io(chip);
	kfree(chip);

	return 0;
}

static struct platform_driver rtc_device_driver = {
	.probe	= rtc_probe,
	.remove = rtc_remove,
	.driver = {
		.name	= "v3020",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(rtc_device_driver);

MODULE_DESCRIPTION("V3020 RTC");
MODULE_AUTHOR("Raphael Assenat");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:v3020");
gpl-2.0
pantech-msm8974/android_kernel_pantech_msm8974
drivers/staging/rtl8192u/r819xU_firmware.c
7791
12298
/**************************************************************************************************
 * Procedure:    Init boot code/firmware code/data session
 *
 * Description:   This routine will initialize firmware. If any error occurs during the initialization
 *		process, the routine shall terminate immediately and return fail.
 *		NIC driver should call NdisOpenFile only from MiniportInitialize.
 *
 * Arguments:   The pointer of the adapter
 *
 * Returns:
 *        NDIS_STATUS_FAILURE - the following initialization process should be terminated
 *        NDIS_STATUS_SUCCESS - if firmware initialization process success
 **************************************************************************************************/

#include "r8192U.h"
#include "r8192U_hw.h"
#include "r819xU_firmware_img.h"
#include "r819xU_firmware.h"
#include <linux/firmware.h>

/* Derive the command-packet fragmentation threshold from the maximum
 * transmit buffer size and cache it in the firmware state. */
void firmware_init_param(struct net_device *dev)
{
	struct r8192_priv 	*priv = ieee80211_priv(dev);
	rt_firmware		*pfirmware = priv->pFirmware;

	pfirmware->cmdpacket_frag_thresold = GET_COMMAND_PACKET_FRAG_THRESHOLD(MAX_TRANSMIT_BUFFER_SIZE);
}

/*
 * segment the img and use the ptr and length to remember info on each segment
 *
 * The image is sent in fragments of at most the command-packet
 * threshold; each fragment is byte-swapped 32 bits at a time (little
 * to big endian) and zero-padded to a 4-byte boundary, then either
 * queued or transmitted directly depending on TX ring state.
 *
 * Returns true on success, false if an skb could not be allocated.
 */
bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buffer_len)
{
	struct r8192_priv   *priv = ieee80211_priv(dev);
	bool		    rt_status = true;
	u16		    frag_threshold;
	u16		    frag_length, frag_offset = 0;
	int		    i;

	rt_firmware	    *pfirmware = priv->pFirmware;
	struct sk_buff	    *skb;
	unsigned char	    *seg_ptr;
	cb_desc		    *tcb_desc;
	u8		    bLastIniPkt;

	firmware_init_param(dev);
	/* Fragmentation might be required */
	frag_threshold = pfirmware->cmdpacket_frag_thresold;
	do {
		if ((buffer_len - frag_offset) > frag_threshold) {
			frag_length = frag_threshold;
			bLastIniPkt = 0;
		} else {
			frag_length = buffer_len - frag_offset;
			bLastIniPkt = 1;
		}

		/* Allocate skb buffer to contain firmware info and tx
		 * descriptor info; add 4 to avoid packet appending overflow. */
#ifdef RTL8192U
		skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
#else
		skb = dev_alloc_skb(frag_length + 4);
#endif
		/* FIX: the original dereferenced skb without checking the
		 * allocation result, oopsing under memory pressure. */
		if (skb == NULL)
			return false;
		memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
		tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->queue_index = TXCMD_QUEUE;
		tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
		tcb_desc->bLastIniPkt = bLastIniPkt;

#ifdef RTL8192U
		skb_reserve(skb, USB_HWDESC_HEADER_LEN);
#endif
		seg_ptr = skb->data;
		/*
		 * Transform from little endian to big endian
		 * and pending zero
		 */
		for (i = 0; i < frag_length; i += 4) {
			*seg_ptr++ = ((i+0) < frag_length) ? code_virtual_address[i+3] : 0;
			*seg_ptr++ = ((i+1) < frag_length) ? code_virtual_address[i+2] : 0;
			*seg_ptr++ = ((i+2) < frag_length) ? code_virtual_address[i+1] : 0;
			*seg_ptr++ = ((i+3) < frag_length) ? code_virtual_address[i+0] : 0;
		}
		tcb_desc->txbuf_size = (u16)i;
		skb_put(skb, i);

		/* queue the fragment if the TX path is congested,
		 * otherwise hand it straight to the softmac */
		if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
			(!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||\
			(priv->ieee80211->queue_stop)) {
			RT_TRACE(COMP_FIRMWARE, "=====================================================> tx full!\n");
			skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
		} else {
			priv->ieee80211->softmac_hard_start_xmit(skb, dev);
		}

		code_virtual_address += frag_length;
		frag_offset += frag_length;

	} while (frag_offset < buffer_len);

	return rt_status;
}

/*
 * Send a zero-filled packet of @Length bytes on the command queue.
 *
 * Returns true on success, false if the skb allocation failed.
 */
bool fwSendNullPacket(
	struct net_device *dev,
	u32	Length
)
{
	bool	rtStatus = true;
	struct r8192_priv   *priv = ieee80211_priv(dev);
	struct sk_buff	    *skb;
	cb_desc		    *tcb_desc;
	unsigned char	    *ptr_buf;
	bool	bLastInitPacket = false;

	//PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);

	//Get TCB and local buffer from common pool.
	//(It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
	skb  = dev_alloc_skb(Length + 4);
	/* FIX: the original dereferenced skb without checking the
	 * allocation result, oopsing under memory pressure. */
	if (skb == NULL)
		return false;
	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
	tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
	tcb_desc->queue_index = TXCMD_QUEUE;
	tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
	tcb_desc->bLastIniPkt = bLastInitPacket;

	ptr_buf = skb_put(skb, Length);
	memset(ptr_buf, 0, Length);
	tcb_desc->txbuf_size = (u16)Length;

	if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
		(!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||\
		(priv->ieee80211->queue_stop)) {
		RT_TRACE(COMP_FIRMWARE, "===================NULL packet==================================> tx full!\n");
		skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
	} else {
		priv->ieee80211->softmac_hard_start_xmit(skb, dev);
	}

	//PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
	return rtStatus;
}

//-----------------------------------------------------------------------------
// Procedure:    Check whether main code is download OK. If OK, turn on CPU
//
// Description:   CPU register locates in different page against general register.
// Switch to CPU register in the begin and switch back before return // // // Arguments: The pointer of the adapter // // Returns: // NDIS_STATUS_FAILURE - the following initialization process should be terminated // NDIS_STATUS_SUCCESS - if firmware initialization process success //----------------------------------------------------------------------------- bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev) { bool rt_status = true; int check_putcodeOK_time = 200000, check_bootOk_time = 200000; u32 CPU_status = 0; /* Check whether put code OK */ do { CPU_status = read_nic_dword(dev, CPU_GEN); if(CPU_status&CPU_GEN_PUT_CODE_OK) break; }while(check_putcodeOK_time--); if(!(CPU_status&CPU_GEN_PUT_CODE_OK)) { RT_TRACE(COMP_ERR, "Download Firmware: Put code fail!\n"); goto CPUCheckMainCodeOKAndTurnOnCPU_Fail; } else { RT_TRACE(COMP_FIRMWARE, "Download Firmware: Put code ok!\n"); } /* Turn On CPU */ CPU_status = read_nic_dword(dev, CPU_GEN); write_nic_byte(dev, CPU_GEN, (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff)); mdelay(1000); /* Check whether CPU boot OK */ do { CPU_status = read_nic_dword(dev, CPU_GEN); if(CPU_status&CPU_GEN_BOOT_RDY) break; }while(check_bootOk_time--); if(!(CPU_status&CPU_GEN_BOOT_RDY)) { goto CPUCheckMainCodeOKAndTurnOnCPU_Fail; } else { RT_TRACE(COMP_FIRMWARE, "Download Firmware: Boot ready!\n"); } return rt_status; CPUCheckMainCodeOKAndTurnOnCPU_Fail: RT_TRACE(COMP_ERR, "ERR in %s()\n", __FUNCTION__); rt_status = FALSE; return rt_status; } bool CPUcheck_firmware_ready(struct net_device *dev) { bool rt_status = true; int check_time = 200000; u32 CPU_status = 0; /* Check Firmware Ready */ do { CPU_status = read_nic_dword(dev, CPU_GEN); if(CPU_status&CPU_GEN_FIRM_RDY) break; }while(check_time--); if(!(CPU_status&CPU_GEN_FIRM_RDY)) goto CPUCheckFirmwareReady_Fail; else RT_TRACE(COMP_FIRMWARE, "Download Firmware: Firmware ready!\n"); return rt_status; CPUCheckFirmwareReady_Fail: RT_TRACE(COMP_ERR, "ERR in %s()\n", __FUNCTION__); rt_status = 
false; return rt_status; } bool init_firmware(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); bool rt_status = TRUE; u32 file_length = 0; u8 *mapped_file = NULL; u32 init_step = 0; opt_rst_type_e rst_opt = OPT_SYSTEM_RESET; firmware_init_step_e starting_state = FW_INIT_STEP0_BOOT; rt_firmware *pfirmware = priv->pFirmware; const struct firmware *fw_entry; const char *fw_name[3] = { "RTL8192U/boot.img", "RTL8192U/main.img", "RTL8192U/data.img"}; int rc; RT_TRACE(COMP_FIRMWARE, " PlatformInitFirmware()==>\n"); if (pfirmware->firmware_status == FW_STATUS_0_INIT ) { /* it is called by reset */ rst_opt = OPT_SYSTEM_RESET; starting_state = FW_INIT_STEP0_BOOT; // TODO: system reset }else if(pfirmware->firmware_status == FW_STATUS_5_READY) { /* it is called by Initialize */ rst_opt = OPT_FIRMWARE_RESET; starting_state = FW_INIT_STEP2_DATA; }else { RT_TRACE(COMP_FIRMWARE, "PlatformInitFirmware: undefined firmware state\n"); } /* * Download boot, main, and data image for System reset. * Download data image for firmware reseta */ for(init_step = starting_state; init_step <= FW_INIT_STEP2_DATA; init_step++) { /* * Open Image file, and map file to contineous memory if open file success. * or read image file from array. 
Default load from IMG file */ if(rst_opt == OPT_SYSTEM_RESET) { rc = request_firmware(&fw_entry, fw_name[init_step],&priv->udev->dev); if(rc < 0 ) { RT_TRACE(COMP_ERR, "request firmware fail!\n"); goto download_firmware_fail; } if(fw_entry->size > sizeof(pfirmware->firmware_buf)) { RT_TRACE(COMP_ERR, "img file size exceed the container buffer fail!\n"); goto download_firmware_fail; } if(init_step != FW_INIT_STEP1_MAIN) { memcpy(pfirmware->firmware_buf,fw_entry->data,fw_entry->size); mapped_file = pfirmware->firmware_buf; file_length = fw_entry->size; } else { #ifdef RTL8190P memcpy(pfirmware->firmware_buf,fw_entry->data,fw_entry->size); mapped_file = pfirmware->firmware_buf; file_length = fw_entry->size; #else memset(pfirmware->firmware_buf,0,128); memcpy(&pfirmware->firmware_buf[128],fw_entry->data,fw_entry->size); mapped_file = pfirmware->firmware_buf; file_length = fw_entry->size + 128; #endif } pfirmware->firmware_buf_size = file_length; }else if(rst_opt == OPT_FIRMWARE_RESET ) { /* we only need to download data.img here */ mapped_file = pfirmware->firmware_buf; file_length = pfirmware->firmware_buf_size; } /* Download image file */ /* The firmware download process is just as following, * 1. that is each packet will be segmented and inserted to the wait queue. * 2. each packet segment will be put in the skb_buff packet. * 3. each skb_buff packet data content will already include the firmware info * and Tx descriptor info * */ rt_status = fw_download_code(dev,mapped_file,file_length); if(rst_opt == OPT_SYSTEM_RESET) { release_firmware(fw_entry); } if(rt_status != TRUE) { goto download_firmware_fail; } switch(init_step) { case FW_INIT_STEP0_BOOT: /* Download boot * initialize command descriptor. 
* will set polling bit when firmware code is also configured */ pfirmware->firmware_status = FW_STATUS_1_MOVE_BOOT_CODE; #ifdef RTL8190P // To initialize IMEM, CPU move code from 0x80000080, hence, we send 0x80 byte packet rt_status = fwSendNullPacket(dev, RTL8190_CPU_START_OFFSET); if(rt_status != true) { RT_TRACE(COMP_INIT, "fwSendNullPacket() fail ! \n"); goto download_firmware_fail; } #endif //mdelay(1000); /* * To initialize IMEM, CPU move code from 0x80000080, * hence, we send 0x80 byte packet */ break; case FW_INIT_STEP1_MAIN: /* Download firmware code. Wait until Boot Ready and Turn on CPU */ pfirmware->firmware_status = FW_STATUS_2_MOVE_MAIN_CODE; /* Check Put Code OK and Turn On CPU */ rt_status = CPUcheck_maincodeok_turnonCPU(dev); if(rt_status != TRUE) { RT_TRACE(COMP_ERR, "CPUcheck_maincodeok_turnonCPU fail!\n"); goto download_firmware_fail; } pfirmware->firmware_status = FW_STATUS_3_TURNON_CPU; break; case FW_INIT_STEP2_DATA: /* download initial data code */ pfirmware->firmware_status = FW_STATUS_4_MOVE_DATA_CODE; mdelay(1); rt_status = CPUcheck_firmware_ready(dev); if(rt_status != TRUE) { RT_TRACE(COMP_ERR, "CPUcheck_firmware_ready fail(%d)!\n",rt_status); goto download_firmware_fail; } /* wait until data code is initialized ready.*/ pfirmware->firmware_status = FW_STATUS_5_READY; break; } } RT_TRACE(COMP_FIRMWARE, "Firmware Download Success\n"); //assert(pfirmware->firmware_status == FW_STATUS_5_READY, ("Firmware Download Fail\n")); return rt_status; download_firmware_fail: RT_TRACE(COMP_ERR, "ERR in %s()\n", __FUNCTION__); rt_status = FALSE; return rt_status; } MODULE_FIRMWARE("RTL8192U/boot.img"); MODULE_FIRMWARE("RTL8192U/main.img"); MODULE_FIRMWARE("RTL8192U/data.img");
gpl-2.0
talexop/talexop_kernel_i9505_4.3
drivers/input/misc/m68kspkr.c
10095
3331
/*
 * m68k beeper driver for Linux
 *
 * Copyright (c) 2002 Richard Zidlicky
 * Copyright (c) 2002 Vojtech Pavlik
 * Copyright (c) 1992 Orest Zborowski
 *
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <asm/machdep.h>
#include <asm/io.h>

MODULE_AUTHOR("Richard Zidlicky <rz@linux-m68k.org>");
MODULE_DESCRIPTION("m68k beeper driver");
MODULE_LICENSE("GPL");

/* The single platform device registered by m68kspkr_init(). */
static struct platform_device *m68kspkr_platform_device;

/*
 * Input event handler: translate SND_BELL/SND_TONE events into a call to
 * the machine-specific mach_beep() hook.
 *
 * 'value' is the requested frequency (any non-zero SND_BELL value selects
 * a fixed 1000 Hz tone); value == 0 or an out-of-range value yields
 * count == 0, which turns the beeper off.  Returns 0 on success, -1 for
 * event types/codes this driver does not handle.
 */
static int m68kspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
	unsigned int count = 0;

	if (type != EV_SND)
		return -1;

	switch (code) {
	case SND_BELL:
		if (value)
			value = 1000;
		/* fall through: a bell is just a tone at a fixed pitch */
	case SND_TONE:
		break;
	default:
		return -1;
	}

	/* Convert frequency to a divisor of the 1193182 Hz reference clock. */
	if (value > 20 && value < 32767)
		count = 1193182 / value;

	mach_beep(count, -1);

	return 0;
}

/*
 * Probe: allocate and register the input device that exposes the beeper.
 * Returns 0 on success or a negative errno.
 */
static int __devinit m68kspkr_probe(struct platform_device *dev)
{
	struct input_dev *input_dev;
	int err;

	input_dev = input_allocate_device();
	if (!input_dev)
		return -ENOMEM;

	input_dev->name = "m68k beeper";
	input_dev->phys = "m68k/generic";
	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor = 0x001f;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;
	input_dev->dev.parent = &dev->dev;

	/* Only sound events (bell/tone) are advertised. */
	input_dev->evbit[0] = BIT_MASK(EV_SND);
	input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
	input_dev->event = m68kspkr_event;

	err = input_register_device(input_dev);
	if (err) {
		input_free_device(input_dev);
		return err;
	}

	platform_set_drvdata(dev, input_dev);

	return 0;
}

/* Remove: unregister the input device and silence the beeper. */
static int __devexit m68kspkr_remove(struct platform_device *dev)
{
	struct input_dev *input_dev = platform_get_drvdata(dev);

	input_unregister_device(input_dev);
	platform_set_drvdata(dev, NULL);
	/* turn off the speaker */
	m68kspkr_event(NULL, EV_SND, SND_BELL, 0);

	return 0;
}

static void m68kspkr_shutdown(struct platform_device *dev)
{
	/* turn off the speaker */
	m68kspkr_event(NULL, EV_SND, SND_BELL, 0);
}

static struct platform_driver m68kspkr_platform_driver = {
	.driver		= {
		.name	= "m68kspkr",
		.owner	= THIS_MODULE,
	},
	.probe		= m68kspkr_probe,
	.remove		= __devexit_p(m68kspkr_remove),
	.shutdown	= m68kspkr_shutdown,
};

/*
 * Module init: bail out if the platform provides no mach_beep() hook,
 * otherwise register the driver and a matching platform device.  On any
 * later failure, unwind in reverse order via the goto cleanup chain.
 */
static int __init m68kspkr_init(void)
{
	int err;

	if (!mach_beep) {
		printk(KERN_INFO "m68kspkr: no lowlevel beep support\n");
		return -ENODEV;
	}

	err = platform_driver_register(&m68kspkr_platform_driver);
	if (err)
		return err;

	m68kspkr_platform_device = platform_device_alloc("m68kspkr", -1);
	if (!m68kspkr_platform_device) {
		err = -ENOMEM;
		goto err_unregister_driver;
	}

	err = platform_device_add(m68kspkr_platform_device);
	if (err)
		goto err_free_device;

	return 0;

err_free_device:
	platform_device_put(m68kspkr_platform_device);
err_unregister_driver:
	platform_driver_unregister(&m68kspkr_platform_driver);

	return err;
}

static void __exit m68kspkr_exit(void)
{
	platform_device_unregister(m68kspkr_platform_device);
	platform_driver_unregister(&m68kspkr_platform_driver);
}

module_init(m68kspkr_init);
module_exit(m68kspkr_exit);
gpl-2.0
RenderBroken/msm8974_Victara-Stock_render_kernel
drivers/ide/cs5536.c
11631
7926
/*
 * CS5536 PATA support
 * (C) 2007 Martin K. Petersen <mkp@mkp.net>
 * (C) 2009 Bartlomiej Zolnierkiewicz
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Documentation:
 *	Available from AMD web site.
 *
 * The IDE timing registers for the CS5536 live in the Geode Machine
 * Specific Register file and not PCI config space. Most BIOSes
 * virtualize the PCI registers so the chip looks like a standard IDE
 * controller. Unfortunately not all implementations get this right.
 * In particular some have problems with unaligned accesses to the
 * virtualized PCI registers. This driver always does full dword
 * writes to work around the issue. Also, in case of a bad BIOS this
 * driver can be loaded with the "msr=1" parameter which forces using
 * the Machine Specific Registers to configure the device.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <asm/msr.h>

#define DRV_NAME	"cs5536"

enum {
	MSR_IDE_CFG		= 0x51300010,	/* MSR base of the IDE config block */
	PCI_IDE_CFG		= 0x40,		/* PCI config-space mirror of the same */

	/* dword register indices within the IDE config block */
	CFG			= 0,
	DTC			= 2,
	CAST			= 3,
	ETC			= 4,

	IDE_CFG_CHANEN		= (1 << 1),
	IDE_CFG_CABLE		= (1 << 17) | (1 << 16),

	/* per-drive timing fields: drive 0 in bits 31:24, drive 1 in 23:16 */
	IDE_D0_SHIFT		= 24,
	IDE_D1_SHIFT		= 16,
	IDE_DRV_MASK		= 0xff,

	IDE_CAST_D0_SHIFT	= 6,
	IDE_CAST_D1_SHIFT	= 4,
	IDE_CAST_DRV_MASK	= 0x3,
	IDE_CAST_CMD_SHIFT	= 24,
	IDE_CAST_CMD_MASK	= 0xff,

	IDE_ETC_UDMA_MASK	= 0xc0,
};

/* Module parameter: force MSR access instead of virtualized PCI config. */
static int use_msr;

/* Read one IDE config dword, via MSR or PCI config space (always dword-wide). */
static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
	if (unlikely(use_msr)) {
		u32 dummy;

		rdmsr(MSR_IDE_CFG + reg, *val, dummy);
		return 0;
	}

	return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}

/* Write one IDE config dword, via MSR or PCI config space (always dword-wide). */
static int cs5536_write(struct pci_dev *pdev, int reg, int val)
{
	if (unlikely(use_msr)) {
		wrmsr(MSR_IDE_CFG + reg, val, 0);
		return 0;
	}

	return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}

/* Program the drive's 8-bit timing field in the DTC register. */
static void cs5536_program_dtc(ide_drive_t *drive, u8 tim)
{
	struct pci_dev *pdev = to_pci_dev(drive->hwif->dev);
	int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
	u32 dtc;

	cs5536_read(pdev, DTC, &dtc);
	dtc &= ~(IDE_DRV_MASK << dshift);
	dtc |= tim << dshift;
	cs5536_write(pdev, DTC, dtc);
}

/**
 *	cs5536_cable_detect	-	detect cable type
 *	@hwif: Port to detect on
 *
 *	Perform cable detection for ATA66 capable cable.
 *
 *	Returns a cable type.
 */

static u8 cs5536_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	u32 cfg;

	cs5536_read(pdev, CFG, &cfg);

	if (cfg & IDE_CFG_CABLE)
		return ATA_CBL_PATA80;
	else
		return ATA_CBL_PATA40;
}

/**
 *	cs5536_set_pio_mode		-	PIO timing setup
 *	@hwif: ATA port
 *	@drive: ATA device
 *
 *	Programs DTC data timing plus the CAST address and command timings.
 *	The per-drive drivedata word caches DTC values: PIO timing in the
 *	low byte, MWDMA timing in bits 15:8 (see cs5536_set_dma_mode()).
 */

static void cs5536_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	static const u8 drv_timings[5] = {
		0x98, 0x55, 0x32, 0x21, 0x20,
	};

	static const u8 addr_timings[5] = {
		0x2, 0x1, 0x0, 0x0, 0x0,
	};

	static const u8 cmd_timings[5] = {
		0x99, 0x92, 0x90, 0x22, 0x20,
	};

	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	int cshift = (drive->dn & 1) ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);
	u32 cast;
	const u8 pio = drive->pio_mode - XFER_PIO_0;
	u8 cmd_pio = pio;

	/* command timing is shared by both drives: use the slower mode */
	if (pair)
		cmd_pio = min_t(u8, pio, pair->pio_mode - XFER_PIO_0);

	timings &= (IDE_DRV_MASK << 8);
	timings |= drv_timings[pio];
	ide_set_drivedata(drive, (void *)timings);

	cs5536_program_dtc(drive, drv_timings[pio]);

	cs5536_read(pdev, CAST, &cast);

	cast &= ~(IDE_CAST_DRV_MASK << cshift);
	cast |= addr_timings[pio] << cshift;

	cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
	cast |= cmd_timings[cmd_pio] << IDE_CAST_CMD_SHIFT;

	cs5536_write(pdev, CAST, cast);
}

/**
 *	cs5536_set_dma_mode		-	DMA timing setup
 *	@hwif: ATA port
 *	@drive: ATA device
 *
 *	UDMA timings go straight into ETC; MWDMA timings are only cached in
 *	drivedata bits 15:8 and switched in at transfer time by
 *	cs5536_dma_start()/cs5536_dma_end().
 */

static void cs5536_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	static const u8 udma_timings[6] = {
		0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
	};

	static const u8 mwdma_timings[3] = {
		0x67, 0x21, 0x20,
	};

	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);
	u32 etc;
	const u8 mode = drive->dma_mode;

	cs5536_read(pdev, ETC, &etc);

	if (mode >= XFER_UDMA_0) {
		etc &= ~(IDE_DRV_MASK << dshift);
		etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
	} else { /* MWDMA */
		etc &= ~(IDE_ETC_UDMA_MASK << dshift);
		timings &= IDE_DRV_MASK;
		timings |= mwdma_timings[mode - XFER_MW_DMA_0] << 8;
		ide_set_drivedata(drive, (void *)timings);
	}

	cs5536_write(pdev, ETC, etc);
}

/* Before a non-UDMA transfer, swap the cached MWDMA timing into DTC. */
static void cs5536_dma_start(ide_drive_t *drive)
{
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);

	if (drive->current_speed < XFER_UDMA_0 &&
	    (timings >> 8) != (timings & IDE_DRV_MASK))
		cs5536_program_dtc(drive, timings >> 8);

	ide_dma_start(drive);
}

/* After a non-UDMA transfer, restore the cached PIO timing into DTC. */
static int cs5536_dma_end(ide_drive_t *drive)
{
	int ret = ide_dma_end(drive);
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);

	if (drive->current_speed < XFER_UDMA_0 &&
	    (timings >> 8) != (timings & IDE_DRV_MASK))
		cs5536_program_dtc(drive, timings & IDE_DRV_MASK);

	return ret;
}

static const struct ide_port_ops cs5536_port_ops = {
	.set_pio_mode		= cs5536_set_pio_mode,
	.set_dma_mode		= cs5536_set_dma_mode,
	.cable_detect		= cs5536_cable_detect,
};

static const struct ide_dma_ops cs5536_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_start		= cs5536_dma_start,
	.dma_end		= cs5536_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
	.dma_sff_read_status	= ide_dma_sff_read_status,
};

static const struct ide_port_info cs5536_info = {
	.name		= DRV_NAME,
	.port_ops	= &cs5536_port_ops,
	.dma_ops	= &cs5536_dma_ops,
	.host_flags	= IDE_HFLAG_SINGLE,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA5,
};

/**
 *	cs5536_init_one
 *	@dev: PCI device
 *	@id: Entry in match table
 *
 *	Refuses to attach when the BIOS left the IDE channel disabled.
 */

static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	u32 cfg;

	if (use_msr)
		printk(KERN_INFO DRV_NAME ": Using MSR regs instead of PCI\n");

	cs5536_read(dev, CFG, &cfg);

	if ((cfg & IDE_CFG_CHANEN) == 0) {
		printk(KERN_ERR DRV_NAME ": disabled by BIOS\n");
		return -ENODEV;
	}

	return ide_pci_init_one(dev, &cs5536_info, NULL);
}

static const struct pci_device_id cs5536_pci_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
	{ },
};

static struct pci_driver cs5536_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= cs5536_pci_tbl,
	.probe		= cs5536_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

static int __init cs5536_init(void)
{
	return pci_register_driver(&cs5536_pci_driver);
}

static void __exit cs5536_exit(void)
{
	pci_unregister_driver(&cs5536_pci_driver);
}

MODULE_AUTHOR("Martin K. Petersen, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5536_pci_tbl);

module_param_named(msr, use_msr, int, 0644);
MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");

module_init(cs5536_init);
module_exit(cs5536_exit);
gpl-2.0
kumajaya/android_kernel_samsung_lt01
drivers/ide/cs5536.c
11631
7926
/*
 * CS5536 PATA support
 * (C) 2007 Martin K. Petersen <mkp@mkp.net>
 * (C) 2009 Bartlomiej Zolnierkiewicz
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Documentation:
 *	Available from AMD web site.
 *
 * The IDE timing registers for the CS5536 live in the Geode Machine
 * Specific Register file and not PCI config space. Most BIOSes
 * virtualize the PCI registers so the chip looks like a standard IDE
 * controller. Unfortunately not all implementations get this right.
 * In particular some have problems with unaligned accesses to the
 * virtualized PCI registers. This driver always does full dword
 * writes to work around the issue. Also, in case of a bad BIOS this
 * driver can be loaded with the "msr=1" parameter which forces using
 * the Machine Specific Registers to configure the device.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <asm/msr.h>

#define DRV_NAME	"cs5536"

enum {
	MSR_IDE_CFG		= 0x51300010,	/* MSR base of the IDE config block */
	PCI_IDE_CFG		= 0x40,		/* PCI config-space mirror of the same */

	/* dword register indices within the IDE config block */
	CFG			= 0,
	DTC			= 2,
	CAST			= 3,
	ETC			= 4,

	IDE_CFG_CHANEN		= (1 << 1),
	IDE_CFG_CABLE		= (1 << 17) | (1 << 16),

	/* per-drive timing fields: drive 0 in bits 31:24, drive 1 in 23:16 */
	IDE_D0_SHIFT		= 24,
	IDE_D1_SHIFT		= 16,
	IDE_DRV_MASK		= 0xff,

	IDE_CAST_D0_SHIFT	= 6,
	IDE_CAST_D1_SHIFT	= 4,
	IDE_CAST_DRV_MASK	= 0x3,
	IDE_CAST_CMD_SHIFT	= 24,
	IDE_CAST_CMD_MASK	= 0xff,

	IDE_ETC_UDMA_MASK	= 0xc0,
};

/* Module parameter: force MSR access instead of virtualized PCI config. */
static int use_msr;

/* Read one IDE config dword, via MSR or PCI config space (always dword-wide). */
static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
	if (unlikely(use_msr)) {
		u32 dummy;

		rdmsr(MSR_IDE_CFG + reg, *val, dummy);
		return 0;
	}

	return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}

/* Write one IDE config dword, via MSR or PCI config space (always dword-wide). */
static int cs5536_write(struct pci_dev *pdev, int reg, int val)
{
	if (unlikely(use_msr)) {
		wrmsr(MSR_IDE_CFG + reg, val, 0);
		return 0;
	}

	return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}

/* Program the drive's 8-bit timing field in the DTC register. */
static void cs5536_program_dtc(ide_drive_t *drive, u8 tim)
{
	struct pci_dev *pdev = to_pci_dev(drive->hwif->dev);
	int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
	u32 dtc;

	cs5536_read(pdev, DTC, &dtc);
	dtc &= ~(IDE_DRV_MASK << dshift);
	dtc |= tim << dshift;
	cs5536_write(pdev, DTC, dtc);
}

/**
 *	cs5536_cable_detect	-	detect cable type
 *	@hwif: Port to detect on
 *
 *	Perform cable detection for ATA66 capable cable.
 *
 *	Returns a cable type.
 */

static u8 cs5536_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	u32 cfg;

	cs5536_read(pdev, CFG, &cfg);

	if (cfg & IDE_CFG_CABLE)
		return ATA_CBL_PATA80;
	else
		return ATA_CBL_PATA40;
}

/**
 *	cs5536_set_pio_mode		-	PIO timing setup
 *	@hwif: ATA port
 *	@drive: ATA device
 *
 *	Programs DTC data timing plus the CAST address and command timings.
 *	The per-drive drivedata word caches DTC values: PIO timing in the
 *	low byte, MWDMA timing in bits 15:8 (see cs5536_set_dma_mode()).
 */

static void cs5536_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	static const u8 drv_timings[5] = {
		0x98, 0x55, 0x32, 0x21, 0x20,
	};

	static const u8 addr_timings[5] = {
		0x2, 0x1, 0x0, 0x0, 0x0,
	};

	static const u8 cmd_timings[5] = {
		0x99, 0x92, 0x90, 0x22, 0x20,
	};

	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	int cshift = (drive->dn & 1) ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);
	u32 cast;
	const u8 pio = drive->pio_mode - XFER_PIO_0;
	u8 cmd_pio = pio;

	/* command timing is shared by both drives: use the slower mode */
	if (pair)
		cmd_pio = min_t(u8, pio, pair->pio_mode - XFER_PIO_0);

	timings &= (IDE_DRV_MASK << 8);
	timings |= drv_timings[pio];
	ide_set_drivedata(drive, (void *)timings);

	cs5536_program_dtc(drive, drv_timings[pio]);

	cs5536_read(pdev, CAST, &cast);

	cast &= ~(IDE_CAST_DRV_MASK << cshift);
	cast |= addr_timings[pio] << cshift;

	cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
	cast |= cmd_timings[cmd_pio] << IDE_CAST_CMD_SHIFT;

	cs5536_write(pdev, CAST, cast);
}

/**
 *	cs5536_set_dma_mode		-	DMA timing setup
 *	@hwif: ATA port
 *	@drive: ATA device
 *
 *	UDMA timings go straight into ETC; MWDMA timings are only cached in
 *	drivedata bits 15:8 and switched in at transfer time by
 *	cs5536_dma_start()/cs5536_dma_end().
 */

static void cs5536_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	static const u8 udma_timings[6] = {
		0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
	};

	static const u8 mwdma_timings[3] = {
		0x67, 0x21, 0x20,
	};

	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);
	u32 etc;
	const u8 mode = drive->dma_mode;

	cs5536_read(pdev, ETC, &etc);

	if (mode >= XFER_UDMA_0) {
		etc &= ~(IDE_DRV_MASK << dshift);
		etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
	} else { /* MWDMA */
		etc &= ~(IDE_ETC_UDMA_MASK << dshift);
		timings &= IDE_DRV_MASK;
		timings |= mwdma_timings[mode - XFER_MW_DMA_0] << 8;
		ide_set_drivedata(drive, (void *)timings);
	}

	cs5536_write(pdev, ETC, etc);
}

/* Before a non-UDMA transfer, swap the cached MWDMA timing into DTC. */
static void cs5536_dma_start(ide_drive_t *drive)
{
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);

	if (drive->current_speed < XFER_UDMA_0 &&
	    (timings >> 8) != (timings & IDE_DRV_MASK))
		cs5536_program_dtc(drive, timings >> 8);

	ide_dma_start(drive);
}

/* After a non-UDMA transfer, restore the cached PIO timing into DTC. */
static int cs5536_dma_end(ide_drive_t *drive)
{
	int ret = ide_dma_end(drive);
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);

	if (drive->current_speed < XFER_UDMA_0 &&
	    (timings >> 8) != (timings & IDE_DRV_MASK))
		cs5536_program_dtc(drive, timings & IDE_DRV_MASK);

	return ret;
}

static const struct ide_port_ops cs5536_port_ops = {
	.set_pio_mode		= cs5536_set_pio_mode,
	.set_dma_mode		= cs5536_set_dma_mode,
	.cable_detect		= cs5536_cable_detect,
};

static const struct ide_dma_ops cs5536_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_start		= cs5536_dma_start,
	.dma_end		= cs5536_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
	.dma_sff_read_status	= ide_dma_sff_read_status,
};

static const struct ide_port_info cs5536_info = {
	.name		= DRV_NAME,
	.port_ops	= &cs5536_port_ops,
	.dma_ops	= &cs5536_dma_ops,
	.host_flags	= IDE_HFLAG_SINGLE,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA5,
};

/**
 *	cs5536_init_one
 *	@dev: PCI device
 *	@id: Entry in match table
 *
 *	Refuses to attach when the BIOS left the IDE channel disabled.
 */

static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	u32 cfg;

	if (use_msr)
		printk(KERN_INFO DRV_NAME ": Using MSR regs instead of PCI\n");

	cs5536_read(dev, CFG, &cfg);

	if ((cfg & IDE_CFG_CHANEN) == 0) {
		printk(KERN_ERR DRV_NAME ": disabled by BIOS\n");
		return -ENODEV;
	}

	return ide_pci_init_one(dev, &cs5536_info, NULL);
}

static const struct pci_device_id cs5536_pci_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
	{ },
};

static struct pci_driver cs5536_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= cs5536_pci_tbl,
	.probe		= cs5536_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

static int __init cs5536_init(void)
{
	return pci_register_driver(&cs5536_pci_driver);
}

static void __exit cs5536_exit(void)
{
	pci_unregister_driver(&cs5536_pci_driver);
}

MODULE_AUTHOR("Martin K. Petersen, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5536_pci_tbl);

module_param_named(msr, use_msr, int, 0644);
MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");

module_init(cs5536_init);
module_exit(cs5536_exit);
gpl-2.0
FrancescoCG/CrazySuperKernel-CM
sound/aoa/soundbus/sysfs.c
12143
1082
#include <linux/kernel.h> #include <linux/stat.h> /* FIX UP */ #include "soundbus.h" #define soundbus_config_of_attr(field, format_string) \ static ssize_t \ field##_show (struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct soundbus_dev *mdev = to_soundbus_device (dev); \ return sprintf (buf, format_string, mdev->ofdev.dev.of_node->field); \ } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct soundbus_dev *sdev = to_soundbus_device(dev); struct platform_device *of = &sdev->ofdev; int length; if (*sdev->modalias) { strlcpy(buf, sdev->modalias, sizeof(sdev->modalias) + 1); strcat(buf, "\n"); length = strlen(buf); } else { length = sprintf(buf, "of:N%sT%s\n", of->dev.of_node->name, of->dev.of_node->type); } return length; } soundbus_config_of_attr (name, "%s\n"); soundbus_config_of_attr (type, "%s\n"); struct device_attribute soundbus_dev_attrs[] = { __ATTR_RO(name), __ATTR_RO(type), __ATTR_RO(modalias), __ATTR_NULL };
gpl-2.0
Divaksh/Speedy-Kernel-u8500-old
scripts/kconfig/lxdialog/yesno.c
12655
2904
/*
 *  yesno.c -- implements the yes/no box
 *
 *  ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
 *  MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "dialog.h"

/*
 * Display termination buttons
 *
 * Paints both buttons, highlighting the one at index 'selected'
 * (0 = Yes, 1 = No), then parks the cursor on it.
 */
static void print_buttons(WINDOW * dialog, int height, int width, int selected)
{
	int x = width / 2 - 10;
	int y = height - 2;

	print_button(dialog, gettext(" Yes "), y, x, selected == 0);
	print_button(dialog, gettext(" No "), y, x + 13, selected == 1);

	wmove(dialog, y, x + 1 + 13 * selected);
	wrefresh(dialog);
}

/*
 * Display a dialog box with two buttons - Yes and No
 *
 * Returns 0 for Yes, 1 for No, KEY_ESC if the user pressed Escape, or
 * -ERRDISPLAYTOOSMALL when the terminal cannot fit the dialog.  On a
 * terminal resize the whole box is torn down and redrawn (do_resize).
 */
int dialog_yesno(const char *title, const char *prompt, int height, int width)
{
	int i, x, y, key = 0, button = 0;
	WINDOW *dialog;

do_resize:
	if (getmaxy(stdscr) < (height + 4))
		return -ERRDISPLAYTOOSMALL;
	if (getmaxx(stdscr) < (width + 4))
		return -ERRDISPLAYTOOSMALL;

	/* center dialog box on screen */
	x = (COLS - width) / 2;
	y = (LINES - height) / 2;

	draw_shadow(stdscr, y, x, height, width);

	dialog = newwin(height, width, y, x);
	keypad(dialog, TRUE);

	draw_box(dialog, 0, 0, height, width, dlg.dialog.atr, dlg.border.atr);

	/* horizontal separator above the button row */
	wattrset(dialog, dlg.border.atr);
	mvwaddch(dialog, height - 3, 0, ACS_LTEE);
	for (i = 0; i < width - 2; i++)
		waddch(dialog, ACS_HLINE);
	wattrset(dialog, dlg.dialog.atr);
	waddch(dialog, ACS_RTEE);

	print_title(dialog, title, width);

	wattrset(dialog, dlg.dialog.atr);
	print_autowrap(dialog, prompt, width - 2, 1, 3);

	print_buttons(dialog, height, width, 0);

	while (key != KEY_ESC) {
		key = wgetch(dialog);
		switch (key) {
		case 'Y':
		case 'y':
			delwin(dialog);
			return 0;
		case 'N':
		case 'n':
			delwin(dialog);
			return 1;

		case TAB:
		case KEY_LEFT:
		case KEY_RIGHT:
			/* toggle between the two buttons, wrapping around */
			button = ((key == KEY_LEFT ? --button : ++button) < 0) ? 1 : (button > 1 ? 0 : button);

			print_buttons(dialog, height, width, button);
			wrefresh(dialog);
			break;
		case ' ':
		case '\n':
			/* activate the currently selected button */
			delwin(dialog);
			return button;
		case KEY_ESC:
			key = on_key_esc(dialog);
			break;
		case KEY_RESIZE:
			delwin(dialog);
			on_key_resize();
			goto do_resize;
		}
	}

	delwin(dialog);
	return key;		/* ESC pressed */
}
gpl-2.0
spacecaker/Midnight_Acer_kernel
drivers/message/i2o/debug.c
13423
11212
#include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/i2o.h> static void i2o_report_util_cmd(u8 cmd); static void i2o_report_exec_cmd(u8 cmd); static void i2o_report_fail_status(u8 req_status, u32 * msg); static void i2o_report_common_status(u8 req_status); static void i2o_report_common_dsc(u16 detailed_status); /* * Used for error reporting/debugging purposes. * Report Cmd name, Request status, Detailed Status. */ void i2o_report_status(const char *severity, const char *str, struct i2o_message *m) { u32 *msg = (u32 *) m; u8 cmd = (msg[1] >> 24) & 0xFF; u8 req_status = (msg[4] >> 24) & 0xFF; u16 detailed_status = msg[4] & 0xFFFF; if (cmd == I2O_CMD_UTIL_EVT_REGISTER) return; // No status in this reply printk("%s%s: ", severity, str); if (cmd < 0x1F) // Utility cmd i2o_report_util_cmd(cmd); else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd i2o_report_exec_cmd(cmd); else printk("Cmd = %0#2x, ", cmd); // Other cmds if (msg[0] & MSG_FAIL) { i2o_report_fail_status(req_status, msg); return; } i2o_report_common_status(req_status); if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF)) i2o_report_common_dsc(detailed_status); else printk(" / DetailedStatus = %0#4x.\n", detailed_status); } /* Used to dump a message to syslog during debugging */ void i2o_dump_message(struct i2o_message *m) { #ifdef DEBUG u32 *msg = (u32 *) m; int i; printk(KERN_INFO "Dumping I2O message size %d @ %p\n", msg[0] >> 16 & 0xffff, msg); for (i = 0; i < ((msg[0] >> 16) & 0xffff); i++) printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]); #endif } /* * Used for error reporting/debugging purposes. * Following fail status are common to all classes. * The preserved message must be handled in the reply handler. 
*/ static void i2o_report_fail_status(u8 req_status, u32 * msg) { static char *FAIL_STATUS[] = { "0x80", /* not used */ "SERVICE_SUSPENDED", /* 0x81 */ "SERVICE_TERMINATED", /* 0x82 */ "CONGESTION", "FAILURE", "STATE_ERROR", "TIME_OUT", "ROUTING_FAILURE", "INVALID_VERSION", "INVALID_OFFSET", "INVALID_MSG_FLAGS", "FRAME_TOO_SMALL", "FRAME_TOO_LARGE", "INVALID_TARGET_ID", "INVALID_INITIATOR_ID", "INVALID_INITIATOR_CONTEX", /* 0x8F */ "UNKNOWN_FAILURE" /* 0xFF */ }; if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE) printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n", req_status); else printk("TRANSPORT_%s.\n", FAIL_STATUS[req_status & 0x0F]); /* Dump some details */ printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n", (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF); printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n", (msg[4] >> 8) & 0xFF, msg[4] & 0xFF); printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n", msg[5] >> 16, msg[5] & 0xFFF); printk(KERN_ERR " Severity: 0x%02X\n", (msg[4] >> 16) & 0xFF); if (msg[4] & (1 << 16)) printk(KERN_DEBUG "(FormatError), " "this msg can never be delivered/processed.\n"); if (msg[4] & (1 << 17)) printk(KERN_DEBUG "(PathError), " "this msg can no longer be delivered/processed.\n"); if (msg[4] & (1 << 18)) printk(KERN_DEBUG "(PathState), " "the system state does not allow delivery.\n"); if (msg[4] & (1 << 19)) printk(KERN_DEBUG "(Congestion), resources temporarily not available;" "do not retry immediately.\n"); } /* * Used for error reporting/debugging purposes. * Following reply status are common to all classes. 
*/ static void i2o_report_common_status(u8 req_status) { static char *REPLY_STATUS[] = { "SUCCESS", "ABORT_DIRTY", "ABORT_NO_DATA_TRANSFER", "ABORT_PARTIAL_TRANSFER", "ERROR_DIRTY", "ERROR_NO_DATA_TRANSFER", "ERROR_PARTIAL_TRANSFER", "PROCESS_ABORT_DIRTY", "PROCESS_ABORT_NO_DATA_TRANSFER", "PROCESS_ABORT_PARTIAL_TRANSFER", "TRANSACTION_ERROR", "PROGRESS_REPORT" }; if (req_status >= ARRAY_SIZE(REPLY_STATUS)) printk("RequestStatus = %0#2x", req_status); else printk("%s", REPLY_STATUS[req_status]); } /* * Used for error reporting/debugging purposes. * Following detailed status are valid for executive class, * utility class, DDM class and for transaction error replies. */ static void i2o_report_common_dsc(u16 detailed_status) { static char *COMMON_DSC[] = { "SUCCESS", "0x01", // not used "BAD_KEY", "TCL_ERROR", "REPLY_BUFFER_FULL", "NO_SUCH_PAGE", "INSUFFICIENT_RESOURCE_SOFT", "INSUFFICIENT_RESOURCE_HARD", "0x08", // not used "CHAIN_BUFFER_TOO_LARGE", "UNSUPPORTED_FUNCTION", "DEVICE_LOCKED", "DEVICE_RESET", "INAPPROPRIATE_FUNCTION", "INVALID_INITIATOR_ADDRESS", "INVALID_MESSAGE_FLAGS", "INVALID_OFFSET", "INVALID_PARAMETER", "INVALID_REQUEST", "INVALID_TARGET_ADDRESS", "MESSAGE_TOO_LARGE", "MESSAGE_TOO_SMALL", "MISSING_PARAMETER", "TIMEOUT", "UNKNOWN_ERROR", "UNKNOWN_FUNCTION", "UNSUPPORTED_VERSION", "DEVICE_BUSY", "DEVICE_NOT_AVAILABLE" }; if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE) printk(" / DetailedStatus = %0#4x.\n", detailed_status); else printk(" / %s.\n", COMMON_DSC[detailed_status]); } /* * Used for error reporting/debugging purposes */ static void i2o_report_util_cmd(u8 cmd) { switch (cmd) { case I2O_CMD_UTIL_NOP: printk("UTIL_NOP, "); break; case I2O_CMD_UTIL_ABORT: printk("UTIL_ABORT, "); break; case I2O_CMD_UTIL_CLAIM: printk("UTIL_CLAIM, "); break; case I2O_CMD_UTIL_RELEASE: printk("UTIL_CLAIM_RELEASE, "); break; case I2O_CMD_UTIL_CONFIG_DIALOG: printk("UTIL_CONFIG_DIALOG, "); break; case I2O_CMD_UTIL_DEVICE_RESERVE: printk("UTIL_DEVICE_RESERVE, 
"); break; case I2O_CMD_UTIL_DEVICE_RELEASE: printk("UTIL_DEVICE_RELEASE, "); break; case I2O_CMD_UTIL_EVT_ACK: printk("UTIL_EVENT_ACKNOWLEDGE, "); break; case I2O_CMD_UTIL_EVT_REGISTER: printk("UTIL_EVENT_REGISTER, "); break; case I2O_CMD_UTIL_LOCK: printk("UTIL_LOCK, "); break; case I2O_CMD_UTIL_LOCK_RELEASE: printk("UTIL_LOCK_RELEASE, "); break; case I2O_CMD_UTIL_PARAMS_GET: printk("UTIL_PARAMS_GET, "); break; case I2O_CMD_UTIL_PARAMS_SET: printk("UTIL_PARAMS_SET, "); break; case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY: printk("UTIL_REPLY_FAULT_NOTIFY, "); break; default: printk("Cmd = %0#2x, ", cmd); } } /* * Used for error reporting/debugging purposes */ static void i2o_report_exec_cmd(u8 cmd) { switch (cmd) { case I2O_CMD_ADAPTER_ASSIGN: printk("EXEC_ADAPTER_ASSIGN, "); break; case I2O_CMD_ADAPTER_READ: printk("EXEC_ADAPTER_READ, "); break; case I2O_CMD_ADAPTER_RELEASE: printk("EXEC_ADAPTER_RELEASE, "); break; case I2O_CMD_BIOS_INFO_SET: printk("EXEC_BIOS_INFO_SET, "); break; case I2O_CMD_BOOT_DEVICE_SET: printk("EXEC_BOOT_DEVICE_SET, "); break; case I2O_CMD_CONFIG_VALIDATE: printk("EXEC_CONFIG_VALIDATE, "); break; case I2O_CMD_CONN_SETUP: printk("EXEC_CONN_SETUP, "); break; case I2O_CMD_DDM_DESTROY: printk("EXEC_DDM_DESTROY, "); break; case I2O_CMD_DDM_ENABLE: printk("EXEC_DDM_ENABLE, "); break; case I2O_CMD_DDM_QUIESCE: printk("EXEC_DDM_QUIESCE, "); break; case I2O_CMD_DDM_RESET: printk("EXEC_DDM_RESET, "); break; case I2O_CMD_DDM_SUSPEND: printk("EXEC_DDM_SUSPEND, "); break; case I2O_CMD_DEVICE_ASSIGN: printk("EXEC_DEVICE_ASSIGN, "); break; case I2O_CMD_DEVICE_RELEASE: printk("EXEC_DEVICE_RELEASE, "); break; case I2O_CMD_HRT_GET: printk("EXEC_HRT_GET, "); break; case I2O_CMD_ADAPTER_CLEAR: printk("EXEC_IOP_CLEAR, "); break; case I2O_CMD_ADAPTER_CONNECT: printk("EXEC_IOP_CONNECT, "); break; case I2O_CMD_ADAPTER_RESET: printk("EXEC_IOP_RESET, "); break; case I2O_CMD_LCT_NOTIFY: printk("EXEC_LCT_NOTIFY, "); break; case I2O_CMD_OUTBOUND_INIT: 
printk("EXEC_OUTBOUND_INIT, "); break; case I2O_CMD_PATH_ENABLE: printk("EXEC_PATH_ENABLE, "); break; case I2O_CMD_PATH_QUIESCE: printk("EXEC_PATH_QUIESCE, "); break; case I2O_CMD_PATH_RESET: printk("EXEC_PATH_RESET, "); break; case I2O_CMD_STATIC_MF_CREATE: printk("EXEC_STATIC_MF_CREATE, "); break; case I2O_CMD_STATIC_MF_RELEASE: printk("EXEC_STATIC_MF_RELEASE, "); break; case I2O_CMD_STATUS_GET: printk("EXEC_STATUS_GET, "); break; case I2O_CMD_SW_DOWNLOAD: printk("EXEC_SW_DOWNLOAD, "); break; case I2O_CMD_SW_UPLOAD: printk("EXEC_SW_UPLOAD, "); break; case I2O_CMD_SW_REMOVE: printk("EXEC_SW_REMOVE, "); break; case I2O_CMD_SYS_ENABLE: printk("EXEC_SYS_ENABLE, "); break; case I2O_CMD_SYS_MODIFY: printk("EXEC_SYS_MODIFY, "); break; case I2O_CMD_SYS_QUIESCE: printk("EXEC_SYS_QUIESCE, "); break; case I2O_CMD_SYS_TAB_SET: printk("EXEC_SYS_TAB_SET, "); break; default: printk("Cmd = %#02x, ", cmd); } } void i2o_debug_state(struct i2o_controller *c) { printk(KERN_INFO "%s: State = ", c->name); switch (((i2o_status_block *) c->status_block.virt)->iop_state) { case 0x01: printk("INIT\n"); break; case 0x02: printk("RESET\n"); break; case 0x04: printk("HOLD\n"); break; case 0x05: printk("READY\n"); break; case 0x08: printk("OPERATIONAL\n"); break; case 0x10: printk("FAILED\n"); break; case 0x11: printk("FAULTED\n"); break; default: printk("%x (unknown !!)\n", ((i2o_status_block *) c->status_block.virt)->iop_state); } }; void i2o_dump_hrt(struct i2o_controller *c) { u32 *rows = (u32 *) c->hrt.virt; u8 *p = (u8 *) c->hrt.virt; u8 *d; int count; int length; int i; int state; if (p[3] != 0) { printk(KERN_ERR "%s: HRT table for controller is too new a version.\n", c->name); return; } count = p[0] | (p[1] << 8); length = p[2]; printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n", c->name, count, length << 2); rows += 2; for (i = 0; i < count; i++) { printk(KERN_INFO "Adapter %08X: ", rows[0]); p = (u8 *) (rows + 1); d = (u8 *) (rows + 2); state = p[1] << 8 | p[0]; 
printk("TID %04X:[", state & 0xFFF); state >>= 12; if (state & (1 << 0)) printk("H"); /* Hidden */ if (state & (1 << 2)) { printk("P"); /* Present */ if (state & (1 << 1)) printk("C"); /* Controlled */ } if (state > 9) printk("*"); /* Hard */ printk("]:"); switch (p[3] & 0xFFFF) { case 0: /* Adapter private bus - easy */ printk("Local bus %d: I/O at 0x%04X Mem 0x%08X", p[2], d[1] << 8 | d[0], *(u32 *) (d + 4)); break; case 1: /* ISA bus */ printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X", p[2], d[2], d[1] << 8 | d[0], *(u32 *) (d + 4)); break; case 2: /* EISA bus */ printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X", p[2], d[3], d[1] << 8 | d[0], *(u32 *) (d + 4)); break; case 3: /* MCA bus */ printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X", p[2], d[3], d[1] << 8 | d[0], *(u32 *) (d + 4)); break; case 4: /* PCI bus */ printk("PCI %d: Bus %d Device %d Function %d", p[2], d[2], d[1], d[0]); break; case 0x80: /* Other */ default: printk("Unsupported bus type."); break; } printk("\n"); rows += length; } } EXPORT_SYMBOL(i2o_dump_message);
gpl-2.0
fabiocannizzo/linux
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
112
3608
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

/**
 * amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
 *
 * @table: newly allocated or validated PD/PT
 *
 * Returns:
 * Negative errno on kmap failure, 0 for success.
 */
static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
{
	return amdgpu_bo_kmap(&table->bo, NULL);
}

/**
 * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
 *
 * @p: see amdgpu_vm_update_params definition
 * @resv: reservation object with embedded fence
 * @sync_mode: synchronization mode
 *
 * Returns:
 * Negative errno, 0 for success.  (Typo "Negativ" fixed.)
 */
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
				 struct dma_resv *resv,
				 enum amdgpu_sync_mode sync_mode)
{
	if (!resv)
		return 0;

	return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
}

/**
 * amdgpu_vm_cpu_update - helper to update page tables via CPU
 *
 * @p: see amdgpu_vm_update_params definition
 * @vmbo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 *
 * Returns:
 * Negative errno if waiting on the move fence fails, 0 for success.
 */
static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
				struct amdgpu_bo_vm *vmbo, uint64_t pe,
				uint64_t addr, unsigned count, uint32_t incr,
				uint64_t flags)
{
	unsigned int i;
	uint64_t value;
	int r;

	/* Wait for any in-flight move of the BO before touching its pages */
	if (vmbo->bo.tbo.moving) {
		r = dma_fence_wait(vmbo->bo.tbo.moving, true);
		if (r)
			return r;
	}

	pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);

	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);

	for (i = 0; i < count; i++) {
		value = p->pages_addr ?
			amdgpu_vm_map_gart(p->pages_addr, addr) :
			addr;
		amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
				       i, value, flags);
		addr += incr;
	}
	return 0;
}

/**
 * amdgpu_vm_cpu_commit - commit page table update to the HW
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: unused
 *
 * Make sure that the hardware sees the page table updates.
 *
 * Returns:
 * Always 0.
 */
static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
				struct dma_fence **fence)
{
	/* Flush HDP */
	mb();
	amdgpu_device_flush_hdp(p->adev, NULL);
	return 0;
}

const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
	.map_table = amdgpu_vm_cpu_map_table,
	.prepare = amdgpu_vm_cpu_prepare,
	.update = amdgpu_vm_cpu_update,
	.commit = amdgpu_vm_cpu_commit
};
gpl-2.0
jderrick/linux-blkdev
fs/exofs/file.c
368
2516
/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <ooo@electrozaur.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "exofs.h"

/* Nothing to tear down on last close of a file descriptor. */
static int exofs_release_file(struct inode *inode, struct file *filp)
{
	return 0;
}

/* exofs_file_fsync - flush the inode to disk
 *
 *   Note, in exofs all metadata is written as part of inode, regardless.
 *   The writeout is synchronous
 */
static int exofs_file_fsync(struct file *filp, loff_t start, loff_t end,
			    int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	inode_lock(inode);
	/* Use the inode already resolved above instead of re-deriving it
	 * from filp->f_mapping->host a second time. */
	ret = sync_inode_metadata(inode, 1);
	inode_unlock(inode);
	return ret;
}

static int exofs_flush(struct file *file, fl_owner_t id)
{
	int ret = vfs_fsync(file, 0);
	/* TODO: Flush the OSD target */
	return ret;
}

const struct file_operations exofs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.open		= generic_file_open,
	.release	= exofs_release_file,
	.fsync		= exofs_file_fsync,
	.flush		= exofs_flush,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};

const struct inode_operations exofs_file_inode_operations = {
	.setattr	= exofs_setattr,
};
gpl-2.0
CPFL/linux
drivers/net/wireless/mwifiex/sta_tx.c
368
6972
/* * Marvell Wireless LAN device driver: station TX data handling * * Copyright (C) 2011-2014, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "main.h" #include "wmm.h" /* * This function fills the TxPD for tx packets. * * The Tx buffer received by this function should already have the * header space allocated for TxPD. * * This function inserts the TxPD in between interface header and actual * data and adjusts the buffer pointers accordingly. * * The following TxPD fields are set by this function, as required - * - BSS number * - Tx packet length and offset * - Priority * - Packet delay * - Priority specific Tx control * - Flags */ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv, struct sk_buff *skb) { struct mwifiex_adapter *adapter = priv->adapter; struct txpd *local_tx_pd; struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); unsigned int pad; u16 pkt_type, pkt_offset; int hroom = (priv->adapter->iface_type == MWIFIEX_USB) ? 
0 : INTF_HEADER_LEN; if (!skb->len) { dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len); tx_info->status_code = -1; return skb->data; } BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN); pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0; pad = ((void *)skb->data - (sizeof(*local_tx_pd) + hroom)- NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1); skb_push(skb, sizeof(*local_tx_pd) + pad); local_tx_pd = (struct txpd *) skb->data; memset(local_tx_pd, 0, sizeof(struct txpd)); local_tx_pd->bss_num = priv->bss_num; local_tx_pd->bss_type = priv->bss_type; local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len - (sizeof(struct txpd) + pad))); local_tx_pd->priority = (u8) skb->priority; local_tx_pd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb); if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS || tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) { local_tx_pd->tx_token_id = tx_info->ack_frame_id; local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS; } if (local_tx_pd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl)) /* * Set the priority specific tx_control field, setting of 0 will * cause the default value to be used later in this function */ local_tx_pd->tx_control = cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[local_tx_pd-> priority]); if (adapter->pps_uapsd_mode) { if (mwifiex_check_last_packet_indication(priv)) { adapter->tx_lock_flag = true; local_tx_pd->flags = MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET; } } if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT) local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET; /* Offset of actual data */ pkt_offset = sizeof(struct txpd) + pad; if (pkt_type == PKT_TYPE_MGMT) { /* Set the packet type and add header for management frame */ local_tx_pd->tx_pkt_type = cpu_to_le16(pkt_type); pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE; } local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset); /* make space for INTF_HEADER_LEN */ skb_push(skb, hroom); if (!local_tx_pd->tx_control) /* TxCtrl set by user or 
default */ local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); return skb->data; } /* * This function tells firmware to send a NULL data packet. * * The function creates a NULL data packet with TxPD and sends to the * firmware for transmission, with highest priority setting. */ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags) { struct mwifiex_adapter *adapter = priv->adapter; struct txpd *local_tx_pd; struct mwifiex_tx_param tx_param; /* sizeof(struct txpd) + Interface specific header */ #define NULL_PACKET_HDR 64 u32 data_len = NULL_PACKET_HDR; struct sk_buff *skb; int ret; struct mwifiex_txinfo *tx_info = NULL; if (adapter->surprise_removed) return -1; if (!priv->media_connected) return -1; if (adapter->data_sent) return -1; skb = dev_alloc_skb(data_len); if (!skb) return -1; tx_info = MWIFIEX_SKB_TXCB(skb); memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN); skb_reserve(skb, sizeof(struct txpd) + INTF_HEADER_LEN); skb_push(skb, sizeof(struct txpd)); local_tx_pd = (struct txpd *) skb->data; local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); local_tx_pd->flags = flags; local_tx_pd->priority = WMM_HIGHEST_PRIORITY; local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd)); local_tx_pd->bss_num = priv->bss_num; local_tx_pd->bss_type = priv->bss_type; if (adapter->iface_type == MWIFIEX_USB) { ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA, skb, NULL); } else { skb_push(skb, INTF_HEADER_LEN); tx_param.next_pkt_len = 0; ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, skb, &tx_param); } switch (ret) { case -EBUSY: dev_kfree_skb_any(skb); dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n", __func__, ret); adapter->dbg.num_tx_host_to_card_failure++; break; case -1: adapter->data_sent = false; dev_kfree_skb_any(skb); dev_err(adapter->dev, "%s: host_to_card failed: 
ret=%d\n", __func__, ret); adapter->dbg.num_tx_host_to_card_failure++; break; case 0: dev_kfree_skb_any(skb); dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n", __func__); adapter->tx_lock_flag = true; break; case -EINPROGRESS: adapter->tx_lock_flag = true; break; default: break; } return ret; } /* * This function checks if we need to send last packet indication. */ u8 mwifiex_check_last_packet_indication(struct mwifiex_private *priv) { struct mwifiex_adapter *adapter = priv->adapter; u8 ret = false; if (!adapter->sleep_period.period) return ret; if (mwifiex_wmm_lists_empty(adapter)) ret = true; if (ret && !adapter->cmd_sent && !adapter->curr_cmd && !is_command_pending(adapter)) { adapter->delay_null_pkt = false; ret = true; } else { ret = false; adapter->delay_null_pkt = true; } return ret; }
gpl-2.0
fullerdj/ceph-client
arch/mips/mti-malta/malta-memory.c
368
5102
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * PROM library functions for acquiring/using memory descriptors given to * us from the YAMON. * * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc. * All rights reserved. * Authors: Carsten Langgaard <carstenl@mips.com> * Steven J. Hill <sjhill@mips.com> */ #include <linux/init.h> #include <linux/bootmem.h> #include <linux/string.h> #include <asm/bootinfo.h> #include <asm/maar.h> #include <asm/sections.h> #include <asm/fw/fw.h> static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS]; /* determined physical memory size, not overridden by command line args */ unsigned long physical_memsize = 0L; fw_memblock_t * __init fw_getmdesc(int eva) { char *memsize_str, *ememsize_str = NULL, *ptr; unsigned long memsize = 0, ememsize = 0; static char cmdline[COMMAND_LINE_SIZE] __initdata; int tmp; /* otherwise look in the environment */ memsize_str = fw_getenv("memsize"); if (memsize_str) { tmp = kstrtoul(memsize_str, 0, &memsize); if (tmp) pr_warn("Failed to read the 'memsize' env variable.\n"); } if (eva) { /* Look for ememsize for EVA */ ememsize_str = fw_getenv("ememsize"); if (ememsize_str) { tmp = kstrtoul(ememsize_str, 0, &ememsize); if (tmp) pr_warn("Failed to read the 'ememsize' env variable.\n"); } } if (!memsize && !ememsize) { pr_warn("memsize not set in YAMON, set to default (32Mb)\n"); physical_memsize = 0x02000000; } else { /* If ememsize is set, then set physical_memsize to that */ physical_memsize = ememsize ? 
: memsize; } #ifdef CONFIG_CPU_BIG_ENDIAN /* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last word of physical memory */ physical_memsize -= PAGE_SIZE; #endif /* Check the command line for a memsize directive that overrides the physical/default amount */ strcpy(cmdline, arcs_cmdline); ptr = strstr(cmdline, "memsize="); if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' ')) ptr = strstr(ptr, " memsize="); /* And now look for ememsize */ if (eva) { ptr = strstr(cmdline, "ememsize="); if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' ')) ptr = strstr(ptr, " ememsize="); } if (ptr) memsize = memparse(ptr + 8 + (eva ? 1 : 0), &ptr); else memsize = physical_memsize; /* Last 64K for HIGHMEM arithmetics */ if (memsize > 0x7fff0000) memsize = 0x7fff0000; memset(mdesc, 0, sizeof(mdesc)); mdesc[0].type = fw_dontuse; mdesc[0].base = PHYS_OFFSET; mdesc[0].size = 0x00001000; mdesc[1].type = fw_code; mdesc[1].base = mdesc[0].base + 0x00001000UL; mdesc[1].size = 0x000ef000; /* * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the * south bridge and PCI access always forwarded to the ISA Bus and * BIOSCS# is always generated. * This mean that this area can't be used as DMA memory for PCI * devices. 
*/ mdesc[2].type = fw_dontuse; mdesc[2].base = mdesc[0].base + 0x000f0000UL; mdesc[2].size = 0x00010000; mdesc[3].type = fw_dontuse; mdesc[3].base = mdesc[0].base + 0x00100000UL; mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - 0x00100000UL; mdesc[4].type = fw_free; mdesc[4].base = mdesc[0].base + CPHYSADDR(PFN_ALIGN(&_end)); mdesc[4].size = memsize - CPHYSADDR(mdesc[4].base); return &mdesc[0]; } static void free_init_pages_eva_malta(void *begin, void *end) { free_init_pages("unused kernel", __pa_symbol((unsigned long *)begin), __pa_symbol((unsigned long *)end)); } static int __init fw_memtype_classify(unsigned int type) { switch (type) { case fw_free: return BOOT_MEM_RAM; case fw_code: return BOOT_MEM_ROM_DATA; default: return BOOT_MEM_RESERVED; } } void __init fw_meminit(void) { fw_memblock_t *p; p = fw_getmdesc(config_enabled(CONFIG_EVA)); free_init_pages_eva = (config_enabled(CONFIG_EVA) ? free_init_pages_eva_malta : NULL); while (p->size) { long type; unsigned long base, size; type = fw_memtype_classify(p->type); base = p->base; size = p->size; add_memory_region(base, size, type); p++; } } void __init prom_free_prom_memory(void) { unsigned long addr; int i; for (i = 0; i < boot_mem_map.nr_map; i++) { if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA) continue; addr = boot_mem_map.map[i].addr; free_init_pages("YAMON memory", addr, addr + boot_mem_map.map[i].size); } } unsigned platform_maar_init(unsigned num_pairs) { phys_addr_t mem_end = (physical_memsize & ~0xffffull) - 1; struct maar_config cfg[] = { /* DRAM preceding I/O */ { 0x00000000, 0x0fffffff, MIPS_MAAR_S }, /* DRAM following I/O */ { 0x20000000, mem_end, MIPS_MAAR_S }, /* DRAM alias in upper half of physical */ { 0x80000000, 0x80000000 + mem_end, MIPS_MAAR_S }, }; unsigned i, num_cfg = ARRAY_SIZE(cfg); /* If DRAM fits before I/O, drop the region following it */ if (physical_memsize <= 0x10000000) { num_cfg--; for (i = 1; i < num_cfg; i++) cfg[i] = cfg[i + 1]; } return maar_config(cfg, 
num_cfg, num_pairs); }
gpl-2.0
NETFORCE2/linux
drivers/staging/lustre/lustre/obdecho/lproc_echo.c
624
1874
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. */ #define DEBUG_SUBSYSTEM S_ECHO #include "../include/lprocfs_status.h" #include "../include/obd_class.h" #if defined (CONFIG_PROC_FS) LPROC_SEQ_FOPS_RO_TYPE(echo, uuid); static struct lprocfs_vars lprocfs_echo_obd_vars[] = { { "uuid", &echo_uuid_fops, NULL, 0 }, { NULL } }; LPROC_SEQ_FOPS_RO_TYPE(echo, numrefs); static struct lprocfs_vars lprocfs_echo_module_vars[] = { { "num_refs", &echo_numrefs_fops, NULL, 0 }, { NULL } }; void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars) { lvars->module_vars = lprocfs_echo_module_vars; lvars->obd_vars = lprocfs_echo_obd_vars; } #endif /* CONFIG_PROC_FS */
gpl-2.0
GrandPrime/android_kernel_samsung_msm8916-caf
drivers/staging/prima/CORE/BAP/src/bapApiDebug.c
624
6974
/* * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /*=========================================================================== b a p A p i D e b u g . C OVERVIEW: This software unit holds the implementation of the WLAN BAP modules Debug functions. The functions externalized by this module are to be called ONLY by other WLAN modules (HDD) that properly register with the BAP Layer initially. DEPENDENCIES: Are listed for each API below. Copyright (c) 2008 QUALCOMM Incorporated. All Rights Reserved. Qualcomm Confidential and Proprietary ===========================================================================*/ /*=========================================================================== EDIT HISTORY FOR FILE This section contains comments describing changes made to the module. Notice that changes are listed in reverse chronological order. 
$Header: /cygdrive/e/Builds/M7201JSDCAAPAD52240B/WM/platform/msm7200/Src/Drivers/SD/ClientDrivers/WLAN/QCT/CORE/BAP/src/bapApiDebug.c,v 1.2 2008/11/10 22:37:58 jzmuda Exp jzmuda $$DateTime$$Author: jzmuda $ when who what, where, why ---------- --- -------------------------------------------------------- 2008-09-15 jez Created module ===========================================================================*/ /*---------------------------------------------------------------------------- * Include Files * -------------------------------------------------------------------------*/ //#include "wlan_qct_tl.h" #include "vos_trace.h" /* BT-AMP PAL API header file */ #include "bapApi.h" #include "bapInternal.h" // //#define BAP_DEBUG /*---------------------------------------------------------------------------- * Preprocessor Definitions and Constants * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Type Declarations * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Global Data Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Variable Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Function Declarations and Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Externalized Function Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Function Declarations and Documentation 
* -------------------------------------------------------------------------*/ /* Debug Commands */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPReadLoopbackMode() DESCRIPTION Implements the actual HCI Read Loopback Mode command. There is no need for a callback because when this call returns the action has been completed. DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIReadLoopbackMode: pointer to the "HCI Read Loopback Mode". IN/OUT pBapHCIEvent: Return event value for the command complete event. (The caller of this routine is responsible for sending the Command Complete event up the HCI interface.) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIReadLoopbackMode or pBapHCILoopbackMode is NULL. VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPReadLoopbackMode ( ptBtampHandle btampHandle, tBtampTLVHCI_Read_Loopback_Mode_Cmd *pBapHCIReadLoopbackMode, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { return VOS_STATUS_SUCCESS; } /* WLAN_BAPReadLoopbackMode */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPWriteLoopbackMode() DESCRIPTION Implements the actual HCI Write Loopback Mode command. There is no need for a callback because when this call returns the action has been completed. DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIWriteLoopbackMode: pointer to the "HCI Write Loopback Mode" Structure. IN/OUT pBapHCIEvent: Return event value for the command complete event. (The caller of this routine is responsible for sending the Command Complete event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIWriteLoopbackMode is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPWriteLoopbackMode ( ptBtampHandle btampHandle, tBtampTLVHCI_Write_Loopback_Mode_Cmd *pBapHCIWriteLoopbackMode, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { return VOS_STATUS_SUCCESS; } /* WLAN_BAPWriteLoopbackMode */
gpl-2.0
DigilentInc/Linux-Digilent-Dev
tools/perf/tests/parse-no-sample-id-all.c
1392
2369
#include <linux/types.h> #include <stddef.h> #include "tests.h" #include "event.h" #include "evlist.h" #include "header.h" #include "util.h" #include "debug.h" static int process_event(struct perf_evlist **pevlist, union perf_event *event) { struct perf_sample sample; if (event->header.type == PERF_RECORD_HEADER_ATTR) { if (perf_event__process_attr(NULL, event, pevlist)) { pr_debug("perf_event__process_attr failed\n"); return -1; } return 0; } if (event->header.type >= PERF_RECORD_USER_TYPE_START) return -1; if (!*pevlist) return -1; if (perf_evlist__parse_sample(*pevlist, event, &sample)) { pr_debug("perf_evlist__parse_sample failed\n"); return -1; } return 0; } static int process_events(union perf_event **events, size_t count) { struct perf_evlist *evlist = NULL; int err = 0; size_t i; for (i = 0; i < count && !err; i++) err = process_event(&evlist, events[i]); if (evlist) perf_evlist__delete(evlist); return err; } struct test_attr_event { struct attr_event attr; u64 id; }; /** * test__parse_no_sample_id_all - test parsing with no sample_id_all bit set. * * This function tests parsing data produced on kernel's that do not support the * sample_id_all bit. Without the sample_id_all bit, non-sample events (such as * mmap events) do not have an id sample appended, and consequently logic * designed to determine the id will not work. That case happens when there is * more than one selected event, so this test processes three events: 2 * attributes representing the selected events and one mmap event. * * Return: %0 on success, %-1 if the test fails. 
*/ int test__parse_no_sample_id_all(void) { int err; struct test_attr_event event1 = { .attr = { .header = { .type = PERF_RECORD_HEADER_ATTR, .size = sizeof(struct test_attr_event), }, }, .id = 1, }; struct test_attr_event event2 = { .attr = { .header = { .type = PERF_RECORD_HEADER_ATTR, .size = sizeof(struct test_attr_event), }, }, .id = 2, }; struct mmap_event event3 = { .header = { .type = PERF_RECORD_MMAP, .size = sizeof(struct mmap_event), }, }; union perf_event *events[] = { (union perf_event *)&event1, (union perf_event *)&event2, (union perf_event *)&event3, }; err = process_events(events, ARRAY_SIZE(events)); if (err) return -1; return 0; }
gpl-2.0
jeremytrimble/adi-linux
drivers/hid/hid-roccat-arvo.c
1904
11622
/* * Roccat Arvo driver for Linux * * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net> */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ /* * Roccat Arvo is a gamer keyboard with 5 macro keys that can be configured in * 5 profiles. */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/hid-roccat.h> #include "hid-ids.h" #include "hid-roccat-common.h" #include "hid-roccat-arvo.h" static struct class *arvo_class; static ssize_t arvo_sysfs_show_mode_key(struct device *dev, struct device_attribute *attr, char *buf) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev->parent->parent)); struct arvo_mode_key temp_buf; int retval; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_MODE_KEY, &temp_buf, sizeof(struct arvo_mode_key)); mutex_unlock(&arvo->arvo_lock); if (retval) return retval; return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.state); } static ssize_t arvo_sysfs_set_mode_key(struct device *dev, struct device_attribute *attr, char const *buf, size_t size) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev->parent->parent)); struct arvo_mode_key temp_buf; unsigned long state; int retval; retval = kstrtoul(buf, 10, &state); if (retval) return retval; temp_buf.command = ARVO_COMMAND_MODE_KEY; temp_buf.state = state; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_send(usb_dev, ARVO_COMMAND_MODE_KEY, &temp_buf, sizeof(struct arvo_mode_key)); mutex_unlock(&arvo->arvo_lock); if (retval) return retval; return size; } static 
DEVICE_ATTR(mode_key, 0660, arvo_sysfs_show_mode_key, arvo_sysfs_set_mode_key); static ssize_t arvo_sysfs_show_key_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev->parent->parent)); struct arvo_key_mask temp_buf; int retval; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_KEY_MASK, &temp_buf, sizeof(struct arvo_key_mask)); mutex_unlock(&arvo->arvo_lock); if (retval) return retval; return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.key_mask); } static ssize_t arvo_sysfs_set_key_mask(struct device *dev, struct device_attribute *attr, char const *buf, size_t size) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev->parent->parent)); struct arvo_key_mask temp_buf; unsigned long key_mask; int retval; retval = kstrtoul(buf, 10, &key_mask); if (retval) return retval; temp_buf.command = ARVO_COMMAND_KEY_MASK; temp_buf.key_mask = key_mask; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_send(usb_dev, ARVO_COMMAND_KEY_MASK, &temp_buf, sizeof(struct arvo_key_mask)); mutex_unlock(&arvo->arvo_lock); if (retval) return retval; return size; } static DEVICE_ATTR(key_mask, 0660, arvo_sysfs_show_key_mask, arvo_sysfs_set_key_mask); /* retval is 1-5 on success, < 0 on error */ static int arvo_get_actual_profile(struct usb_device *usb_dev) { struct arvo_actual_profile temp_buf; int retval; retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE, &temp_buf, sizeof(struct arvo_actual_profile)); if (retval) return retval; return temp_buf.actual_profile; } static ssize_t arvo_sysfs_show_actual_profile(struct device *dev, struct device_attribute *attr, char *buf) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); return snprintf(buf, 
PAGE_SIZE, "%d\n", arvo->actual_profile); } static ssize_t arvo_sysfs_set_actual_profile(struct device *dev, struct device_attribute *attr, char const *buf, size_t size) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev->parent->parent)); struct arvo_actual_profile temp_buf; unsigned long profile; int retval; retval = kstrtoul(buf, 10, &profile); if (retval) return retval; if (profile < 1 || profile > 5) return -EINVAL; temp_buf.command = ARVO_COMMAND_ACTUAL_PROFILE; temp_buf.actual_profile = profile; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_send(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE, &temp_buf, sizeof(struct arvo_actual_profile)); if (!retval) { arvo->actual_profile = profile; retval = size; } mutex_unlock(&arvo->arvo_lock); return retval; } static DEVICE_ATTR(actual_profile, 0660, arvo_sysfs_show_actual_profile, arvo_sysfs_set_actual_profile); static ssize_t arvo_sysfs_write(struct file *fp, struct kobject *kobj, void const *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = container_of(kobj, struct device, kobj)->parent->parent; struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_send(usb_dev, command, buf, real_size); mutex_unlock(&arvo->arvo_lock); return (retval ? 
retval : real_size); } static ssize_t arvo_sysfs_read(struct file *fp, struct kobject *kobj, void *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = container_of(kobj, struct device, kobj)->parent->parent; struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off >= real_size) return 0; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_receive(usb_dev, command, buf, real_size); mutex_unlock(&arvo->arvo_lock); return (retval ? retval : real_size); } static ssize_t arvo_sysfs_write_button(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return arvo_sysfs_write(fp, kobj, buf, off, count, sizeof(struct arvo_button), ARVO_COMMAND_BUTTON); } static BIN_ATTR(button, 0220, NULL, arvo_sysfs_write_button, sizeof(struct arvo_button)); static ssize_t arvo_sysfs_read_info(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return arvo_sysfs_read(fp, kobj, buf, off, count, sizeof(struct arvo_info), ARVO_COMMAND_INFO); } static BIN_ATTR(info, 0440, arvo_sysfs_read_info, NULL, sizeof(struct arvo_info)); static struct attribute *arvo_attrs[] = { &dev_attr_mode_key.attr, &dev_attr_key_mask.attr, &dev_attr_actual_profile.attr, NULL, }; static struct bin_attribute *arvo_bin_attributes[] = { &bin_attr_button, &bin_attr_info, NULL, }; static const struct attribute_group arvo_group = { .attrs = arvo_attrs, .bin_attrs = arvo_bin_attributes, }; static const struct attribute_group *arvo_groups[] = { &arvo_group, NULL, }; static int arvo_init_arvo_device_struct(struct usb_device *usb_dev, struct arvo_device *arvo) { int retval; mutex_init(&arvo->arvo_lock); retval = arvo_get_actual_profile(usb_dev); if (retval < 0) return retval; arvo->actual_profile = retval; return 0; } static int 
arvo_init_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *usb_dev = interface_to_usbdev(intf); struct arvo_device *arvo; int retval; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_KEYBOARD) { hid_set_drvdata(hdev, NULL); return 0; } arvo = kzalloc(sizeof(*arvo), GFP_KERNEL); if (!arvo) { hid_err(hdev, "can't alloc device descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, arvo); retval = arvo_init_arvo_device_struct(usb_dev, arvo); if (retval) { hid_err(hdev, "couldn't init struct arvo_device\n"); goto exit_free; } retval = roccat_connect(arvo_class, hdev, sizeof(struct arvo_roccat_report)); if (retval < 0) { hid_err(hdev, "couldn't init char dev\n"); } else { arvo->chrdev_minor = retval; arvo->roccat_claimed = 1; } return 0; exit_free: kfree(arvo); return retval; } static void arvo_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct arvo_device *arvo; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_KEYBOARD) return; arvo = hid_get_drvdata(hdev); if (arvo->roccat_claimed) roccat_disconnect(arvo->chrdev_minor); kfree(arvo); } static int arvo_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); goto exit; } retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (retval) { hid_err(hdev, "hw start failed\n"); goto exit; } retval = arvo_init_specials(hdev); if (retval) { hid_err(hdev, "couldn't install keyboard\n"); goto exit_stop; } return 0; exit_stop: hid_hw_stop(hdev); exit: return retval; } static void arvo_remove(struct hid_device *hdev) { arvo_remove_specials(hdev); hid_hw_stop(hdev); } static void arvo_report_to_chrdev(struct arvo_device const *arvo, u8 const *data) { struct arvo_special_report const *special_report; struct arvo_roccat_report roccat_report; special_report = 
(struct arvo_special_report const *)data; roccat_report.profile = arvo->actual_profile; roccat_report.button = special_report->event & ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON; if ((special_report->event & ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION) == ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS) roccat_report.action = ARVO_ROCCAT_REPORT_ACTION_PRESS; else roccat_report.action = ARVO_ROCCAT_REPORT_ACTION_RELEASE; roccat_report_event(arvo->chrdev_minor, (uint8_t const *)&roccat_report); } static int arvo_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct arvo_device *arvo = hid_get_drvdata(hdev); if (size != 3) return 0; if (arvo && arvo->roccat_claimed) arvo_report_to_chrdev(arvo, data); return 0; } static const struct hid_device_id arvo_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, { } }; MODULE_DEVICE_TABLE(hid, arvo_devices); static struct hid_driver arvo_driver = { .name = "arvo", .id_table = arvo_devices, .probe = arvo_probe, .remove = arvo_remove, .raw_event = arvo_raw_event }; static int __init arvo_init(void) { int retval; arvo_class = class_create(THIS_MODULE, "arvo"); if (IS_ERR(arvo_class)) return PTR_ERR(arvo_class); arvo_class->dev_groups = arvo_groups; retval = hid_register_driver(&arvo_driver); if (retval) class_destroy(arvo_class); return retval; } static void __exit arvo_exit(void) { hid_unregister_driver(&arvo_driver); class_destroy(arvo_class); } module_init(arvo_init); module_exit(arvo_exit); MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat Arvo driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
Dm47021/Linux-kernel_4.1.15-rt17_MusicOS
drivers/video/backlight/lm3630a_bl.c
1904
12797
/* * Simple driver for Texas Instruments LM3630A Backlight driver chip * Copyright (C) 2012 Texas Instruments * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/backlight.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/regmap.h> #include <linux/pwm.h> #include <linux/platform_data/lm3630a_bl.h> #define REG_CTRL 0x00 #define REG_BOOST 0x02 #define REG_CONFIG 0x01 #define REG_BRT_A 0x03 #define REG_BRT_B 0x04 #define REG_I_A 0x05 #define REG_I_B 0x06 #define REG_INT_STATUS 0x09 #define REG_INT_EN 0x0A #define REG_FAULT 0x0B #define REG_PWM_OUTLOW 0x12 #define REG_PWM_OUTHIGH 0x13 #define REG_MAX 0x1F #define INT_DEBOUNCE_MSEC 10 struct lm3630a_chip { struct device *dev; struct delayed_work work; int irq; struct workqueue_struct *irqthread; struct lm3630a_platform_data *pdata; struct backlight_device *bleda; struct backlight_device *bledb; struct regmap *regmap; struct pwm_device *pwmd; }; /* i2c access */ static int lm3630a_read(struct lm3630a_chip *pchip, unsigned int reg) { int rval; unsigned int reg_val; rval = regmap_read(pchip->regmap, reg, &reg_val); if (rval < 0) return rval; return reg_val & 0xFF; } static int lm3630a_write(struct lm3630a_chip *pchip, unsigned int reg, unsigned int data) { return regmap_write(pchip->regmap, reg, data); } static int lm3630a_update(struct lm3630a_chip *pchip, unsigned int reg, unsigned int mask, unsigned int data) { return regmap_update_bits(pchip->regmap, reg, mask, data); } /* initialize chip */ static int lm3630a_chip_init(struct lm3630a_chip *pchip) { int rval; struct lm3630a_platform_data *pdata = pchip->pdata; usleep_range(1000, 2000); /* set Filter Strength Register */ rval = lm3630a_write(pchip, 0x50, 0x03); /* set Cofig. 
register */ rval |= lm3630a_update(pchip, REG_CONFIG, 0x07, pdata->pwm_ctrl); /* set boost control */ rval |= lm3630a_write(pchip, REG_BOOST, 0x38); /* set current A */ rval |= lm3630a_update(pchip, REG_I_A, 0x1F, 0x1F); /* set current B */ rval |= lm3630a_write(pchip, REG_I_B, 0x1F); /* set control */ rval |= lm3630a_update(pchip, REG_CTRL, 0x14, pdata->leda_ctrl); rval |= lm3630a_update(pchip, REG_CTRL, 0x0B, pdata->ledb_ctrl); usleep_range(1000, 2000); /* set brightness A and B */ rval |= lm3630a_write(pchip, REG_BRT_A, pdata->leda_init_brt); rval |= lm3630a_write(pchip, REG_BRT_B, pdata->ledb_init_brt); if (rval < 0) dev_err(pchip->dev, "i2c failed to access register\n"); return rval; } /* interrupt handling */ static void lm3630a_delayed_func(struct work_struct *work) { int rval; struct lm3630a_chip *pchip; pchip = container_of(work, struct lm3630a_chip, work.work); rval = lm3630a_read(pchip, REG_INT_STATUS); if (rval < 0) { dev_err(pchip->dev, "i2c failed to access REG_INT_STATUS Register\n"); return; } dev_info(pchip->dev, "REG_INT_STATUS Register is 0x%x\n", rval); } static irqreturn_t lm3630a_isr_func(int irq, void *chip) { int rval; struct lm3630a_chip *pchip = chip; unsigned long delay = msecs_to_jiffies(INT_DEBOUNCE_MSEC); queue_delayed_work(pchip->irqthread, &pchip->work, delay); rval = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00); if (rval < 0) { dev_err(pchip->dev, "i2c failed to access register\n"); return IRQ_NONE; } return IRQ_HANDLED; } static int lm3630a_intr_config(struct lm3630a_chip *pchip) { int rval; rval = lm3630a_write(pchip, REG_INT_EN, 0x87); if (rval < 0) return rval; INIT_DELAYED_WORK(&pchip->work, lm3630a_delayed_func); pchip->irqthread = create_singlethread_workqueue("lm3630a-irqthd"); if (!pchip->irqthread) { dev_err(pchip->dev, "create irq thread fail\n"); return -ENOMEM; } if (request_threaded_irq (pchip->irq, NULL, lm3630a_isr_func, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "lm3630a_irq", pchip)) { dev_err(pchip->dev, "request 
threaded irq fail\n"); destroy_workqueue(pchip->irqthread); return -ENOMEM; } return rval; } static void lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max) { unsigned int period = pwm_get_period(pchip->pwmd); unsigned int duty = br * period / br_max; pwm_config(pchip->pwmd, duty, period); if (duty) pwm_enable(pchip->pwmd); else pwm_disable(pchip->pwmd); } /* update and get brightness */ static int lm3630a_bank_a_update_status(struct backlight_device *bl) { int ret; struct lm3630a_chip *pchip = bl_get_data(bl); enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl; /* pwm control */ if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0) { lm3630a_pwm_ctrl(pchip, bl->props.brightness, bl->props.max_brightness); return bl->props.brightness; } /* disable sleep */ ret = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00); if (ret < 0) goto out_i2c_err; usleep_range(1000, 2000); /* minimum brightness is 0x04 */ ret = lm3630a_write(pchip, REG_BRT_A, bl->props.brightness); if (bl->props.brightness < 0x4) ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDA_ENABLE, 0); else ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDA_ENABLE, LM3630A_LEDA_ENABLE); if (ret < 0) goto out_i2c_err; return bl->props.brightness; out_i2c_err: dev_err(pchip->dev, "i2c failed to access\n"); return bl->props.brightness; } static int lm3630a_bank_a_get_brightness(struct backlight_device *bl) { int brightness, rval; struct lm3630a_chip *pchip = bl_get_data(bl); enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl; if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0) { rval = lm3630a_read(pchip, REG_PWM_OUTHIGH); if (rval < 0) goto out_i2c_err; brightness = (rval & 0x01) << 8; rval = lm3630a_read(pchip, REG_PWM_OUTLOW); if (rval < 0) goto out_i2c_err; brightness |= rval; goto out; } /* disable sleep */ rval = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00); if (rval < 0) goto out_i2c_err; usleep_range(1000, 2000); rval = lm3630a_read(pchip, REG_BRT_A); if (rval < 0) goto out_i2c_err; brightness = rval; out: 
bl->props.brightness = brightness; return bl->props.brightness; out_i2c_err: dev_err(pchip->dev, "i2c failed to access register\n"); return 0; } static const struct backlight_ops lm3630a_bank_a_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = lm3630a_bank_a_update_status, .get_brightness = lm3630a_bank_a_get_brightness, }; /* update and get brightness */ static int lm3630a_bank_b_update_status(struct backlight_device *bl) { int ret; struct lm3630a_chip *pchip = bl_get_data(bl); enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl; /* pwm control */ if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0) { lm3630a_pwm_ctrl(pchip, bl->props.brightness, bl->props.max_brightness); return bl->props.brightness; } /* disable sleep */ ret = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00); if (ret < 0) goto out_i2c_err; usleep_range(1000, 2000); /* minimum brightness is 0x04 */ ret = lm3630a_write(pchip, REG_BRT_B, bl->props.brightness); if (bl->props.brightness < 0x4) ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDB_ENABLE, 0); else ret |= lm3630a_update(pchip, REG_CTRL, LM3630A_LEDB_ENABLE, LM3630A_LEDB_ENABLE); if (ret < 0) goto out_i2c_err; return bl->props.brightness; out_i2c_err: dev_err(pchip->dev, "i2c failed to access REG_CTRL\n"); return bl->props.brightness; } static int lm3630a_bank_b_get_brightness(struct backlight_device *bl) { int brightness, rval; struct lm3630a_chip *pchip = bl_get_data(bl); enum lm3630a_pwm_ctrl pwm_ctrl = pchip->pdata->pwm_ctrl; if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0) { rval = lm3630a_read(pchip, REG_PWM_OUTHIGH); if (rval < 0) goto out_i2c_err; brightness = (rval & 0x01) << 8; rval = lm3630a_read(pchip, REG_PWM_OUTLOW); if (rval < 0) goto out_i2c_err; brightness |= rval; goto out; } /* disable sleep */ rval = lm3630a_update(pchip, REG_CTRL, 0x80, 0x00); if (rval < 0) goto out_i2c_err; usleep_range(1000, 2000); rval = lm3630a_read(pchip, REG_BRT_B); if (rval < 0) goto out_i2c_err; brightness = rval; out: bl->props.brightness = brightness; 
return bl->props.brightness; out_i2c_err: dev_err(pchip->dev, "i2c failed to access register\n"); return 0; } static const struct backlight_ops lm3630a_bank_b_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = lm3630a_bank_b_update_status, .get_brightness = lm3630a_bank_b_get_brightness, }; static int lm3630a_backlight_register(struct lm3630a_chip *pchip) { struct backlight_properties props; struct lm3630a_platform_data *pdata = pchip->pdata; props.type = BACKLIGHT_RAW; if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) { props.brightness = pdata->leda_init_brt; props.max_brightness = pdata->leda_max_brt; pchip->bleda = devm_backlight_device_register(pchip->dev, "lm3630a_leda", pchip->dev, pchip, &lm3630a_bank_a_ops, &props); if (IS_ERR(pchip->bleda)) return PTR_ERR(pchip->bleda); } if ((pdata->ledb_ctrl != LM3630A_LEDB_DISABLE) && (pdata->ledb_ctrl != LM3630A_LEDB_ON_A)) { props.brightness = pdata->ledb_init_brt; props.max_brightness = pdata->ledb_max_brt; pchip->bledb = devm_backlight_device_register(pchip->dev, "lm3630a_ledb", pchip->dev, pchip, &lm3630a_bank_b_ops, &props); if (IS_ERR(pchip->bledb)) return PTR_ERR(pchip->bledb); } return 0; } static const struct regmap_config lm3630a_regmap = { .reg_bits = 8, .val_bits = 8, .max_register = REG_MAX, }; static int lm3630a_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lm3630a_platform_data *pdata = dev_get_platdata(&client->dev); struct lm3630a_chip *pchip; int rval; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "fail : i2c functionality check\n"); return -EOPNOTSUPP; } pchip = devm_kzalloc(&client->dev, sizeof(struct lm3630a_chip), GFP_KERNEL); if (!pchip) return -ENOMEM; pchip->dev = &client->dev; pchip->regmap = devm_regmap_init_i2c(client, &lm3630a_regmap); if (IS_ERR(pchip->regmap)) { rval = PTR_ERR(pchip->regmap); dev_err(&client->dev, "fail : allocate reg. 
map: %d\n", rval); return rval; } i2c_set_clientdata(client, pchip); if (pdata == NULL) { pdata = devm_kzalloc(pchip->dev, sizeof(struct lm3630a_platform_data), GFP_KERNEL); if (pdata == NULL) return -ENOMEM; /* default values */ pdata->leda_ctrl = LM3630A_LEDA_ENABLE; pdata->ledb_ctrl = LM3630A_LEDB_ENABLE; pdata->leda_max_brt = LM3630A_MAX_BRIGHTNESS; pdata->ledb_max_brt = LM3630A_MAX_BRIGHTNESS; pdata->leda_init_brt = LM3630A_MAX_BRIGHTNESS; pdata->ledb_init_brt = LM3630A_MAX_BRIGHTNESS; } pchip->pdata = pdata; /* chip initialize */ rval = lm3630a_chip_init(pchip); if (rval < 0) { dev_err(&client->dev, "fail : init chip\n"); return rval; } /* backlight register */ rval = lm3630a_backlight_register(pchip); if (rval < 0) { dev_err(&client->dev, "fail : backlight register.\n"); return rval; } /* pwm */ if (pdata->pwm_ctrl != LM3630A_PWM_DISABLE) { pchip->pwmd = devm_pwm_get(pchip->dev, "lm3630a-pwm"); if (IS_ERR(pchip->pwmd)) { dev_err(&client->dev, "fail : get pwm device\n"); return PTR_ERR(pchip->pwmd); } } pchip->pwmd->period = pdata->pwm_period; /* interrupt enable : irq 0 is not allowed */ pchip->irq = client->irq; if (pchip->irq) { rval = lm3630a_intr_config(pchip); if (rval < 0) return rval; } dev_info(&client->dev, "LM3630A backlight register OK.\n"); return 0; } static int lm3630a_remove(struct i2c_client *client) { int rval; struct lm3630a_chip *pchip = i2c_get_clientdata(client); rval = lm3630a_write(pchip, REG_BRT_A, 0); if (rval < 0) dev_err(pchip->dev, "i2c failed to access register\n"); rval = lm3630a_write(pchip, REG_BRT_B, 0); if (rval < 0) dev_err(pchip->dev, "i2c failed to access register\n"); if (pchip->irq) { free_irq(pchip->irq, pchip); flush_workqueue(pchip->irqthread); destroy_workqueue(pchip->irqthread); } return 0; } static const struct i2c_device_id lm3630a_id[] = { {LM3630A_NAME, 0}, {} }; MODULE_DEVICE_TABLE(i2c, lm3630a_id); static struct i2c_driver lm3630a_i2c_driver = { .driver = { .name = LM3630A_NAME, }, .probe = lm3630a_probe, 
.remove = lm3630a_remove, .id_table = lm3630a_id, }; module_i2c_driver(lm3630a_i2c_driver); MODULE_DESCRIPTION("Texas Instruments Backlight driver for LM3630A"); MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>"); MODULE_AUTHOR("LDD MLP <ldd-mlp@list.ti.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
MoKee/android_kernel_htc_flounder
drivers/usb/gadget/pxa25x_udc.c
2160
58175
/* * Intel PXA25x and IXP4xx on-chip full speed USB device controllers * * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker) * Copyright (C) 2003 Robert Schwebel, Pengutronix * Copyright (C) 2003 Benedikt Spranger, Pengutronix * Copyright (C) 2003 David Brownell * Copyright (C) 2003 Joshua Wise * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* #define VERBOSE_DEBUG */ #include <linux/device.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/platform_data/pxa2xx_udc.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/irq.h> #include <linux/clk.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/io.h> #include <linux/prefetch.h> #include <asm/byteorder.h> #include <asm/dma.h> #include <asm/gpio.h> #include <asm/mach-types.h> #include <asm/unaligned.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/otg.h> /* * This driver is PXA25x only. Grab the right register definitions. */ #ifdef CONFIG_ARCH_PXA #include <mach/pxa25x-udc.h> #endif #ifdef CONFIG_ARCH_LUBBOCK #include <mach/lubbock.h> #endif /* * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x * series processors. The UDC for the IXP 4xx series is very similar. * There are fifteen endpoints, in addition to ep0. * * Such controller drivers work with a gadget driver. 
The gadget driver * returns descriptors, implements configuration and data protocols used * by the host to interact with this device, and allocates endpoints to * the different protocol interfaces. The controller driver virtualizes * usb hardware so that the gadget drivers will be more portable. * * This UDC hardware wants to implement a bit too much USB protocol, so * it constrains the sorts of USB configuration change events that work. * The errata for these chips are misleading; some "fixed" bugs from * pxa250 a0/a1 b0/b1/b2 sure act like they're still there. * * Note that the UDC hardware supports DMA (except on IXP) but that's * not used here. IN-DMA (to host) is simple enough, when the data is * suitably aligned (16 bytes) ... the network stack doesn't do that, * other software can. OUT-DMA is buggy in most chip versions, as well * as poorly designed (data toggle not automatic). So this driver won't * bother using DMA. (Mostly-working IN-DMA support was available in * kernels before 2.6.23, but was never enabled or well tested.) */ #define DRIVER_VERSION "30-June-2007" #define DRIVER_DESC "PXA 25x USB Device Controller driver" static const char driver_name [] = "pxa25x_udc"; static const char ep0name [] = "ep0"; #ifdef CONFIG_ARCH_IXP4XX /* cpu-specific register addresses are compiled in to this code */ #ifdef CONFIG_ARCH_PXA #error "Can't configure both IXP and PXA" #endif /* IXP doesn't yet support <linux/clk.h> */ #define clk_get(dev,name) NULL #define clk_enable(clk) do { } while (0) #define clk_disable(clk) do { } while (0) #define clk_put(clk) do { } while (0) #endif #include "pxa25x_udc.h" #ifdef CONFIG_USB_PXA25X_SMALL #define SIZE_STR " (small)" #else #define SIZE_STR "" #endif /* --------------------------------------------------------------------------- * endpoint related parts of the api to the usb controller hardware, * used by gadget driver; and the inner talker-to-hardware core. 
 * ---------------------------------------------------------------------------
 */

static void pxa25x_ep_fifo_flush (struct usb_ep *ep);
static void nuke (struct pxa25x_ep *, int status);

/* one GPIO should control a D+ pullup, so host sees this device (or not) */
static void pullup_off(void)
{
	struct pxa2xx_udc_mach_info		*mach = the_controller->mach;
	int off_level = mach->gpio_pullup_inverted;

	if (gpio_is_valid(mach->gpio_pullup))
		gpio_set_value(mach->gpio_pullup, off_level);
	else if (mach->udc_command)
		/* boards without a pullup GPIO use a board-specific hook */
		mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
}

/* counterpart of pullup_off(): make the device visible to the host */
static void pullup_on(void)
{
	struct pxa2xx_udc_mach_info		*mach = the_controller->mach;
	int on_level = !mach->gpio_pullup_inverted;

	if (gpio_is_valid(mach->gpio_pullup))
		gpio_set_value(mach->gpio_pullup, on_level);
	else if (mach->udc_command)
		mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
}

/* unmask the per-endpoint irq: endpoints 0-7 live in UICR0, 8-15 in UICR1;
 * a ZERO bit in UICRx enables that endpoint's interrupt.
 */
static void pio_irq_enable(int bEndpointAddress)
{
	bEndpointAddress &= 0xf;
	if (bEndpointAddress < 8)
		UICR0 &= ~(1 << bEndpointAddress);
	else {
		bEndpointAddress -= 8;
		UICR1 &= ~(1 << bEndpointAddress);
	}
}

/* mask the per-endpoint irq (a SET bit in UICRx disables it) */
static void pio_irq_disable(int bEndpointAddress)
{
	bEndpointAddress &= 0xf;
	if (bEndpointAddress < 8)
		UICR0 |= 1 << bEndpointAddress;
	else {
		bEndpointAddress -= 8;
		UICR1 |= 1 << bEndpointAddress;
	}
}

/* The UDCCR reg contains mask and interrupt status bits,
 * so using '|=' isn't safe as it may ack an interrupt.
 */
#define UDCCR_MASK_BITS         (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)

static inline void udc_set_mask_UDCCR(int mask)
{
	/* keep only the mask bits of the current value, then OR in
	 * the requested mask bits -- never writes a status bit.
	 */
	UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
}

static inline void udc_clear_mask_UDCCR(int mask)
{
	UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
}

static inline void udc_ack_int_UDCCR(int mask)
{
	/* udccr contains the bits we don't want to change */
	__u32	udccr = UDCCR & UDCCR_MASK_BITS;

	/* write 1 to the requested status bit(s) to acknowledge them */
	UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
}

/*
 * endpoint enable/disable
 *
 * we need to verify the descriptors used to enable endpoints.
 * since pxa25x
 * endpoint configurations are fixed, and are pretty much always enabled,
 * there's not a lot to manage here.
 *
 * because pxa25x can't selectively initialize bulk (or interrupt) endpoints,
 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
 * for a single interface (with only the default altsetting) and for gadget
 * drivers that don't halt endpoints (not reset by set_interface).  that also
 * means that if you use ISO, you must violate the USB spec rule that all
 * iso endpoints must be in non-default altsettings.
 */

/* validate the descriptor against this (fixed-function) endpoint and
 * mark the endpoint usable; returns 0 or a negative errno.
 */
static int pxa25x_ep_enable (struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct pxa25x_ep        *ep;
	struct pxa25x_udc       *dev;

	ep = container_of (_ep, struct pxa25x_ep, ep);
	if (!_ep || !desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| ep->bEndpointAddress != desc->bEndpointAddress
			|| ep->fifo_size < usb_endpoint_maxp (desc)) {
		DMSG("%s, bad ep or descriptor\n", __func__);
		return -EINVAL;
	}

	/* xfer types must match, except that interrupt ~= bulk */
	if (ep->bmAttributes != desc->bmAttributes
			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
		DMSG("%s, %s type mismatch\n", __func__, _ep->name);
		return -EINVAL;
	}

	/* hardware _could_ do smaller, but driver doesn't */
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
				&& usb_endpoint_maxp (desc)
						!= BULK_FIFO_SIZE)
			|| !desc->wMaxPacketSize) {
		DMSG("%s, bad %s maxpacket\n", __func__, _ep->name);
		return -ERANGE;
	}

	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		DMSG("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	ep->ep.desc = desc;
	ep->stopped = 0;
	ep->pio_irqs = 0;
	ep->ep.maxpacket = usb_endpoint_maxp (desc);

	/* flush fifo (mostly for OUT buffers) */
	pxa25x_ep_fifo_flush (_ep);

	/* ... reset halt state too, if we could ... */

	DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
	return 0;
}

/* shut down an endpoint: abort queued requests, flush the fifo, and
 * mark it disabled; irqs are blocked while tearing down.
 */
static int pxa25x_ep_disable (struct usb_ep *_ep)
{
	struct pxa25x_ep	*ep;
	unsigned long		flags;

	ep = container_of (_ep, struct pxa25x_ep, ep);
	if (!_ep || !ep->ep.desc) {
		DMSG("%s, %s not enabled\n", __func__,
			_ep ? ep->ep.name : NULL);
		return -EINVAL;
	}
	local_irq_save(flags);

	nuke (ep, -ESHUTDOWN);

	/* flush fifo (mostly for IN buffers) */
	pxa25x_ep_fifo_flush (_ep);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	local_irq_restore(flags);
	DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
	return 0;
}

/*-------------------------------------------------------------------------*/

/* for the pxa25x, these can just wrap kmalloc/kfree.  gadget drivers
 * must still pass correctly initialized endpoints, since other controller
 * drivers may care about how it's currently set up (dma issues etc).
 */

/*
 * pxa25x_ep_alloc_request - allocate a request data structure
 */
static struct usb_request *
pxa25x_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct pxa25x_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD (&req->queue);
	return &req->req;
}

/*
 * pxa25x_ep_free_request - deallocate a request data structure
 */
static void
pxa25x_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa25x_request	*req;

	req = container_of (_req, struct pxa25x_request, req);
	WARN_ON(!list_empty (&req->queue));
	kfree(req);
}

/*-------------------------------------------------------------------------*/

/*
 *	done - retire a request; caller blocked irqs
 */
static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status)
{
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (likely (req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	if (status && status != -ESHUTDOWN)
		DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	req->req.complete(&ep->ep, &req->req);
	ep->stopped = stopped;
}

/* reset the ep0 software state machine to its idle state */
static inline void ep0_idle (struct pxa25x_udc *dev)
{
	dev->ep0state = EP0_IDLE;
}

/* copy up to one max-packet of request data into an IN fifo register;
 * advances req->req.actual and returns the byte count written.
 */
static int
write_packet(volatile u32 *uddr, struct pxa25x_request *req, unsigned max)
{
	u8		*buf;
	unsigned	length, count;

	buf = req->req.buf + req->req.actual;
	prefetch(buf);

	/* how big will this packet be? */
	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	count = length;
	while (likely(count--))
		*uddr = *buf++;

	return length;
}

/*
 * write to an IN endpoint fifo, as many packets as possible.
 * irqs will use this to write the rest later.
 * caller guarantees at least one packet buffer is ready (or a zlp).
 */
static int
write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
	unsigned		max;

	max = usb_endpoint_maxp(ep->ep.desc);
	do {
		unsigned	count;
		int		is_last, is_short;

		count = write_packet(ep->reg_uddr, req, max);

		/* last packet is usually short (or a zlp) */
		if (unlikely (count != max))
			is_last = is_short = 1;
		else {
			if (likely(req->req.length != req->req.actual)
					|| req->req.zero)
				is_last = 0;
			else
				is_last = 1;
			/* interrupt/iso maxpacket may not fill the fifo */
			is_short = unlikely (max < ep->fifo_size);
		}

		DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
			ep->ep.name, count,
			is_last ? "/L" : "", is_short ? "/S" : "",
			req->req.length - req->req.actual, req);

		/* let loose that packet. maybe try writing another one,
		 * double buffering might work.  TSP, TPC, and TFS
		 * bit values are the same for all normal IN endpoints.
		 */
		*ep->reg_udccs = UDCCS_BI_TPC;
		if (is_short)
			*ep->reg_udccs = UDCCS_BI_TSP;

		/* requests complete when all IN data is in the FIFO */
		if (is_last) {
			done (ep, req, 0);
			if (list_empty(&ep->queue))
				pio_irq_disable (ep->bEndpointAddress);
			return 1;
		}

		// TODO experiment: how robust can fifo mode tweaking be?
		// double buffering is off in the default fifo mode, which
		// prevents TFS from being set here.

	} while (*ep->reg_udccs & UDCCS_BI_TFS);
	return 0;
}

/* caller asserts req->pending (ep0 irq status nyet cleared); starts
 * ep0 data stage.  these chips want very simple state transitions.
 */
static inline
void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag)
{
	UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
	USIR0 = USIR0_IR0;
	dev->req_pending = 0;
	DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
		__func__, tag, UDCCS0, flags);
}

/* push one packet of an ep0 IN data stage; on the final (short) packet
 * also handles the status-stage handshake quirks.
 */
static int
write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
	unsigned	count;
	int		is_short;

	count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
	ep->dev->stats.write.bytes += count;

	/* last packet "must be" short (or a zlp) */
	is_short = (count != EP0_FIFO_SIZE);

	DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
		req->req.length - req->req.actual, req);

	if (unlikely (is_short)) {
		if (ep->dev->req_pending)
			ep0start(ep->dev, UDCCS0_IPR, "short IN");
		else
			UDCCS0 = UDCCS0_IPR;

		count = req->req.length;
		done (ep, req, 0);
		ep0_idle(ep->dev);
#ifndef CONFIG_ARCH_IXP4XX
#if 1
		/* This seems to get rid of lost status irqs in some cases:
		 * host responds quickly, or next request involves config
		 * change automagic, or should have been hidden, or ...
		 *
		 * FIXME get rid of all udelays possible...
		 */
		if (count >= EP0_FIFO_SIZE) {
			count = 100;
			do {
				if ((UDCCS0 & UDCCS0_OPR) != 0) {
					/* clear OPR, generate ack */
					UDCCS0 = UDCCS0_OPR;
					break;
				}
				count--;
				udelay(1);
			} while (count);
		}
#endif
#endif
	} else if (ep->dev->req_pending)
		ep0start(ep->dev, 0, "IN");

	return is_short;
}

/*
 * read_fifo -  unload packet(s) from the fifo we use for usb OUT
 * transfers and put them into the request.  caller should have made
 * sure there's at least one packet ready.
 *
 * returns true if the request completed because of short packet or the
 * request buffer having filled (and maybe overran till end-of-packet).
 */
static int
read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
	for (;;) {
		u32		udccs;
		u8		*buf;
		unsigned	bufferspace, count, is_short;

		/* make sure there's a packet in the FIFO.
		 * UDCCS_{BO,IO}_RPC are all the same bit value.
		 * UDCCS_{BO,IO}_RNE are all the same bit value.
		 */
		udccs = *ep->reg_udccs;
		if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
			break;
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);
		bufferspace = req->req.length - req->req.actual;

		/* read all bytes from this packet */
		if (likely (udccs & UDCCS_BO_RNE)) {
			/* byte-count register holds (packet size - 1) */
			count = 1 + (0x0ff & *ep->reg_ubcr);
			req->req.actual += min (count, bufferspace);
		} else /* zlp */
			count = 0;
		is_short = (count < ep->ep.maxpacket);
		DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
			ep->ep.name, udccs, count,
			is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
		while (likely (count-- != 0)) {
			u8	byte = (u8) *ep->reg_uddr;

			if (unlikely (bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data.
				 */
				if (req->req.status != -EOVERFLOW)
					DMSG("%s overflow %d\n",
						ep->ep.name, count);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}
		*ep->reg_udccs = UDCCS_BO_RPC;
		/* RPC/RSP/RNE could now reflect the other packet buffer */

		/* iso is one request per packet */
		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
			if (udccs & UDCCS_IO_ROF)
				req->req.status = -EHOSTUNREACH;
			/* more like "is_done" */
			is_short = 1;
		}

		/* completion */
		if (is_short || req->req.actual == req->req.length) {
			done (ep, req, 0);
			if (list_empty(&ep->queue))
				pio_irq_disable (ep->bEndpointAddress);
			return 1;
		}

		/* finished that packet.  the next one may be waiting... */
	}
	return 0;
}

/*
 * special ep0 version of the above.  no UBCR0 or double buffering; status
 * handshaking is magic.  most device protocols don't need control-OUT.
 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
 * protocols do use them.
 */
static int
read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
	u8		*buf, byte;
	unsigned	bufferspace;

	buf = req->req.buf + req->req.actual;
	bufferspace = req->req.length - req->req.actual;

	while (UDCCS0 & UDCCS0_RNE) {
		byte = (u8) UDDR0;

		if (unlikely (bufferspace == 0)) {
			/* this happens when the driver's buffer
			 * is smaller than what the host sent.
			 * discard the extra data.
			 */
			if (req->req.status != -EOVERFLOW)
				DMSG("%s overflow\n", ep->ep.name);
			req->req.status = -EOVERFLOW;
		} else {
			*buf++ = byte;
			req->req.actual++;
			bufferspace--;
		}
	}

	/* ack the OUT packet and prime the IN (status) handshake */
	UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;

	/* completion */
	if (req->req.actual >= req->req.length)
		return 1;

	/* finished that packet.  the next one may be waiting... */
	return 0;
}

/*-------------------------------------------------------------------------*/

/* queue a request on an endpoint; if the queue was empty and the fifo is
 * ready, kickstart the transfer immediately (ep0 follows its own state
 * machine).  returns 0 or a negative errno; called with irqs enabled.
 */
static int
pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct pxa25x_request	*req;
	struct pxa25x_ep	*ep;
	struct pxa25x_udc	*dev;
	unsigned long		flags;

	req = container_of(_req, struct pxa25x_request, req);
	if (unlikely (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))) {
		DMSG("%s, bad params\n", __func__);
		return -EINVAL;
	}

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) {
		DMSG("%s, bad ep\n", __func__);
		return -EINVAL;
	}

	dev = ep->dev;
	if (unlikely (!dev->driver
			|| dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		DMSG("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	/* iso is always one packet per request, that's the only way
	 * we can report per-packet status.  that also helps with dma.
	 */
	if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
			&& req->req.length > usb_endpoint_maxp(ep->ep.desc)))
		return -EMSGSIZE;

	DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
		_ep->name, _req, _req->length, _req->buf);

	local_irq_save(flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		if (ep->ep.desc == NULL/* ep0 */) {
			unsigned	length = _req->length;

			switch (dev->ep0state) {
			case EP0_IN_DATA_PHASE:
				dev->stats.write.ops++;
				if (write_ep0_fifo(ep, req))
					req = NULL;
				break;

			case EP0_OUT_DATA_PHASE:
				dev->stats.read.ops++;
				/* messy ... */
				if (dev->req_config) {
					DBG(DBG_VERBOSE, "ep0 config ack%s\n",
						dev->has_cfr ?  "" : " raced");
					if (dev->has_cfr)
						UDCCFR = UDCCFR_AREN|UDCCFR_ACM
							|UDCCFR_MB1;
					done(ep, req, 0);
					dev->ep0state = EP0_END_XFER;
					local_irq_restore (flags);
					return 0;
				}
				if (dev->req_pending)
					ep0start(dev, UDCCS0_IPR, "OUT");
				if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
						&& read_ep0_fifo(ep, req))) {
					ep0_idle(dev);
					done(ep, req, 0);
					req = NULL;
				}
				break;

			default:
				DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
				local_irq_restore (flags);
				return -EL2HLT;
			}
		/* can the FIFO can satisfy the request immediately? */
		} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
			if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
					&& write_fifo(ep, req))
				req = NULL;
		} else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
				&& read_fifo(ep, req)) {
			req = NULL;
		}

		if (likely(req && ep->ep.desc))
			pio_irq_enable(ep->bEndpointAddress);
	}

	/* pio or dma irq handler advances the queue. */
	if (likely(req != NULL))
		list_add_tail(&req->queue, &ep->queue);
	local_irq_restore(flags);

	return 0;
}

/*
 *	nuke - dequeue ALL requests
 */
static void nuke(struct pxa25x_ep *ep, int status)
{
	struct pxa25x_request *req;

	/* called with irqs blocked */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct pxa25x_request,
				queue);
		done(ep, req, status);
	}
	if (ep->ep.desc)
		pio_irq_disable (ep->bEndpointAddress);
}

/* dequeue JUST ONE request */
static int pxa25x_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct pxa25x_ep	*ep;
	struct pxa25x_request	*req;
	unsigned long		flags;

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (!_ep || ep->ep.name == ep0name)
		return -EINVAL;

	local_irq_save(flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	done(ep, req, -ECONNRESET);

	local_irq_restore(flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

/* halt (stall) an endpoint.  value==0 (clearing a halt) is rejected
 * because only the host can clear halts on this hardware.
 */
static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct pxa25x_ep	*ep;
	unsigned long		flags;

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (unlikely (!_ep
			|| (!ep->ep.desc && ep->ep.name != ep0name))
			|| ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		DMSG("%s, bad ep\n", __func__);
		return -EINVAL;
	}
	if (value == 0) {
		/* this path (reset toggle+halt) is needed to implement
		 * SET_INTERFACE on normal hardware.  but it can't be
		 * done from software on the PXA UDC, and the hardware
		 * forgets to do it as part of SET_INTERFACE automagic.
		 */
		DMSG("only host can clear %s halt\n", _ep->name);
		return -EROFS;
	}

	local_irq_save(flags);

	/* refuse to stall an IN endpoint that still has data queued
	 * or in its fifo -- the gadget must drain it first.
	 */
	if ((ep->bEndpointAddress & USB_DIR_IN) != 0
			&& ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
			   || !list_empty(&ep->queue))) {
		local_irq_restore(flags);
		return -EAGAIN;
	}

	/* FST bit is the same for control, bulk in, bulk out, interrupt in */
	*ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;

	/* ep0 needs special care */
	if (!ep->ep.desc) {
		start_watchdog(ep->dev);
		ep->dev->req_pending = 0;
		ep->dev->ep0state = EP0_STALL;

	/* and bulk/intr endpoints like dropping stalls too */
	} else {
		unsigned i;
		/* poll up to ~1ms for the SST (stall sent) bit */
		for (i = 0; i < 1000; i += 20) {
			if (*ep->reg_udccs & UDCCS_BI_SST)
				break;
			udelay(20);
		}
	}
	local_irq_restore(flags);

	DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
	return 0;
}

/* report bytes available in an OUT fifo (IN fifos can't be queried) */
static int pxa25x_ep_fifo_status(struct usb_ep *_ep)
{
	struct pxa25x_ep        *ep;

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (!_ep) {
		DMSG("%s, bad ep\n", __func__);
		return -ENODEV;
	}
	/* pxa can't report unclaimed bytes from IN fifos */
	if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
		return -EOPNOTSUPP;
	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
			|| (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
		return 0;
	else
		return (*ep->reg_ubcr & 0xfff) + 1;
}

/* discard any unclaimed fifo contents without touching toggle/halt state */
static void pxa25x_ep_fifo_flush(struct usb_ep *_ep)
{
	struct pxa25x_ep        *ep;

	ep = container_of(_ep, struct pxa25x_ep, ep);
	if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
		DMSG("%s, bad ep\n", __func__);
		return;
	}

	/* toggle and halt bits stay unchanged */

	/* for OUT, just read and discard the FIFO contents. */
	if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
		while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
			(void) *ep->reg_uddr;
		return;
	}

	/* most IN status is the same, but ISO can't stall */
	*ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
		| (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
			? 0 : UDCCS_BI_SST);
}

static struct usb_ep_ops pxa25x_ep_ops = {
	.enable		= pxa25x_ep_enable,
	.disable	= pxa25x_ep_disable,

	.alloc_request	= pxa25x_ep_alloc_request,
	.free_request	= pxa25x_ep_free_request,

	.queue		= pxa25x_ep_queue,
	.dequeue	= pxa25x_ep_dequeue,

	.set_halt	= pxa25x_ep_set_halt,
	.fifo_status	= pxa25x_ep_fifo_status,
	.fifo_flush	= pxa25x_ep_fifo_flush,
};

/* ---------------------------------------------------------------------------
 *	device-scoped parts of the api to the usb controller hardware
 * ---------------------------------------------------------------------------
 */

/* current USB frame number, assembled from the two frame-number registers */
static int pxa25x_udc_get_frame(struct usb_gadget *_gadget)
{
	return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
}

/* issue remote wakeup (resume signalling), if the host enabled it */
static int pxa25x_udc_wakeup(struct usb_gadget *_gadget)
{
	/* host may not have enabled remote wakeup */
	if ((UDCCS0 & UDCCS0_DRWF) == 0)
		return -EHOSTUNREACH;
	udc_set_mask_UDCCR(UDCCR_RSM);
	return 0;
}

static void stop_activity(struct pxa25x_udc *, struct usb_gadget_driver *);
static void udc_enable (struct pxa25x_udc *);
static void udc_disable(struct pxa25x_udc *);

/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
 * in active use.
 */
static int pullup(struct pxa25x_udc *udc)
{
	/* active only when vbus is present, pullup is requested and
	 * the controller is not suspended
	 */
	int is_active = udc->vbus && udc->pullup && !udc->suspended;
	DMSG("%s\n", is_active ? "active" : "inactive");
	if (is_active) {
		if (!udc->active) {
			udc->active = 1;
			/* Enable clock for USB device */
			clk_enable(udc->clk);
			udc_enable(udc);
		}
	} else {
		if (udc->active) {
			if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
				DMSG("disconnect %s\n", udc->driver
					? udc->driver->driver.name
					: "(no driver)");
				stop_activity(udc, udc->driver);
			}
			udc_disable(udc);
			/* Disable clock for USB device */
			clk_disable(udc->clk);
			udc->active = 0;
		}
	}
	return 0;
}

/* VBUS reporting logically comes from a transceiver */
static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct pxa25x_udc	*udc;

	udc = container_of(_gadget, struct pxa25x_udc, gadget);
	udc->vbus = is_active;
	DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
	pullup(udc);
	return 0;
}

/* drivers may have software control over D+ pullup */
static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active)
{
	struct pxa25x_udc	*udc;

	udc = container_of(_gadget, struct pxa25x_udc, gadget);

	/* not all boards support pullup control */
	if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
		return -EOPNOTSUPP;

	udc->pullup = (is_active != 0);
	pullup(udc);
	return 0;
}

/* boards may consume current from VBUS, up to 100-500mA based on config.
 * the 500uA suspend ceiling means that exclusively vbus-powered PXA designs
 * violate USB specs.
 */
static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
{
	struct pxa25x_udc	*udc;

	udc = container_of(_gadget, struct pxa25x_udc, gadget);

	/* delegate to the transceiver when one is present */
	if (!IS_ERR_OR_NULL(udc->transceiver))
		return usb_phy_set_power(udc->transceiver, mA);
	return -EOPNOTSUPP;
}

static int pxa25x_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int pxa25x_udc_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver);

static const struct usb_gadget_ops pxa25x_udc_ops = {
	.get_frame	= pxa25x_udc_get_frame,
	.wakeup		= pxa25x_udc_wakeup,
	.vbus_session	= pxa25x_udc_vbus_session,
	.pullup		= pxa25x_udc_pullup,
	.vbus_draw	= pxa25x_udc_vbus_draw,
	.udc_start	= pxa25x_udc_start,
	.udc_stop	= pxa25x_udc_stop,
};

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_GADGET_DEBUG_FS

/* dump driver/hardware state and per-endpoint queues to debugfs */
static int udc_seq_show(struct seq_file *m, void *_d)
{
	struct pxa25x_udc	*dev = m->private;
	unsigned long		flags;
	int			i;
	u32			tmp;

	local_irq_save(flags);

	/* basic device status */
	seq_printf(m, DRIVER_DESC "\n"
		"%s version: %s\nGadget driver: %s\nHost %s\n\n",
		driver_name, DRIVER_VERSION SIZE_STR "(pio)",
		dev->driver ? dev->driver->driver.name : "(none)",
		dev->gadget.speed == USB_SPEED_FULL
			? "full speed" : "disconnected");

	/* registers for device and ep0 */
	seq_printf(m,
		"uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
		UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);

	tmp = UDCCR;
	seq_printf(m,
		"udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
		(tmp & UDCCR_REM) ? " rem" : "",
		(tmp & UDCCR_RSTIR) ? " rstir" : "",
		(tmp & UDCCR_SRM) ? " srm" : "",
		(tmp & UDCCR_SUSIR) ? " susir" : "",
		(tmp & UDCCR_RESIR) ? " resir" : "",
		(tmp & UDCCR_RSM) ? " rsm" : "",
		(tmp & UDCCR_UDA) ? " uda" : "",
		(tmp & UDCCR_UDE) ? " ude" : "");

	tmp = UDCCS0;
	seq_printf(m,
		"udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
		(tmp & UDCCS0_SA) ? " sa" : "",
		(tmp & UDCCS0_RNE) ? " rne" : "",
		(tmp & UDCCS0_FST) ? " fst" : "",
		(tmp & UDCCS0_SST) ? " sst" : "",
		(tmp & UDCCS0_DRWF) ? " dwrf" : "",
		(tmp & UDCCS0_FTF) ? " ftf" : "",
		(tmp & UDCCS0_IPR) ? " ipr" : "",
		(tmp & UDCCS0_OPR) ? " opr" : "");

	if (dev->has_cfr) {
		tmp = UDCCFR;
		seq_printf(m,
			"udccfr %02X =%s%s\n", tmp,
			(tmp & UDCCFR_AREN) ? " aren" : "",
			(tmp & UDCCFR_ACM) ? " acm" : "");
	}

	if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver)
		goto done;

	seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
		dev->stats.write.bytes, dev->stats.write.ops,
		dev->stats.read.bytes, dev->stats.read.ops,
		dev->stats.irqs);

	/* dump endpoint queues */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa25x_ep	*ep = &dev->ep [i];
		struct pxa25x_request	*req;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*desc;

			desc = ep->ep.desc;
			if (!desc)
				continue;
			tmp = *dev->ep [i].reg_udccs;
			seq_printf(m,
				"%s max %d %s udccs %02x irqs %lu\n",
				ep->ep.name, usb_endpoint_maxp(desc),
				"pio", tmp, ep->pio_irqs);
			/* TODO translate all five groups of udccs bits! */

		} else /* ep0 should only have one transfer queued */
			seq_printf(m, "ep0 max 16 pio irqs %lu\n",
				ep->pio_irqs);

		if (list_empty(&ep->queue)) {
			seq_printf(m, "\t(nothing queued)\n");
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			seq_printf(m,
				"\treq %p len %d/%d buf %p\n",
				&req->req, req->req.actual,
				req->req.length, req->req.buf);
		}
	}

done:
	local_irq_restore(flags);
	return 0;
}

static int udc_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, udc_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= udc_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

#define create_debug_files(dev) \
	do { \
		dev->debugfs_udc = debugfs_create_file(dev->gadget.name, \
			S_IRUGO, NULL, dev, &debug_fops); \
	} while (0)
#define remove_debug_files(dev) \
	do { \
		if (dev->debugfs_udc) \
			debugfs_remove(dev->debugfs_udc); \
	} while (0)

#else	/* !CONFIG_USB_GADGET_DEBUG_FILES */

#define create_debug_files(dev) do {} while (0)
#define remove_debug_files(dev) do {} while (0)

#endif	/* CONFIG_USB_GADGET_DEBUG_FILES */

/*-------------------------------------------------------------------------*/

/*
 *	udc_disable - disable USB device controller
 */
static void udc_disable(struct pxa25x_udc *dev)
{
	/* block all irqs */
	udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
	UICR0 = UICR1 = 0xff;
	UFNRH = UFNRH_SIM;

	/* if hardware supports it, disconnect from usb */
	pullup_off();

	udc_clear_mask_UDCCR(UDCCR_UDE);

	ep0_idle (dev);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
}

/*
 *	udc_reinit - initialize software state
 */
static void udc_reinit(struct pxa25x_udc *dev)
{
	u32	i;

	/* device/ep0 records init */
	INIT_LIST_HEAD (&dev->gadget.ep_list);
	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
	dev->ep0state = EP0_IDLE;

	/* basic endpoint records init */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa25x_ep *ep = &dev->ep[i];

		/* ep0 is never on the gadget's endpoint list */
		if (i != 0)
			list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.desc = NULL;
		ep->stopped = 0;
		INIT_LIST_HEAD (&ep->queue);
		ep->pio_irqs = 0;
	}

	/* the rest was statically initialized, and is read-only */
}

/* until it's enabled, this UDC should be completely invisible
 * to any USB host.
 */
static void udc_enable (struct pxa25x_udc *dev)
{
	udc_clear_mask_UDCCR(UDCCR_UDE);

	/* try to clear these bits before we enable the udc */
	udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);

	ep0_idle(dev);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->stats.irqs = 0;

	/*
	 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
	 * - enable UDC
	 * - if RESET is already in progress, ack interrupt
	 * - unmask reset interrupt
	 */
	udc_set_mask_UDCCR(UDCCR_UDE);
	if (!(UDCCR & UDCCR_UDA))
		udc_ack_int_UDCCR(UDCCR_RSTIR);

	if (dev->has_cfr /* UDC_RES2 is defined */) {
		/* pxa255 (a0+) can avoid a set_config race that could
		 * prevent gadget drivers from configuring correctly
		 */
		UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
	} else {
		/* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
		 * which could result in missing packets and interrupts.
		 * supposedly one bit per endpoint, controlling whether it
		 * double buffers or not; ACM/AREN bits fit into the holes.
		 * zero bits (like USIR0_IRx) disable double buffering.
		 */
		UDC_RES1 = 0x00;
		UDC_RES2 = 0x00;
	}

	/* enable suspend/resume and reset irqs */
	udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);

	/* enable ep0 irqs */
	UICR0 &= ~UICR0_IM0;

	/* if hardware supports it, pullup D+ and wait for reset */
	pullup_on();
}

/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
static int pxa25x_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct pxa25x_udc	*dev = to_pxa25x(g);
	int			retval;

	/* first hook up the driver ... */
	dev->driver = driver;
	dev->pullup = 1;

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	/* connect to bus through transceiver */
	if (!IS_ERR_OR_NULL(dev->transceiver)) {
		retval = otg_set_peripheral(dev->transceiver->otg,
						&dev->gadget);
		if (retval)
			goto bind_fail;
	}

	pullup(dev);
	dump_state(dev);
	return 0;
bind_fail:
	return retval;
}

/* mark the device disconnected: kill all endpoint traffic, notify the
 * gadget driver once, and reset driver-visible endpoint state.
 */
static void
stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect drivers more than once */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* prevent new request submissions, kill any outstanding requests  */
	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
		struct pxa25x_ep *ep = &dev->ep[i];

		ep->stopped = 1;
		nuke(ep, -ESHUTDOWN);
	}
	del_timer_sync(&dev->timer);

	/* report disconnect; the driver is already quiesced */
	if (driver)
		driver->disconnect(&dev->gadget);

	/* re-init driver-visible data structures */
	udc_reinit(dev);
}

/* gadget-core unbind hook: drop the pullup, quiesce the hardware and
 * detach from the transceiver.
 */
static int pxa25x_udc_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct pxa25x_udc	*dev = to_pxa25x(g);

	local_irq_disable();
	dev->pullup = 0;
	pullup(dev);
	stop_activity(dev, driver);
	local_irq_enable();

	if (!IS_ERR_OR_NULL(dev->transceiver))
		(void) otg_set_peripheral(dev->transceiver->otg, NULL);

	dev->driver = NULL;

	dump_state(dev);

	return 0;
}

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_ARCH_LUBBOCK

/* Lubbock has separate connect and disconnect irqs.  More typical designs
 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
 */
static irqreturn_t lubbock_vbus_irq(int irq, void *_dev)
{
	struct pxa25x_udc	*dev = _dev;
	int			vbus;

	dev->stats.irqs++;
	/* each irq reports one VBUS edge; swap which irq is armed so
	 * the opposite transition is caught next.
	 */
	switch (irq) {
	case LUBBOCK_USB_IRQ:
		vbus = 1;
		disable_irq(LUBBOCK_USB_IRQ);
		enable_irq(LUBBOCK_USB_DISC_IRQ);
		break;
	case LUBBOCK_USB_DISC_IRQ:
		vbus = 0;
		disable_irq(LUBBOCK_USB_DISC_IRQ);
		enable_irq(LUBBOCK_USB_IRQ);
		break;
	default:
		return IRQ_NONE;
	}

	pxa25x_udc_vbus_session(&dev->gadget, vbus);
	return IRQ_HANDLED;
}

#endif


/*-------------------------------------------------------------------------*/

static inline void clear_ep_state (struct pxa25x_udc *dev)
{
	unsigned i;

	/* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
	 * fifos, and pending transactions mustn't be continued in any case.
	 */
	for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
		nuke(&dev->ep[i], -ECONNABORTED);
}

/* timer callback: re-assert the ep0 stall if the hardware dropped FST
 * without ever reporting SST (see the "stall" path in handle_ep0).
 */
static void udc_watchdog(unsigned long _dev)
{
	struct pxa25x_udc	*dev = (void *)_dev;

	local_irq_disable();
	if (dev->ep0state == EP0_STALL
			&& (UDCCS0 & UDCCS0_FST) == 0
			&& (UDCCS0 & UDCCS0_SST) == 0) {
		UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
		DBG(DBG_VERBOSE, "ep0 re-stall\n");
		start_watchdog(dev);
	}
	local_irq_enable();
}

/* ep0 interrupt service: runs the control-transfer state machine
 * (SETUP decode, IN/OUT data stages, status, stall) for endpoint zero.
 */
static void handle_ep0 (struct pxa25x_udc *dev)
{
	u32			udccs0 = UDCCS0;
	struct pxa25x_ep	*ep = &dev->ep [0];
	struct pxa25x_request	*req;
	union {
		struct usb_ctrlrequest	r;
		u8			raw [8];
		u32			word [2];
	} u;

	if (list_empty(&ep->queue))
		req = NULL;
	else
		req = list_entry(ep->queue.next, struct pxa25x_request, queue);

	/* clear stall status */
	if (udccs0 & UDCCS0_SST) {
		nuke(ep, -EPIPE);
		UDCCS0 = UDCCS0_SST;
		del_timer(&dev->timer);
		ep0_idle(dev);
	}

	/* previous request unfinished?  non-error iff back-to-back ... */
	if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
		nuke(ep, 0);
		del_timer(&dev->timer);
		ep0_idle(dev);
	}

	switch (dev->ep0state) {
	case EP0_IDLE:
		/* late-breaking status? */
		udccs0 = UDCCS0;

		/* start control request? */
		if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
				== (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
			int i;

			nuke (ep, -EPROTO);

			/* read SETUP packet */
			for (i = 0; i < 8; i++) {
				if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
bad_setup:
					DMSG("SETUP %d!\n", i);
					goto stall;
				}
				u.raw [i] = (u8) UDDR0;
			}
			/* a SETUP packet is exactly 8 bytes; more is junk */
			if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
				goto bad_setup;

got_setup:
			DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
				u.r.bRequestType, u.r.bRequest,
				le16_to_cpu(u.r.wValue),
				le16_to_cpu(u.r.wIndex),
				le16_to_cpu(u.r.wLength));

			/* cope with automagic for some standard requests. */
			dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
						== USB_TYPE_STANDARD;
			dev->req_config = 0;
			dev->req_pending = 1;
			switch (u.r.bRequest) {
			/* hardware restricts gadget drivers here! */
			case USB_REQ_SET_CONFIGURATION:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					/* reflect hardware's automagic
					 * up to the gadget driver.
					 */
config_change:
					dev->req_config = 1;
					clear_ep_state(dev);
					/* if !has_cfr, there's no synch
					 * else use AREN (later) not SA|OPR
					 * USIR0_IR0 acts edge sensitive
					 */
				}
				break;
			/* ... and here, even more ... */
			case USB_REQ_SET_INTERFACE:
				if (u.r.bRequestType == USB_RECIP_INTERFACE) {
					/* udc hardware is broken by design:
					 *  - altsetting may only be zero;
					 *  - hw resets all interfaces' eps;
					 *  - ep reset doesn't include halt(?).
					 */
					DMSG("broken set_interface (%d/%d)\n",
						le16_to_cpu(u.r.wIndex),
						le16_to_cpu(u.r.wValue));
					goto config_change;
				}
				break;
			/* hardware was supposed to hide this */
			case USB_REQ_SET_ADDRESS:
				if (u.r.bRequestType == USB_RECIP_DEVICE) {
					ep0start(dev, 0, "address");
					return;
				}
				break;
			}

			if (u.r.bRequestType & USB_DIR_IN)
				dev->ep0state = EP0_IN_DATA_PHASE;
			else
				dev->ep0state = EP0_OUT_DATA_PHASE;

			i = dev->driver->setup(&dev->gadget, &u.r);
			if (i < 0) {
				/* hardware automagic preventing STALL... */
				if (dev->req_config) {
					/* hardware sometimes neglects to
					 * tell us about config change events,
					 * so later ones may fail...
					 */
					WARNING("config change %02x fail %d?\n",
						u.r.bRequest, i);
					return;
					/* TODO experiment:  if has_cfr,
					 * hardware didn't ACK; maybe we
					 * could actually STALL!
					 */
				}
				DBG(DBG_VERBOSE, "protocol STALL, "
					"%02x err %d\n", UDCCS0, i);
stall:
				/* the watchdog timer helps deal with cases
				 * where udc seems to clear FST wrongly, and
				 * then NAKs instead of STALLing.
				 */
				ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
				start_watchdog(dev);
				dev->ep0state = EP0_STALL;

			/* deferred i/o == no response yet */
			} else if (dev->req_pending) {
				if (likely(dev->ep0state == EP0_IN_DATA_PHASE
						|| dev->req_std
						|| u.r.wLength))
					ep0start(dev, 0, "defer");
				else
					ep0start(dev, UDCCS0_IPR, "defer/IPR");
			}

			/* expect at least one data or status stage irq */
			return;

		} else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
				== (UDCCS0_OPR|UDCCS0_SA))) {
			unsigned i;

			/* pxa210/250 erratum 131 for B0/B1 says RNE lies.
			 * still observed on a pxa255 a0.
			 */
			DBG(DBG_VERBOSE, "e131\n");
			nuke(ep, -EPROTO);

			/* read SETUP data, but don't trust it too much */
			for (i = 0; i < 8; i++)
				u.raw [i] = (u8) UDDR0;
			if ((u.r.bRequestType & USB_RECIP_MASK)
					> USB_RECIP_OTHER)
				goto stall;
			if (u.word [0] == 0 && u.word [1] == 0)
				goto stall;
			goto got_setup;
		} else {
			/* some random early IRQ:
			 * - we acked FST
			 * - IPR cleared
			 * - OPR got set, without SA (likely status stage)
			 */
			UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
		}
		break;
	case EP0_IN_DATA_PHASE:			/* GET_DESCRIPTOR etc */
		if (udccs0 & UDCCS0_OPR) {
			UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
			DBG(DBG_VERBOSE, "ep0in premature status\n");
			if (req)
				done(ep, req, 0);
			ep0_idle(dev);
		} else /* irq was IPR clearing */ {
			if (req) {
				/* this IN packet might finish the request */
				(void) write_ep0_fifo(ep, req);
			} /* else IN token before response was written */
		}
		break;
	case EP0_OUT_DATA_PHASE:		/* SET_DESCRIPTOR etc */
		if (udccs0 & UDCCS0_OPR) {
			if (req) {
				/* this OUT packet might finish the request */
				if (read_ep0_fifo(ep, req))
					done(ep, req, 0);
				/* else more OUT packets expected */
			} /* else OUT token before read was issued */
		} else /* irq was IPR clearing */ {
			DBG(DBG_VERBOSE, "ep0out premature status\n");
			if (req)
				done(ep, req, 0);
			ep0_idle(dev);
		}
		break;
	case EP0_END_XFER:
		if (req)
			done(ep, req, 0);
		/* ack control-IN status (maybe in-zlp was skipped)
		 * also appears after some config change events.
		 */
		if (udccs0 & UDCCS0_OPR)
			UDCCS0 = UDCCS0_OPR;
		ep0_idle(dev);
		break;
	case EP0_STALL:
		UDCCS0 = UDCCS0_FST;
		break;
	}
	USIR0 = USIR0_IR0;
}

/* per-endpoint interrupt service for the non-control endpoints;
 * loops while transfers keep completing so back-to-back packets
 * are handled in one irq.
 */
static void handle_ep(struct pxa25x_ep *ep)
{
	struct pxa25x_request	*req;
	int			is_in = ep->bEndpointAddress & USB_DIR_IN;
	int			completed;
	u32			udccs, tmp;

	do {
		completed = 0;
		if (likely (!list_empty(&ep->queue)))
			req = list_entry(ep->queue.next,
					struct pxa25x_request, queue);
		else
			req = NULL;

		// TODO check FST handling

		udccs = *ep->reg_udccs;
		if (unlikely(is_in)) {	/* irq from TPC, SST, or (ISO) TUR */
			tmp = UDCCS_BI_TUR;
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp |= UDCCS_BI_SST;
			tmp &= udccs;
			if (likely (tmp))
				*ep->reg_udccs = tmp;
			if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
				completed = write_fifo(ep, req);

		} else {	/* irq from RPC (or for ISO, ROF) */
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp = UDCCS_BO_SST | UDCCS_BO_DME;
			else
				tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
			tmp &= udccs;
			if (likely(tmp))
				*ep->reg_udccs = tmp;

			/* fifos can hold packets, ready for reading... */
			if (likely(req)) {
				completed = read_fifo(ep, req);
			} else
				pio_irq_disable (ep->bEndpointAddress);
		}
		ep->pio_irqs++;
	} while (completed);
}

/*
 * pxa25x_udc_irq - interrupt handler
 *
 * avoid delays in ep0 processing. the control handshaking isn't always
 * under software control (pxa250c0 and the pxa255 are better), and delays
 * could cause usb protocol errors.
 */
static irqreturn_t pxa25x_udc_irq(int irq, void *_dev)
{
	struct pxa25x_udc	*dev = _dev;
	int			handled;

	dev->stats.irqs++;
	/* loop until no new interrupt condition is pending, so events that
	 * arrive while we service earlier ones are not lost.
	 */
	do {
		u32		udccr = UDCCR;

		handled = 0;

		/* SUSpend Interrupt Request */
		if (unlikely(udccr & UDCCR_SUSIR)) {
			udc_ack_int_UDCCR(UDCCR_SUSIR);
			handled = 1;
			DBG(DBG_VERBOSE, "USB suspend\n");

			if (dev->gadget.speed != USB_SPEED_UNKNOWN
					&& dev->driver
					&& dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
			ep0_idle (dev);
		}

		/* RESume Interrupt Request */
		if (unlikely(udccr & UDCCR_RESIR)) {
			udc_ack_int_UDCCR(UDCCR_RESIR);
			handled = 1;
			DBG(DBG_VERBOSE, "USB resume\n");

			if (dev->gadget.speed != USB_SPEED_UNKNOWN
					&& dev->driver
					&& dev->driver->resume)
				dev->driver->resume(&dev->gadget);
		}

		/* ReSeT Interrupt Request - USB reset */
		if (unlikely(udccr & UDCCR_RSTIR)) {
			udc_ack_int_UDCCR(UDCCR_RSTIR);
			handled = 1;

			/* UDA clear means the reset is still in progress */
			if ((UDCCR & UDCCR_UDA) == 0) {
				DBG(DBG_VERBOSE, "USB reset start\n");

				/* reset driver and endpoints,
				 * in case that's not yet done
				 */
				stop_activity (dev, dev->driver);

			} else {
				DBG(DBG_VERBOSE, "USB reset end\n");
				dev->gadget.speed = USB_SPEED_FULL;
				memset(&dev->stats, 0, sizeof dev->stats);
				/* driver and endpoints are still reset */
			}

		} else {
			/* mask off sources disabled via UICR0/UICR1 */
			u32	usir0 = USIR0 & ~UICR0;
			u32	usir1 = USIR1 & ~UICR1;
			int	i;

			if (unlikely (!usir0 && !usir1))
				continue;

			DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);

			/* control traffic */
			if (usir0 & USIR0_IR0) {
				dev->ep[0].pio_irqs++;
				handle_ep0(dev);
				handled = 1;
			}

			/* endpoint data transfers */
			for (i = 0; i < 8; i++) {
				u32	tmp = 1 << i;

				if (i && (usir0 & tmp)) {
					handle_ep(&dev->ep[i]);
					USIR0 |= tmp;
					handled = 1;
				}
#ifndef	CONFIG_USB_PXA25X_SMALL
				if (usir1 & tmp) {
					handle_ep(&dev->ep[i+8]);
					USIR1 |= tmp;
					handled = 1;
				}
#endif
			}
		}

		/* we could also ask for 1 msec SOF (SIR) interrupts */

	} while (handled);
	return IRQ_HANDLED;
}

/*-------------------------------------------------------------------------*/

/* no-op release hook for the statically allocated gadget device */
static void nop_release (struct device *dev)
{
	DMSG("%s %s\n", __func__, dev_name(dev));
}

/* this uses
load-time allocation and initialization (instead of * doing it at run-time) to save code, eliminate fault paths, and * be more obviously correct. */ static struct pxa25x_udc memory = { .gadget = { .ops = &pxa25x_udc_ops, .ep0 = &memory.ep[0].ep, .name = driver_name, .dev = { .init_name = "gadget", .release = nop_release, }, }, /* control endpoint */ .ep[0] = { .ep = { .name = ep0name, .ops = &pxa25x_ep_ops, .maxpacket = EP0_FIFO_SIZE, }, .dev = &memory, .reg_udccs = &UDCCS0, .reg_uddr = &UDDR0, }, /* first group of endpoints */ .ep[1] = { .ep = { .name = "ep1in-bulk", .ops = &pxa25x_ep_ops, .maxpacket = BULK_FIFO_SIZE, }, .dev = &memory, .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 1, .bmAttributes = USB_ENDPOINT_XFER_BULK, .reg_udccs = &UDCCS1, .reg_uddr = &UDDR1, }, .ep[2] = { .ep = { .name = "ep2out-bulk", .ops = &pxa25x_ep_ops, .maxpacket = BULK_FIFO_SIZE, }, .dev = &memory, .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = 2, .bmAttributes = USB_ENDPOINT_XFER_BULK, .reg_udccs = &UDCCS2, .reg_ubcr = &UBCR2, .reg_uddr = &UDDR2, }, #ifndef CONFIG_USB_PXA25X_SMALL .ep[3] = { .ep = { .name = "ep3in-iso", .ops = &pxa25x_ep_ops, .maxpacket = ISO_FIFO_SIZE, }, .dev = &memory, .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 3, .bmAttributes = USB_ENDPOINT_XFER_ISOC, .reg_udccs = &UDCCS3, .reg_uddr = &UDDR3, }, .ep[4] = { .ep = { .name = "ep4out-iso", .ops = &pxa25x_ep_ops, .maxpacket = ISO_FIFO_SIZE, }, .dev = &memory, .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = 4, .bmAttributes = USB_ENDPOINT_XFER_ISOC, .reg_udccs = &UDCCS4, .reg_ubcr = &UBCR4, .reg_uddr = &UDDR4, }, .ep[5] = { .ep = { .name = "ep5in-int", .ops = &pxa25x_ep_ops, .maxpacket = INT_FIFO_SIZE, }, .dev = &memory, .fifo_size = INT_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 5, .bmAttributes = USB_ENDPOINT_XFER_INT, .reg_udccs = &UDCCS5, .reg_uddr = &UDDR5, }, /* second group of endpoints */ .ep[6] = { .ep = { .name = "ep6in-bulk", .ops = &pxa25x_ep_ops, .maxpacket = 
BULK_FIFO_SIZE, }, .dev = &memory, .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 6, .bmAttributes = USB_ENDPOINT_XFER_BULK, .reg_udccs = &UDCCS6, .reg_uddr = &UDDR6, }, .ep[7] = { .ep = { .name = "ep7out-bulk", .ops = &pxa25x_ep_ops, .maxpacket = BULK_FIFO_SIZE, }, .dev = &memory, .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = 7, .bmAttributes = USB_ENDPOINT_XFER_BULK, .reg_udccs = &UDCCS7, .reg_ubcr = &UBCR7, .reg_uddr = &UDDR7, }, .ep[8] = { .ep = { .name = "ep8in-iso", .ops = &pxa25x_ep_ops, .maxpacket = ISO_FIFO_SIZE, }, .dev = &memory, .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 8, .bmAttributes = USB_ENDPOINT_XFER_ISOC, .reg_udccs = &UDCCS8, .reg_uddr = &UDDR8, }, .ep[9] = { .ep = { .name = "ep9out-iso", .ops = &pxa25x_ep_ops, .maxpacket = ISO_FIFO_SIZE, }, .dev = &memory, .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = 9, .bmAttributes = USB_ENDPOINT_XFER_ISOC, .reg_udccs = &UDCCS9, .reg_ubcr = &UBCR9, .reg_uddr = &UDDR9, }, .ep[10] = { .ep = { .name = "ep10in-int", .ops = &pxa25x_ep_ops, .maxpacket = INT_FIFO_SIZE, }, .dev = &memory, .fifo_size = INT_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 10, .bmAttributes = USB_ENDPOINT_XFER_INT, .reg_udccs = &UDCCS10, .reg_uddr = &UDDR10, }, /* third group of endpoints */ .ep[11] = { .ep = { .name = "ep11in-bulk", .ops = &pxa25x_ep_ops, .maxpacket = BULK_FIFO_SIZE, }, .dev = &memory, .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 11, .bmAttributes = USB_ENDPOINT_XFER_BULK, .reg_udccs = &UDCCS11, .reg_uddr = &UDDR11, }, .ep[12] = { .ep = { .name = "ep12out-bulk", .ops = &pxa25x_ep_ops, .maxpacket = BULK_FIFO_SIZE, }, .dev = &memory, .fifo_size = BULK_FIFO_SIZE, .bEndpointAddress = 12, .bmAttributes = USB_ENDPOINT_XFER_BULK, .reg_udccs = &UDCCS12, .reg_ubcr = &UBCR12, .reg_uddr = &UDDR12, }, .ep[13] = { .ep = { .name = "ep13in-iso", .ops = &pxa25x_ep_ops, .maxpacket = ISO_FIFO_SIZE, }, .dev = &memory, .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 13, 
.bmAttributes = USB_ENDPOINT_XFER_ISOC, .reg_udccs = &UDCCS13, .reg_uddr = &UDDR13, }, .ep[14] = { .ep = { .name = "ep14out-iso", .ops = &pxa25x_ep_ops, .maxpacket = ISO_FIFO_SIZE, }, .dev = &memory, .fifo_size = ISO_FIFO_SIZE, .bEndpointAddress = 14, .bmAttributes = USB_ENDPOINT_XFER_ISOC, .reg_udccs = &UDCCS14, .reg_ubcr = &UBCR14, .reg_uddr = &UDDR14, }, .ep[15] = { .ep = { .name = "ep15in-int", .ops = &pxa25x_ep_ops, .maxpacket = INT_FIFO_SIZE, }, .dev = &memory, .fifo_size = INT_FIFO_SIZE, .bEndpointAddress = USB_DIR_IN | 15, .bmAttributes = USB_ENDPOINT_XFER_INT, .reg_udccs = &UDCCS15, .reg_uddr = &UDDR15, }, #endif /* !CONFIG_USB_PXA25X_SMALL */ }; #define CP15R0_VENDOR_MASK 0xffffe000 #if defined(CONFIG_ARCH_PXA) #define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */ #elif defined(CONFIG_ARCH_IXP4XX) #define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */ #endif #define CP15R0_PROD_MASK 0x000003f0 #define PXA25x 0x00000100 /* and PXA26x */ #define PXA210 0x00000120 #define CP15R0_REV_MASK 0x0000000f #define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK) #define PXA255_A0 0x00000106 /* or PXA260_B1 */ #define PXA250_C0 0x00000105 /* or PXA26x_B0 */ #define PXA250_B2 0x00000104 #define PXA250_B1 0x00000103 /* or PXA260_A0 */ #define PXA250_B0 0x00000102 #define PXA250_A1 0x00000101 #define PXA250_A0 0x00000100 #define PXA210_C0 0x00000125 #define PXA210_B2 0x00000124 #define PXA210_B1 0x00000123 #define PXA210_B0 0x00000122 #define IXP425_A0 0x000001c1 #define IXP425_B0 0x000001f1 #define IXP465_AD 0x00000200 /* * probe - binds to the platform device */ static int __init pxa25x_udc_probe(struct platform_device *pdev) { struct pxa25x_udc *dev = &memory; int retval, irq; u32 chiprev; pr_info("%s: version %s\n", driver_name, DRIVER_VERSION); /* insist on Intel/ARM/XScale */ asm("mrc%? 
p15, 0, %0, c0, c0" : "=r" (chiprev)); if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) { pr_err("%s: not XScale!\n", driver_name); return -ENODEV; } /* trigger chiprev-specific logic */ switch (chiprev & CP15R0_PRODREV_MASK) { #if defined(CONFIG_ARCH_PXA) case PXA255_A0: dev->has_cfr = 1; break; case PXA250_A0: case PXA250_A1: /* A0/A1 "not released"; ep 13, 15 unusable */ /* fall through */ case PXA250_B2: case PXA210_B2: case PXA250_B1: case PXA210_B1: case PXA250_B0: case PXA210_B0: /* OUT-DMA is broken ... */ /* fall through */ case PXA250_C0: case PXA210_C0: break; #elif defined(CONFIG_ARCH_IXP4XX) case IXP425_A0: case IXP425_B0: case IXP465_AD: dev->has_cfr = 1; break; #endif default: pr_err("%s: unrecognized processor: %08x\n", driver_name, chiprev); /* iop3xx, ixp4xx, ... */ return -ENODEV; } irq = platform_get_irq(pdev, 0); if (irq < 0) return -ENODEV; dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) { retval = PTR_ERR(dev->clk); goto err_clk; } pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, dev->has_cfr ? 
"" : " (!cfr)", SIZE_STR "(pio)" ); /* other non-static parts of init */ dev->dev = &pdev->dev; dev->mach = pdev->dev.platform_data; dev->transceiver = usb_get_phy(USB_PHY_TYPE_USB2); if (gpio_is_valid(dev->mach->gpio_pullup)) { if ((retval = gpio_request(dev->mach->gpio_pullup, "pca25x_udc GPIO PULLUP"))) { dev_dbg(&pdev->dev, "can't get pullup gpio %d, err: %d\n", dev->mach->gpio_pullup, retval); goto err_gpio_pullup; } gpio_direction_output(dev->mach->gpio_pullup, 0); } init_timer(&dev->timer); dev->timer.function = udc_watchdog; dev->timer.data = (unsigned long) dev; the_controller = dev; platform_set_drvdata(pdev, dev); udc_disable(dev); udc_reinit(dev); dev->vbus = 0; /* irq setup after old hardware state is cleaned up */ retval = request_irq(irq, pxa25x_udc_irq, 0, driver_name, dev); if (retval != 0) { pr_err("%s: can't get irq %d, err %d\n", driver_name, irq, retval); goto err_irq1; } dev->got_irq = 1; #ifdef CONFIG_ARCH_LUBBOCK if (machine_is_lubbock()) { retval = request_irq(LUBBOCK_USB_DISC_IRQ, lubbock_vbus_irq, 0, driver_name, dev); if (retval != 0) { pr_err("%s: can't get irq %i, err %d\n", driver_name, LUBBOCK_USB_DISC_IRQ, retval); goto err_irq_lub; } retval = request_irq(LUBBOCK_USB_IRQ, lubbock_vbus_irq, 0, driver_name, dev); if (retval != 0) { pr_err("%s: can't get irq %i, err %d\n", driver_name, LUBBOCK_USB_IRQ, retval); goto lubbock_fail0; } } else #endif create_debug_files(dev); retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget); if (!retval) return retval; remove_debug_files(dev); #ifdef CONFIG_ARCH_LUBBOCK lubbock_fail0: free_irq(LUBBOCK_USB_DISC_IRQ, dev); err_irq_lub: free_irq(irq, dev); #endif err_irq1: if (gpio_is_valid(dev->mach->gpio_pullup)) gpio_free(dev->mach->gpio_pullup); err_gpio_pullup: if (!IS_ERR_OR_NULL(dev->transceiver)) { usb_put_phy(dev->transceiver); dev->transceiver = NULL; } clk_put(dev->clk); err_clk: return retval; } static void pxa25x_udc_shutdown(struct platform_device *_dev) { pullup_off(); } static int __exit 
pxa25x_udc_remove(struct platform_device *pdev) { struct pxa25x_udc *dev = platform_get_drvdata(pdev); if (dev->driver) return -EBUSY; usb_del_gadget_udc(&dev->gadget); dev->pullup = 0; pullup(dev); remove_debug_files(dev); if (dev->got_irq) { free_irq(platform_get_irq(pdev, 0), dev); dev->got_irq = 0; } #ifdef CONFIG_ARCH_LUBBOCK if (machine_is_lubbock()) { free_irq(LUBBOCK_USB_DISC_IRQ, dev); free_irq(LUBBOCK_USB_IRQ, dev); } #endif if (gpio_is_valid(dev->mach->gpio_pullup)) gpio_free(dev->mach->gpio_pullup); clk_put(dev->clk); if (!IS_ERR_OR_NULL(dev->transceiver)) { usb_put_phy(dev->transceiver); dev->transceiver = NULL; } the_controller = NULL; return 0; } /*-------------------------------------------------------------------------*/ #ifdef CONFIG_PM /* USB suspend (controlled by the host) and system suspend (controlled * by the PXA) don't necessarily work well together. If USB is active, * the 48 MHz clock is required; so the system can't enter 33 MHz idle * mode, or any deeper PM saving state. * * For now, we punt and forcibly disconnect from the USB host when PXA * enters any suspend state. While we're disconnected, we always disable * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states. * Boards without software pullup control shouldn't use those states. * VBUS IRQs should probably be ignored so that the PXA device just acts * "dead" to USB hosts until system resume. 
 */
static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct pxa25x_udc	*udc = platform_get_drvdata(dev);
	unsigned long		flags;

	/* with neither a pullup GPIO nor a udc_command hook there is no
	 * way to drop the pullup, hence the warning that the host will
	 * not observe a disconnect.
	 */
	if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
		WARNING("USB host won't detect disconnect!\n");
	udc->suspended = 1;

	/* re-evaluate the pullup with suspended set (presumably this
	 * disconnects; pullup() is defined elsewhere -- confirm)
	 */
	local_irq_save(flags);
	pullup(udc);
	local_irq_restore(flags);

	return 0;
}

static int pxa25x_udc_resume(struct platform_device *dev)
{
	struct pxa25x_udc	*udc = platform_get_drvdata(dev);
	unsigned long		flags;

	udc->suspended = 0;
	local_irq_save(flags);
	pullup(udc);
	local_irq_restore(flags);

	return 0;
}

#else
#define	pxa25x_udc_suspend	NULL
#define	pxa25x_udc_resume	NULL
#endif

/*-------------------------------------------------------------------------*/

static struct platform_driver udc_driver = {
	.shutdown	= pxa25x_udc_shutdown,
	.remove		= __exit_p(pxa25x_udc_remove),
	.suspend	= pxa25x_udc_suspend,
	.resume		= pxa25x_udc_resume,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "pxa25x-udc",
	},
};

module_platform_driver_probe(udc_driver, pxa25x_udc_probe);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa25x-udc");
gpl-2.0
SebastianFM/HTC-One-X-Plus-kernel
drivers/net/ibm_newemac/tah.c
3184
4144
/*
 * drivers/net/ibm_newemac/tah.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright 2004 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <asm/io.h>

#include "emac.h"
#include "core.h"

/* register an EMAC channel as a user of this shared TAH instance;
 * only the user count changes here.
 */
int __devinit tah_attach(struct platform_device *ofdev, int channel)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);

	mutex_lock(&dev->lock);
	/* Reset has been done at probe() time... nothing else to do for now */
	++dev->users;
	mutex_unlock(&dev->lock);

	return 0;
}

/* drop the user reference taken by tah_attach() */
void tah_detach(struct platform_device *ofdev, int channel)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);

	mutex_lock(&dev->lock);
	--dev->users;
	mutex_unlock(&dev->lock);
}

/* soft-reset the TAH (polling the self-clearing TAH_MR_SR bit up to 100
 * times) and then program the mode register with the driver defaults.
 */
void tah_reset(struct platform_device *ofdev)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct tah_regs __iomem *p = dev->base;
	int n;

	/* Reset TAH */
	out_be32(&p->mr, TAH_MR_SR);
	n = 100;
	while ((in_be32(&p->mr) & TAH_MR_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "%s: reset timeout\n",
		       ofdev->dev.of_node->full_name);

	/* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
	out_be32(&p->mr,
		 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
		 TAH_MR_DIG);
}

/* size of the ethtool register dump: subheader plus raw register block */
int tah_get_regs_len(struct platform_device *ofdev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
		sizeof(struct tah_regs);
}

/* append a subheader and a raw copy of the TAH registers to an ethtool
 * register dump buffer; returns the next free position in the buffer.
 */
void *tah_dump_regs(struct platform_device *ofdev, void *buf)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct tah_regs *regs = (struct tah_regs *)(hdr + 1);

	hdr->version = 0;
	hdr->index = 0; /* for now, are there chips with more than one
			 * zmii ? if yes, then we'll add a cell_index
			 * like we do for emac
			 */
	memcpy_fromio(regs, dev->base, sizeof(struct tah_regs));
	return regs + 1;
}

/* bind to a "tah" devicetree node: allocate the instance, map its
 * registers, stash it as drvdata, and reset the hardware.
 */
static int __devinit tah_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct tah_instance *dev;
	struct resource regs;
	int rc;

	rc = -ENOMEM;
	dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "%s: could not allocate TAH device!\n",
		       np->full_name);
		goto err_gone;
	}

	mutex_init(&dev->lock);
	dev->ofdev = ofdev;

	rc = -ENXIO;
	if (of_address_to_resource(np, 0, &regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_free;
	}

	rc = -ENOMEM;
	dev->base = (struct tah_regs __iomem *)ioremap(regs.start,
					sizeof(struct tah_regs));
	if (dev->base == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		goto err_free;
	}

	dev_set_drvdata(&ofdev->dev, dev);

	/* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
	tah_reset(ofdev);

	printk(KERN_INFO
	       "TAH %s initialized\n", ofdev->dev.of_node->full_name);
	wmb();

	return 0;

 err_free:
	kfree(dev);
 err_gone:
	return rc;
}

/* unbind: warn if channels are still attached, then unmap and free */
static int __devexit tah_remove(struct platform_device *ofdev)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);

	dev_set_drvdata(&ofdev->dev, NULL);

	WARN_ON(dev->users != 0);

	iounmap(dev->base);
	kfree(dev);

	return 0;
}

static struct of_device_id tah_match[] =
{
	{
		.compatible	= "ibm,tah",
	},
	/* For backward compat with old DT */
	{
		.type		= "tah",
	},
	{},
};

static struct platform_driver tah_driver = {
	.driver = {
		.name = "emac-tah",
		.owner = THIS_MODULE,
		.of_match_table = tah_match,
	},
	.probe = tah_probe,
	.remove = tah_remove,
};

int __init tah_init(void)
{
	return platform_driver_register(&tah_driver);
}

void tah_exit(void)
{
	platform_driver_unregister(&tah_driver);
}
gpl-2.0
garwynn/SMN900P_MI3_Kernel
drivers/net/phy/broadcom.c
4720
26788
/* * drivers/net/phy/broadcom.c * * Broadcom BCM5411, BCM5421 and BCM5461 Gigabit Ethernet * transceivers. * * Copyright (c) 2006 Maciej W. Rozycki * * Inspired by code written by Amy Fong. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/phy.h> #include <linux/brcmphy.h> #define BRCM_PHY_MODEL(phydev) \ ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask) #define BRCM_PHY_REV(phydev) \ ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask)) #define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */ #define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ #define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */ #define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */ #define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */ #define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */ #define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */ #define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */ #define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */ #define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */ #define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */ #define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */ #define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */ #define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */ #define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */ #define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */ #define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */ #define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */ #define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */ #define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */ 
#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */ #define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */ #define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */ #define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */ #define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */ #define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */ #define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */ #define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */ #define MII_BCM54XX_SHD_WRITE 0x8000 #define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10) #define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0) /* * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18) */ #define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 #define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400 #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 #define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 #define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 #define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 /* * Broadcom LED source encodings. These are used in BCM5461, BCM5481, * BCM5482, and possibly some others. */ #define BCM_LED_SRC_LINKSPD1 0x0 #define BCM_LED_SRC_LINKSPD2 0x1 #define BCM_LED_SRC_XMITLED 0x2 #define BCM_LED_SRC_ACTIVITYLED 0x3 #define BCM_LED_SRC_FDXLED 0x4 #define BCM_LED_SRC_SLAVE 0x5 #define BCM_LED_SRC_INTR 0x6 #define BCM_LED_SRC_QUALITY 0x7 #define BCM_LED_SRC_RCVLED 0x8 #define BCM_LED_SRC_MULTICOLOR1 0xa #define BCM_LED_SRC_OPENSHORT 0xb #define BCM_LED_SRC_OFF 0xe /* Tied high */ #define BCM_LED_SRC_ON 0xf /* Tied low */ /* * BCM5482: Shadow registers * Shadow values go into bits [14:10] of register 0x1c to select a shadow * register to access. 
*/ /* 00101: Spare Control Register 3 */ #define BCM54XX_SHD_SCR3 0x05 #define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 #define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 #define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 /* 01010: Auto Power-Down */ #define BCM54XX_SHD_APD 0x0a #define BCM54XX_SHD_APD_EN 0x0020 #define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ /* LED3 / ~LINKSPD[2] selector */ #define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) /* LED1 / ~LINKSPD[1] selector */ #define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) #define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */ #define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ #define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ #define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ #define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ #define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ /* * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17) */ #define MII_BCM54XX_EXP_AADJ1CH0 0x001f #define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200 #define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100 #define MII_BCM54XX_EXP_AADJ1CH3 0x601f #define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002 #define MII_BCM54XX_EXP_EXP08 0x0F08 #define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001 #define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200 #define MII_BCM54XX_EXP_EXP75 0x0f75 #define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c #define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001 #define MII_BCM54XX_EXP_EXP96 0x0f96 #define MII_BCM54XX_EXP_EXP96_MYST 0x0010 #define MII_BCM54XX_EXP_EXP97 0x0f97 #define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c /* * BCM5482: Secondary SerDes registers */ #define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */ #define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */ #define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */ #define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ #define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave 
auto-detection */ /*****************************************************************************/ /* Fast Ethernet Transceiver definitions. */ /*****************************************************************************/ #define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */ #define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */ #define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */ #define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */ #define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */ #define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */ #define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */ #define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */ /*** Shadow register definitions ***/ #define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */ #define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */ #define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */ #define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003 #define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001 #define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ #define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ MODULE_DESCRIPTION("Broadcom PHY driver"); MODULE_AUTHOR("Maciej W. Rozycki"); MODULE_LICENSE("GPL"); /* * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T * 0x1c shadow registers. 
*/ static int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow) { phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow)); return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD)); } static int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, u16 val) { return phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_WRITE | MII_BCM54XX_SHD_VAL(shadow) | MII_BCM54XX_SHD_DATA(val)); } /* Indirect register access functions for the Expansion Registers */ static int bcm54xx_exp_read(struct phy_device *phydev, u16 regnum) { int val; val = phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum); if (val < 0) return val; val = phy_read(phydev, MII_BCM54XX_EXP_DATA); /* Restore default value. It's O.K. if this write fails. */ phy_write(phydev, MII_BCM54XX_EXP_SEL, 0); return val; } static int bcm54xx_exp_write(struct phy_device *phydev, u16 regnum, u16 val) { int ret; ret = phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum); if (ret < 0) return ret; ret = phy_write(phydev, MII_BCM54XX_EXP_DATA, val); /* Restore default value. It's O.K. if this write fails. 
*/ phy_write(phydev, MII_BCM54XX_EXP_SEL, 0); return ret; } static int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val) { return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val); } /* Needs SMDSP clock enabled via bcm54xx_phydsp_config() */ static int bcm50610_a0_workaround(struct phy_device *phydev) { int err; err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0, MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN | MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF); if (err < 0) return err; err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3, MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ); if (err < 0) return err; err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, MII_BCM54XX_EXP_EXP75_VDACCTRL); if (err < 0) return err; err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96, MII_BCM54XX_EXP_EXP96_MYST); if (err < 0) return err; err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97, MII_BCM54XX_EXP_EXP97_MYST); return err; } static int bcm54xx_phydsp_config(struct phy_device *phydev) { int err, err2; /* Enable the SMDSP clock */ err = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL, MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA | MII_BCM54XX_AUXCTL_ACTL_TX_6DB); if (err < 0) return err; if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 || BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) { /* Clear bit 9 to fix a phy interop issue. */ err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08, MII_BCM54XX_EXP_EXP08_RJCT_2MHZ); if (err < 0) goto error; if (phydev->drv->phy_id == PHY_ID_BCM50610) { err = bcm50610_a0_workaround(phydev); if (err < 0) goto error; } } if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) { int val; val = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75); if (val < 0) goto error; val |= MII_BCM54XX_EXP_EXP75_CM_OSC; err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, val); } error: /* Disable the SMDSP clock */ err2 = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL, MII_BCM54XX_AUXCTL_ACTL_TX_6DB); /* Return the first error reported. 
*/ return err ? err : err2; } static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev) { u32 orig; int val; bool clk125en = true; /* Abort if we are using an untested phy. */ if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 && BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 && BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M) return; val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3); if (val < 0) return; orig = val; if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 || BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) && BRCM_PHY_REV(phydev) >= 0x3) { /* * Here, bit 0 _disables_ CLK125 when set. * This bit is set by default. */ clk125en = false; } else { if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) { /* Here, bit 0 _enables_ CLK125 when set */ val &= ~BCM54XX_SHD_SCR3_DEF_CLK125; clk125en = false; } } if (!clk125en || (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE)) val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS; else val |= BCM54XX_SHD_SCR3_DLLAPD_DIS; if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) val |= BCM54XX_SHD_SCR3_TRDDAPD; if (orig != val) bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val); val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD); if (val < 0) return; orig = val; if (!clk125en || (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE)) val |= BCM54XX_SHD_APD_EN; else val &= ~BCM54XX_SHD_APD_EN; if (orig != val) bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val); } static int bcm54xx_config_init(struct phy_device *phydev) { int reg, err; reg = phy_read(phydev, MII_BCM54XX_ECR); if (reg < 0) return reg; /* Mask interrupts globally. */ reg |= MII_BCM54XX_ECR_IM; err = phy_write(phydev, MII_BCM54XX_ECR, reg); if (err < 0) return err; /* Unmask events we are interested in. 
*/ reg = ~(MII_BCM54XX_INT_DUPLEX | MII_BCM54XX_INT_SPEED | MII_BCM54XX_INT_LINK); err = phy_write(phydev, MII_BCM54XX_IMR, reg); if (err < 0) return err; if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 || BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) && (phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE)) bcm54xx_shadow_write(phydev, BCM54XX_SHD_RGMII_MODE, 0); if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) || (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) || (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE)) bcm54xx_adjust_rxrefclk(phydev); bcm54xx_phydsp_config(phydev); return 0; } static int bcm5482_config_init(struct phy_device *phydev) { int err, reg; err = bcm54xx_config_init(phydev); if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX) { /* * Enable secondary SerDes and its use as an LED source */ reg = bcm54xx_shadow_read(phydev, BCM5482_SHD_SSD); bcm54xx_shadow_write(phydev, BCM5482_SHD_SSD, reg | BCM5482_SHD_SSD_LEDM | BCM5482_SHD_SSD_EN); /* * Enable SGMII slave mode and auto-detection */ reg = BCM5482_SSD_SGMII_SLAVE | MII_BCM54XX_EXP_SEL_SSD; err = bcm54xx_exp_read(phydev, reg); if (err < 0) return err; err = bcm54xx_exp_write(phydev, reg, err | BCM5482_SSD_SGMII_SLAVE_EN | BCM5482_SSD_SGMII_SLAVE_AD); if (err < 0) return err; /* * Disable secondary SerDes powerdown */ reg = BCM5482_SSD_1000BX_CTL | MII_BCM54XX_EXP_SEL_SSD; err = bcm54xx_exp_read(phydev, reg); if (err < 0) return err; err = bcm54xx_exp_write(phydev, reg, err & ~BCM5482_SSD_1000BX_CTL_PWRDOWN); if (err < 0) return err; /* * Select 1000BASE-X register set (primary SerDes) */ reg = bcm54xx_shadow_read(phydev, BCM5482_SHD_MODE); bcm54xx_shadow_write(phydev, BCM5482_SHD_MODE, reg | BCM5482_SHD_MODE_1000BX); /* * LED1=ACTIVITYLED, LED3=LINKSPD[2] * (Use LED1 as secondary SerDes ACTIVITY LED) */ bcm54xx_shadow_write(phydev, BCM5482_SHD_LEDS1, BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_ACTIVITYLED) | BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_LINKSPD2)); /* * Auto-negotiation doesn't seem to work quite right 
* in this mode, so we disable it and force it to the * right speed/duplex setting. Only 'link status' * is important. */ phydev->autoneg = AUTONEG_DISABLE; phydev->speed = SPEED_1000; phydev->duplex = DUPLEX_FULL; } return err; } static int bcm5482_read_status(struct phy_device *phydev) { int err; err = genphy_read_status(phydev); if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX) { /* * Only link status matters for 1000Base-X mode, so force * 1000 Mbit/s full-duplex status */ if (phydev->link) { phydev->speed = SPEED_1000; phydev->duplex = DUPLEX_FULL; } } return err; } static int bcm54xx_ack_interrupt(struct phy_device *phydev) { int reg; /* Clear pending interrupts. */ reg = phy_read(phydev, MII_BCM54XX_ISR); if (reg < 0) return reg; return 0; } static int bcm54xx_config_intr(struct phy_device *phydev) { int reg, err; reg = phy_read(phydev, MII_BCM54XX_ECR); if (reg < 0) return reg; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) reg &= ~MII_BCM54XX_ECR_IM; else reg |= MII_BCM54XX_ECR_IM; err = phy_write(phydev, MII_BCM54XX_ECR, reg); return err; } static int bcm5481_config_aneg(struct phy_device *phydev) { int ret; /* Aneg firsly. */ ret = genphy_config_aneg(phydev); /* Then we can set up the delay. */ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { u16 reg; /* * There is no BCM5481 specification available, so down * here is everything we know about "register 0x18". This * at least helps BCM5481 to successfully receive packets * on MPC8360E-RDK board. Peter Barada <peterb@logicpd.com> * says: "This sets delay between the RXD and RXC signals * instead of using trace lengths to achieve timing". */ /* Set RDX clk delay. */ reg = 0x7 | (0x7 << 12); phy_write(phydev, 0x18, reg); reg = phy_read(phydev, 0x18); /* Set RDX-RXC skew. */ reg |= (1 << 8); /* Write bits 14:0. 
*/ reg |= (1 << 15); phy_write(phydev, 0x18, reg); } return ret; } static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set) { int val; val = phy_read(phydev, reg); if (val < 0) return val; return phy_write(phydev, reg, val | set); } static int brcm_fet_config_init(struct phy_device *phydev) { int reg, err, err2, brcmtest; /* Reset the PHY to bring it to a known state. */ err = phy_write(phydev, MII_BMCR, BMCR_RESET); if (err < 0) return err; reg = phy_read(phydev, MII_BRCM_FET_INTREG); if (reg < 0) return reg; /* Unmask events we are interested in and mask interrupts globally. */ reg = MII_BRCM_FET_IR_DUPLEX_EN | MII_BRCM_FET_IR_SPEED_EN | MII_BRCM_FET_IR_LINK_EN | MII_BRCM_FET_IR_ENABLE | MII_BRCM_FET_IR_MASK; err = phy_write(phydev, MII_BRCM_FET_INTREG, reg); if (err < 0) return err; /* Enable shadow register access */ brcmtest = phy_read(phydev, MII_BRCM_FET_BRCMTEST); if (brcmtest < 0) return brcmtest; reg = brcmtest | MII_BRCM_FET_BT_SRE; err = phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg); if (err < 0) return err; /* Set the LED mode */ reg = phy_read(phydev, MII_BRCM_FET_SHDW_AUXMODE4); if (reg < 0) { err = reg; goto done; } reg &= ~MII_BRCM_FET_SHDW_AM4_LED_MASK; reg |= MII_BRCM_FET_SHDW_AM4_LED_MODE1; err = phy_write(phydev, MII_BRCM_FET_SHDW_AUXMODE4, reg); if (err < 0) goto done; /* Enable auto MDIX */ err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_MISCCTRL, MII_BRCM_FET_SHDW_MC_FAME); if (err < 0) goto done; if (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE) { /* Enable auto power down */ err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2, MII_BRCM_FET_SHDW_AS2_APDE); } done: /* Disable shadow register access */ err2 = phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest); if (!err) err = err2; return err; } static int brcm_fet_ack_interrupt(struct phy_device *phydev) { int reg; /* Clear pending interrupts. 
*/ reg = phy_read(phydev, MII_BRCM_FET_INTREG); if (reg < 0) return reg; return 0; } static int brcm_fet_config_intr(struct phy_device *phydev) { int reg, err; reg = phy_read(phydev, MII_BRCM_FET_INTREG); if (reg < 0) return reg; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) reg &= ~MII_BRCM_FET_IR_MASK; else reg |= MII_BRCM_FET_IR_MASK; err = phy_write(phydev, MII_BRCM_FET_INTREG, reg); return err; } static struct phy_driver bcm5411_driver = { .phy_id = PHY_ID_BCM5411, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5411", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm54xx_ack_interrupt, .config_intr = bcm54xx_config_intr, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver bcm5421_driver = { .phy_id = PHY_ID_BCM5421, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5421", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm54xx_ack_interrupt, .config_intr = bcm54xx_config_intr, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver bcm5461_driver = { .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5461", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm54xx_ack_interrupt, .config_intr = bcm54xx_config_intr, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver bcm5464_driver = { .phy_id = PHY_ID_BCM5464, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5464", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | 
SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm54xx_ack_interrupt, .config_intr = bcm54xx_config_intr, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver bcm5481_driver = { .phy_id = PHY_ID_BCM5481, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5481", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm54xx_ack_interrupt, .config_intr = bcm54xx_config_intr, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver bcm5482_driver = { .phy_id = PHY_ID_BCM5482, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5482", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm5482_config_init, .config_aneg = genphy_config_aneg, .read_status = bcm5482_read_status, .ack_interrupt = bcm54xx_ack_interrupt, .config_intr = bcm54xx_config_intr, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver bcm50610_driver = { .phy_id = PHY_ID_BCM50610, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm54xx_ack_interrupt, .config_intr = bcm54xx_config_intr, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver bcm50610m_driver = { .phy_id = PHY_ID_BCM50610M, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610M", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = 
genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm54xx_ack_interrupt, .config_intr = bcm54xx_config_intr, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver bcm57780_driver = { .phy_id = PHY_ID_BCM57780, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM57780", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm54xx_ack_interrupt, .config_intr = bcm54xx_config_intr, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver bcmac131_driver = { .phy_id = PHY_ID_BCMAC131, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCMAC131", .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = brcm_fet_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, .driver = { .owner = THIS_MODULE }, }; static struct phy_driver bcm5241_driver = { .phy_id = PHY_ID_BCM5241, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5241", .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = brcm_fet_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, .driver = { .owner = THIS_MODULE }, }; static int __init broadcom_init(void) { int ret; ret = phy_driver_register(&bcm5411_driver); if (ret) goto out_5411; ret = phy_driver_register(&bcm5421_driver); if (ret) goto out_5421; ret = phy_driver_register(&bcm5461_driver); if (ret) goto out_5461; ret = phy_driver_register(&bcm5464_driver); if (ret) goto out_5464; ret = phy_driver_register(&bcm5481_driver); if (ret) goto out_5481; ret = 
phy_driver_register(&bcm5482_driver); if (ret) goto out_5482; ret = phy_driver_register(&bcm50610_driver); if (ret) goto out_50610; ret = phy_driver_register(&bcm50610m_driver); if (ret) goto out_50610m; ret = phy_driver_register(&bcm57780_driver); if (ret) goto out_57780; ret = phy_driver_register(&bcmac131_driver); if (ret) goto out_ac131; ret = phy_driver_register(&bcm5241_driver); if (ret) goto out_5241; return ret; out_5241: phy_driver_unregister(&bcmac131_driver); out_ac131: phy_driver_unregister(&bcm57780_driver); out_57780: phy_driver_unregister(&bcm50610m_driver); out_50610m: phy_driver_unregister(&bcm50610_driver); out_50610: phy_driver_unregister(&bcm5482_driver); out_5482: phy_driver_unregister(&bcm5481_driver); out_5481: phy_driver_unregister(&bcm5464_driver); out_5464: phy_driver_unregister(&bcm5461_driver); out_5461: phy_driver_unregister(&bcm5421_driver); out_5421: phy_driver_unregister(&bcm5411_driver); out_5411: return ret; } static void __exit broadcom_exit(void) { phy_driver_unregister(&bcm5241_driver); phy_driver_unregister(&bcmac131_driver); phy_driver_unregister(&bcm57780_driver); phy_driver_unregister(&bcm50610m_driver); phy_driver_unregister(&bcm50610_driver); phy_driver_unregister(&bcm5482_driver); phy_driver_unregister(&bcm5481_driver); phy_driver_unregister(&bcm5464_driver); phy_driver_unregister(&bcm5461_driver); phy_driver_unregister(&bcm5421_driver); phy_driver_unregister(&bcm5411_driver); } module_init(broadcom_init); module_exit(broadcom_exit); static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCM5411, 0xfffffff0 }, { PHY_ID_BCM5421, 0xfffffff0 }, { PHY_ID_BCM5461, 0xfffffff0 }, { PHY_ID_BCM5464, 0xfffffff0 }, { PHY_ID_BCM5482, 0xfffffff0 }, { PHY_ID_BCM5482, 0xfffffff0 }, { PHY_ID_BCM50610, 0xfffffff0 }, { PHY_ID_BCM50610M, 0xfffffff0 }, { PHY_ID_BCM57780, 0xfffffff0 }, { PHY_ID_BCMAC131, 0xfffffff0 }, { PHY_ID_BCM5241, 0xfffffff0 }, { } }; MODULE_DEVICE_TABLE(mdio, broadcom_tbl);
gpl-2.0
Neves4/DatKernel
arch/x86/xen/suspend.c
10352
1740
#include <linux/types.h> #include <linux/clockchips.h> #include <xen/interface/xen.h> #include <xen/grant_table.h> #include <xen/events.h> #include <asm/xen/hypercall.h> #include <asm/xen/page.h> #include <asm/fixmap.h> #include "xen-ops.h" #include "mmu.h" void xen_arch_pre_suspend(void) { xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn); xen_start_info->console.domU.mfn = mfn_to_pfn(xen_start_info->console.domU.mfn); BUG_ON(!irqs_disabled()); HYPERVISOR_shared_info = &xen_dummy_shared_info; if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP), __pte_ma(0), 0)) BUG(); } void xen_arch_hvm_post_suspend(int suspend_cancelled) { #ifdef CONFIG_XEN_PVHVM int cpu; xen_hvm_init_shared_info(); xen_callback_vector(); xen_unplug_emulated_devices(); if (xen_feature(XENFEAT_hvm_safe_pvclock)) { for_each_online_cpu(cpu) { xen_setup_runstate_info(cpu); } } #endif } void xen_arch_post_suspend(int suspend_cancelled) { xen_build_mfn_list_list(); xen_setup_shared_info(); if (suspend_cancelled) { xen_start_info->store_mfn = pfn_to_mfn(xen_start_info->store_mfn); xen_start_info->console.domU.mfn = pfn_to_mfn(xen_start_info->console.domU.mfn); } else { #ifdef CONFIG_SMP BUG_ON(xen_cpu_initialized_map == NULL); cpumask_copy(xen_cpu_initialized_map, cpu_online_mask); #endif xen_vcpu_restore(); } } static void xen_vcpu_notify_restore(void *data) { unsigned long reason = (unsigned long)data; /* Boot processor notified via generic timekeeping_resume() */ if ( smp_processor_id() == 0) return; clockevents_notify(reason, NULL); } void xen_arch_resume(void) { on_each_cpu(xen_vcpu_notify_restore, (void *)CLOCK_EVT_NOTIFY_RESUME, 1); }
gpl-2.0
MoKee/android_kernel_motorola_apq8084
scripts/kconfig/images.c
16496
6565
/* * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org> * Released under the terms of the GNU GPL v2.0. */ static const char *xpm_load[] = { "22 22 5 1", ". c None", "# c #000000", "c c #838100", "a c #ffff00", "b c #ffffff", "......................", "......................", "......................", "............####....#.", "...........#....##.##.", "..................###.", ".................####.", ".####...........#####.", "#abab##########.......", "#babababababab#.......", "#ababababababa#.......", "#babababababab#.......", "#ababab###############", "#babab##cccccccccccc##", "#abab##cccccccccccc##.", "#bab##cccccccccccc##..", "#ab##cccccccccccc##...", "#b##cccccccccccc##....", "###cccccccccccc##.....", "##cccccccccccc##......", "###############.......", "......................"}; static const char *xpm_save[] = { "22 22 5 1", ". c None", "# c #000000", "a c #838100", "b c #c5c2c5", "c c #cdb6d5", "......................", ".####################.", ".#aa#bbbbbbbbbbbb#bb#.", ".#aa#bbbbbbbbbbbb#bb#.", ".#aa#bbbbbbbbbcbb####.", ".#aa#bbbccbbbbbbb#aa#.", ".#aa#bbbccbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aaa############aaa#.", ".#aaaaaaaaaaaaaaaaaa#.", ".#aaaaaaaaaaaaaaaaaa#.", ".#aaa#############aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", "..##################..", "......................"}; static const char *xpm_back[] = { "22 22 3 1", ". 
c None", "# c #000083", "a c #838183", "......................", "......................", "......................", "......................", "......................", "...........######a....", "..#......##########...", "..##...####......##a..", "..###.###.........##..", "..######..........##..", "..#####...........##..", "..######..........##..", "..#######.........##..", "..########.......##a..", "...............a###...", "...............###....", "......................", "......................", "......................", "......................", "......................", "......................"}; static const char *xpm_tree_view[] = { "22 22 2 1", ". c None", "# c #000000", "......................", "......................", "......#...............", "......#...............", "......#...............", "......#...............", "......#...............", "......########........", "......#...............", "......#...............", "......#...............", "......#...............", "......#...............", "......########........", "......#...............", "......#...............", "......#...............", "......#...............", "......#...............", "......########........", "......................", "......................"}; static const char *xpm_single_view[] = { "22 22 2 1", ". c None", "# c #000000", "......................", "......................", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "......................", "......................"}; static const char *xpm_split_view[] = { "22 22 2 1", ". 
c None", "# c #000000", "......................", "......................", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......................", "......................"}; static const char *xpm_symbol_no[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " .......... ", " "}; static const char *xpm_symbol_mod[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . . ", " . .. . ", " . .... . ", " . .... . ", " . .. . ", " . . ", " . . ", " .......... ", " "}; static const char *xpm_symbol_yes[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . . ", " . . . ", " . .. . ", " . . .. . ", " . .... . ", " . .. . ", " . . ", " .......... ", " "}; static const char *xpm_choice_no[] = { "12 12 2 1", " c white", ". c black", " ", " .... ", " .. .. ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " .. .. ", " .... ", " "}; static const char *xpm_choice_yes[] = { "12 12 2 1", " c white", ". c black", " ", " .... ", " .. .. ", " . . ", " . .. . ", " . .... . ", " . .... . ", " . .. . ", " . . ", " .. .. ", " .... ", " "}; static const char *xpm_menu[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . .. . ", " . .... . ", " . ...... . ", " . ...... . ", " . .... . ", " . .. . ", " . . ", " .......... ", " "}; static const char *xpm_menu_inv[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " .......... ", " .. ...... ", " .. .... ", " .. .. ", " .. .. ", " .. .... ", " .. ...... 
", " .......... ", " .......... ", " "}; static const char *xpm_menuback[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . .. . ", " . .... . ", " . ...... . ", " . ...... . ", " . .... . ", " . .. . ", " . . ", " .......... ", " "}; static const char *xpm_void[] = { "12 12 2 1", " c white", ". c black", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "};
gpl-2.0
asis92/defroost-kernel
arch/arm/mach-msm/qdsp5v2/snddev_virtual.c
881
2816
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/err.h> #include <asm/uaccess.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/qdsp5v2/snddev_virtual.h> #include <mach/debug_mm.h> #include <linux/slab.h> static int snddev_virtual_open(struct msm_snddev_info *dev_info) { int rc = 0; if (!dev_info) rc = -EINVAL; return rc; } static int snddev_virtual_close(struct msm_snddev_info *dev_info) { int rc = 0; if (!dev_info) rc = -EINVAL; return rc; } static int snddev_virtual_set_freq(struct msm_snddev_info *dev_info, u32 rate) { int rc = 0; if (!dev_info) rc = -EINVAL; return rate; } static int snddev_virtual_probe(struct platform_device *pdev) { int rc = 0; struct snddev_virtual_data *pdata; struct msm_snddev_info *dev_info; if (!pdev || !pdev->dev.platform_data) { MM_ERR("Invalid caller\n"); rc = -EPERM; goto error; } pdata = pdev->dev.platform_data; dev_info = kmalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); if (!dev_info) { rc = -ENOMEM; goto error; } dev_info->name = pdata->name; dev_info->copp_id = pdata->copp_id; dev_info->acdb_id = pdata->acdb_id; dev_info->private_data = (void *) NULL; dev_info->dev_ops.open = snddev_virtual_open; dev_info->dev_ops.close = snddev_virtual_close; dev_info->dev_ops.set_freq = snddev_virtual_set_freq; dev_info->capability = pdata->capability; dev_info->sample_rate = 8000; dev_info->opened = 0; dev_info->sessions = 0; msm_snddev_register(dev_info); error: return rc; } static int 
snddev_virtual_remove(struct platform_device *pdev) { return 0; } static struct platform_driver snddev_virtual_driver = { .probe = snddev_virtual_probe, .remove = snddev_virtual_remove, .driver = { .name = "snddev_virtual" } }; static int __init snddev_virtual_init(void) { int rc = 0; MM_DBG(" snddev_virtual_init \n"); rc = platform_driver_register(&snddev_virtual_driver); if (IS_ERR_VALUE(rc)) { MM_ERR("platform driver register failure\n"); return -ENODEV; } return 0; } static void __exit snddev_virtual_exit(void) { platform_driver_unregister(&snddev_virtual_driver); return; } module_init(snddev_virtual_init); module_exit(snddev_virtual_exit); MODULE_DESCRIPTION("Virtual Sound Device driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
verygreen/ti-omap-encore-kernel
arch/x86/kernel/x86_init.c
881
2543
/*
 * Copyright (C) 2009 Thomas Gleixner <tglx@linutronix.de>
 *
 *  For licencing details see kernel-base/COPYING
 */

/*
 * Default x86 platform operation tables.  Sub-architectures and paravirt
 * environments override individual entries at boot time.
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/module.h>

#include <asm/bios_ebda.h>
#include <asm/paravirt.h>
#include <asm/pci_x86.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/e820.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/pat.h>
#include <asm/tsc.h>
#include <asm/iommu.h>

/* No-op stubs used to fill hooks a platform does not need. */
void __cpuinit x86_init_noop(void) { }
void __init x86_init_uint_noop(unsigned int unused) { }
void __init x86_init_pgd_noop(pgd_t *unused) { }
int __init iommu_init_noop(void) { return 0; }
void iommu_shutdown_noop(void) { }

/*
 * The platform setup functions are preset with the default functions
 * for standard PC hardware.
 */
struct x86_init_ops x86_init __initdata = {

	.resources = {
		.probe_roms		= x86_init_noop,
		.reserve_resources	= reserve_standard_io_resources,
		.memory_setup		= default_machine_specific_memory_setup,
	},

	.mpparse = {
		.mpc_record		= x86_init_uint_noop,
		.setup_ioapic_ids	= x86_init_noop,
		.mpc_apic_id		= default_mpc_apic_id,
		.smp_read_mpc_oem	= default_smp_read_mpc_oem,
		.mpc_oem_bus_info	= default_mpc_oem_bus_info,
		.find_smp_config	= default_find_smp_config,
		.get_smp_config		= default_get_smp_config,
	},

	.irqs = {
		.pre_vector_init	= init_ISA_irqs,
		.intr_init		= native_init_IRQ,
		.trap_init		= x86_init_noop,
	},

	.oem = {
		.arch_setup		= x86_init_noop,
		.banner			= default_banner,
	},

	.paging = {
		.pagetable_setup_start	= native_pagetable_setup_start,
		.pagetable_setup_done	= native_pagetable_setup_done,
	},

	.timers = {
		.setup_percpu_clockev	= setup_boot_APIC_clock,
		.tsc_pre_init		= x86_init_noop,
		.timer_init		= hpet_time_init,
	},

	.iommu = {
		.iommu_init		= iommu_init_noop,
	},

	.pci = {
		.init			= x86_default_pci_init,
		.init_irq		= x86_default_pci_init_irq,
		.fixup_irqs		= x86_default_pci_fixup_irqs,
	},
};

/* Secondary-CPU bringup hooks; only the APIC clock setup by default. */
struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
	.setup_percpu_clockev		= setup_secondary_APIC_clock,
};

static void default_nmi_init(void) { };
static int default_i8042_detect(void) { return 1; };

/* Runtime platform hooks (not __initdata: used after boot as well). */
struct x86_platform_ops x86_platform = {
	.calibrate_tsc			= native_calibrate_tsc,
	.get_wallclock			= mach_get_cmos_time,
	.set_wallclock			= mach_set_rtc_mmss,
	.iommu_shutdown			= iommu_shutdown_noop,
	.is_untracked_pat_range		= is_ISA_range,
	.nmi_init			= default_nmi_init,
	.i8042_detect			= default_i8042_detect
};

EXPORT_SYMBOL_GPL(x86_platform);
gpl-2.0
loansindi/linux
drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
881
3876
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Christian König.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#include <linux/hdmi.h>
#include <linux/gcd.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

/*
 * Precomputed HDMI Audio Clock Regeneration (ACR) N/CTS pairs for the
 * common TMDS clocks, one (N, CTS) pair per audio sample rate.
 */
static const struct amdgpu_afmt_acr amdgpu_afmt_predefined_acr[] = {
    /*	     32kHz	  44.1kHz	48kHz    */
    /* Clock      N	 CTS      N	 CTS      N	 CTS */
    {  25175,  4096,  25175, 28224, 125875,  6144,  25175 }, /*  25,20/1.001 MHz */
    {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
    {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
    {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
    {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
    {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
    {  74176,  4096,  74176,  5733,  75335,  6144,  74176 }, /*  74.25/1.001 MHz */
    {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
    { 148352,  4096, 148352,  5733, 150670,  6144, 148352 }, /* 148.50/1.001 MHz */
    { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
};

/*
 * calculate CTS and N values if they are not found in the table
 *
 * @clock: TMDS clock in kHz
 * @CTS:   out: cycle time stamp
 * @N:     out: ACR N value
 * @freq:  audio sample rate in Hz
 *
 * Starts from the exact ratio 128*freq / (clock*1000), reduces it by the
 * GCD, then scales N up to the smallest multiple >= the HDMI-recommended
 * 128*freq/1000 without losing precision.
 */
static void amdgpu_afmt_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
{
	int n, cts;
	unsigned long div, mul;

	/* Safe, but overly large values */
	n = 128 * freq;
	cts = clock * 1000;

	/* Smallest valid fraction */
	div = gcd(n, cts);

	n /= div;
	cts /= div;

	/*
	 * The optimal N is 128*freq/1000. Calculate the closest larger
	 * value that doesn't truncate any bits.
	 */
	mul = ((128*freq/1000) + (n-1))/n;

	n *= mul;
	cts *= mul;

	/* Check that we are in spec (not always possible) */
	if (n < (128*freq/1500))
		printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
	if (n > (128*freq/300))
		printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");

	*N = n;
	*CTS = cts;

	DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
		  *N, *CTS, freq);
}

/*
 * Return the ACR parameter set for @clock (kHz): the precomputed table
 * entry when available, otherwise values computed on the fly for all
 * three sample rates.
 */
struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
{
	struct amdgpu_afmt_acr res;
	u8 i;

	/* Precalculated values for common clocks */
	for (i = 0; i < ARRAY_SIZE(amdgpu_afmt_predefined_acr); i++) {
		if (amdgpu_afmt_predefined_acr[i].clock == clock)
			return amdgpu_afmt_predefined_acr[i];
	}

	/* And odd clocks get manually calculated */
	amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
	amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
	amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);

	return res;
}
gpl-2.0
humberos/android_kernel_sony_msm8994
drivers/net/wireless/ti/wl1251/event.c
2417
4828
/*
 * This file is part of wl1251
 *
 * Copyright (c) 1998-2007 Texas Instruments Incorporated
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include "wl1251.h"
#include "reg.h"
#include "io.h"
#include "event.h"
#include "ps.h"

/*
 * Handle a SCAN_COMPLETE firmware event: notify mac80211 that the hw
 * scan is done and, if the interface is idle, drop back into the idle
 * power state.  Returns 0 or a negative error from the PS transition.
 */
static int wl1251_event_scan_complete(struct wl1251 *wl,
				      struct event_mailbox *mbox)
{
	wl1251_debug(DEBUG_EVENT, "status: 0x%x, channels: %d",
		     mbox->scheduled_scan_status,
		     mbox->scheduled_scan_channels);

	if (!wl->scanning)
		return 0;

	ieee80211_scan_completed(wl->hw, false);
	wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed");
	wl->scanning = false;

	if (wl->hw->conf.flags & IEEE80211_CONF_IDLE)
		return wl1251_ps_set_mode(wl, STATION_IDLE);

	return 0;
}

/* Dump the raw mailbox event vector and mask for debugging. */
static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
{
	wl1251_debug(DEBUG_EVENT, "MBOX DUMP:");
	wl1251_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector);
	wl1251_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask);
}

/*
 * Dispatch every unmasked event bit found in @mbox, in firmware-defined
 * order.  Returns 0 on success or the first negative error from a
 * handler, in which case the remaining events are not processed.
 */
static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
{
	u32 vector;
	int ret;

	wl1251_event_mbox_dump(mbox);

	/* only events not masked out are of interest */
	vector = mbox->events_vector & ~(mbox->events_mask);
	wl1251_debug(DEBUG_EVENT, "vector: 0x%x", vector);

	if (vector & SCAN_COMPLETE_EVENT_ID) {
		ret = wl1251_event_scan_complete(wl, mbox);
		if (ret < 0)
			return ret;
	}

	if (vector & BSS_LOSE_EVENT_ID) {
		wl1251_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");

		/* go back to full power while we try to recover the BSS */
		if (wl->psm_requested &&
		    wl->station_mode != STATION_ACTIVE_MODE) {
			ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
			if (ret < 0)
				return ret;
		}
	}

	if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
		wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");

		/* indicate to the stack, that beacons have been lost */
		ieee80211_beacon_loss(wl->vif);
	}

	if (vector & REGAINED_BSS_EVENT_ID) {
		/* BSS is back; re-enter power save if it had been requested */
		if (wl->psm_requested) {
			ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
			if (ret < 0)
				return ret;
		}
	}

	/* RSSI CQM events are only meaningful with a vif and a threshold */
	if (wl->vif && wl->rssi_thold) {
		if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) {
			wl1251_debug(DEBUG_EVENT,
				     "ROAMING_TRIGGER_LOW_RSSI_EVENT");
			ieee80211_cqm_rssi_notify(wl->vif,
					NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
					GFP_KERNEL);
		}

		if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
			wl1251_debug(DEBUG_EVENT,
				     "ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
			ieee80211_cqm_rssi_notify(wl->vif,
					NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
					GFP_KERNEL);
		}
	}

	return 0;
}

/*
 * Poll the mailbox event field until any of the bits in the mask is set or a
 * timeout occurs (WL1251_EVENT_TIMEOUT in msecs)
 */
int wl1251_event_wait(struct wl1251 *wl, u32 mask, int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
	u32 vec, hit;

	for (;;) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;

		msleep(1);

		/* read from both event fields */
		wl1251_mem_read(wl, wl->mbox_ptr[0], &vec, sizeof(vec));
		hit = vec & mask;
		wl1251_mem_read(wl, wl->mbox_ptr[1], &vec, sizeof(vec));
		hit |= vec & mask;

		if (hit)
			return 0;
	}
}

/*
 * Program the firmware so that everything in wl->event_mask is
 * delivered (the ACX call takes the set of *masked* events).
 */
int wl1251_event_unmask(struct wl1251 *wl)
{
	int ret;

	ret = wl1251_acx_event_mbox_mask(wl, ~(wl->event_mask));

	return ret < 0 ? ret : 0;
}

/* Read the first mailbox address from the chip; the second follows it. */
void wl1251_event_mbox_config(struct wl1251 *wl)
{
	wl->mbox_ptr[0] = wl1251_reg_read32(wl, REG_EVENT_MAILBOX_PTR);
	wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);

	wl1251_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
		     wl->mbox_ptr[0], wl->mbox_ptr[1]);
}

/*
 * Interrupt-path entry: fetch the given mailbox (0 or 1) from chip
 * memory, process its events, then ack the firmware so it can reuse
 * the mailbox.
 */
int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num)
{
	struct event_mailbox mbox;
	int ret;

	wl1251_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);

	if (mbox_num > 1)
		return -EINVAL;

	/* first we read the mbox descriptor */
	wl1251_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox,
			sizeof(struct event_mailbox));

	/* process the descriptor */
	ret = wl1251_event_process(wl, &mbox);
	if (ret < 0)
		return ret;

	/* then we let the firmware know it can go on... */
	wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);

	return 0;
}
gpl-2.0
SOKP/kernel_mediatek_sprout
arch/powerpc/kernel/of_platform.c
2673
3030
/* * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * and Arnd Bergmann, IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #undef DEBUG #include <linux/string.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/export.h> #include <linux/mod_devicetable.h> #include <linux/pci.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/atomic.h> #include <asm/errno.h> #include <asm/topology.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/eeh.h> #ifdef CONFIG_PPC_OF_PLATFORM_PCI /* The probing of PCI controllers from of_platform is currently * 64 bits only, mostly due to gratuitous differences between * the 32 and 64 bits PCI code on PowerPC and the 32 bits one * lacking some bits needed here. */ static int of_pci_phb_probe(struct platform_device *dev) { struct pci_controller *phb; /* Check if we can do that ... */ if (ppc_md.pci_setup_phb == NULL) return -ENODEV; pr_info("Setting up PCI bus %s\n", dev->dev.of_node->full_name); /* Alloc and setup PHB data structure */ phb = pcibios_alloc_controller(dev->dev.of_node); if (!phb) return -ENODEV; /* Setup parent in sysfs */ phb->parent = &dev->dev; /* Setup the PHB using arch provided callback */ if (ppc_md.pci_setup_phb(phb)) { pcibios_free_controller(phb); return -ENODEV; } /* Process "ranges" property */ pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0); /* Init pci_dn data structures */ pci_devs_phb_init_dynamic(phb); /* Create EEH devices for the PHB */ eeh_dev_phb_init_dynamic(phb); /* Register devices with EEH */ if (dev->dev.of_node->child) eeh_add_device_tree_early(dev->dev.of_node); /* Scan the bus */ pcibios_scan_phb(phb); if (phb->bus == NULL) return -ENXIO; /* Claim resources. 
This might need some rework as well depending * whether we are doing probe-only or not, like assigning unassigned * resources etc... */ pcibios_claim_one_bus(phb->bus); /* Finish EEH setup */ eeh_add_device_tree_late(phb->bus); /* Add probed PCI devices to the device model */ pci_bus_add_devices(phb->bus); /* sysfs files should only be added after devices are added */ eeh_add_sysfs_files(phb->bus); return 0; } static struct of_device_id of_pci_phb_ids[] = { { .type = "pci", }, { .type = "pcix", }, { .type = "pcie", }, { .type = "pciex", }, { .type = "ht", }, {} }; static struct platform_driver of_pci_phb_driver = { .probe = of_pci_phb_probe, .driver = { .name = "of-pci", .owner = THIS_MODULE, .of_match_table = of_pci_phb_ids, }, }; static __init int of_pci_phb_init(void) { return platform_driver_register(&of_pci_phb_driver); } device_initcall(of_pci_phb_init); #endif /* CONFIG_PPC_OF_PLATFORM_PCI */
gpl-2.0
MoKee/android_kernel_samsung_piranha
drivers/video/acornfb.c
2929
35822
/* * linux/drivers/video/acornfb.c * * Copyright (C) 1998-2001 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Frame buffer code for Acorn platforms * * NOTE: Most of the modes with X!=640 will disappear shortly. * NOTE: Startup setting of HS & VS polarity not supported. * (do we need to support it if we're coming up in 640x480?) * * FIXME: (things broken by the "new improved" FBCON API) * - Blanking 8bpp displays with VIDC */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/fb.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/gfp.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <asm/pgtable.h> #include "acornfb.h" /* * VIDC machines can't do 16 or 32BPP modes. */ #ifdef HAS_VIDC #undef FBCON_HAS_CFB16 #undef FBCON_HAS_CFB32 #endif /* * Default resolution. * NOTE that it has to be supported in the table towards * the end of this file. */ #define DEFAULT_XRES 640 #define DEFAULT_YRES 480 #define DEFAULT_BPP 4 /* * define this to debug the video mode selection */ #undef DEBUG_MODE_SELECTION /* * Translation from RISC OS monitor types to actual * HSYNC and VSYNC frequency ranges. These are * probably not right, but they're the best info I * have. Allow 1% either way on the nominal for TVs. 
*/ #define NR_MONTYPES 6 static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = { { /* TV */ .hfmin = 15469, .hfmax = 15781, .vfmin = 49, .vfmax = 51, }, { /* Multi Freq */ .hfmin = 0, .hfmax = 99999, .vfmin = 0, .vfmax = 199, }, { /* Hi-res mono */ .hfmin = 58608, .hfmax = 58608, .vfmin = 64, .vfmax = 64, }, { /* VGA */ .hfmin = 30000, .hfmax = 70000, .vfmin = 60, .vfmax = 60, }, { /* SVGA */ .hfmin = 30000, .hfmax = 70000, .vfmin = 56, .vfmax = 75, }, { .hfmin = 30000, .hfmax = 70000, .vfmin = 60, .vfmax = 60, } }; static struct fb_info fb_info; static struct acornfb_par current_par; static struct vidc_timing current_vidc; extern unsigned int vram_size; /* set by setup.c */ #ifdef HAS_VIDC #define MAX_SIZE 480*1024 /* CTL VIDC Actual * 24.000 0 8.000 * 25.175 0 8.392 * 36.000 0 12.000 * 24.000 1 12.000 * 25.175 1 12.588 * 24.000 2 16.000 * 25.175 2 16.783 * 36.000 1 18.000 * 24.000 3 24.000 * 36.000 2 24.000 * 25.175 3 25.175 * 36.000 3 36.000 */ struct pixclock { u_long min_clock; u_long max_clock; u_int vidc_ctl; u_int vid_ctl; }; static struct pixclock arc_clocks[] = { /* we allow +/-1% on these */ { 123750, 126250, VIDC_CTRL_DIV3, VID_CTL_24MHz }, /* 8.000MHz */ { 82500, 84167, VIDC_CTRL_DIV2, VID_CTL_24MHz }, /* 12.000MHz */ { 61875, 63125, VIDC_CTRL_DIV1_5, VID_CTL_24MHz }, /* 16.000MHz */ { 41250, 42083, VIDC_CTRL_DIV1, VID_CTL_24MHz }, /* 24.000MHz */ }; static struct pixclock * acornfb_valid_pixrate(struct fb_var_screeninfo *var) { u_long pixclock = var->pixclock; u_int i; if (!var->pixclock) return NULL; for (i = 0; i < ARRAY_SIZE(arc_clocks); i++) if (pixclock > arc_clocks[i].min_clock && pixclock < arc_clocks[i].max_clock) return arc_clocks + i; return NULL; } /* VIDC Rules: * hcr : must be even (interlace, hcr/2 must be even) * hswr : must be even * hdsr : must be odd * hder : must be odd * * vcr : must be odd * vswr : >= 1 * vdsr : >= 1 * vder : >= vdsr * if interlaced, then hcr/2 must be even */ static void acornfb_set_timing(struct 
fb_var_screeninfo *var) { struct pixclock *pclk; struct vidc_timing vidc; u_int horiz_correction; u_int sync_len, display_start, display_end, cycle; u_int is_interlaced; u_int vid_ctl, vidc_ctl; u_int bandwidth; memset(&vidc, 0, sizeof(vidc)); pclk = acornfb_valid_pixrate(var); vidc_ctl = pclk->vidc_ctl; vid_ctl = pclk->vid_ctl; bandwidth = var->pixclock * 8 / var->bits_per_pixel; /* 25.175, 4bpp = 79.444ns per byte, 317.776ns per word: fifo = 2,6 */ if (bandwidth > 143500) vidc_ctl |= VIDC_CTRL_FIFO_3_7; else if (bandwidth > 71750) vidc_ctl |= VIDC_CTRL_FIFO_2_6; else if (bandwidth > 35875) vidc_ctl |= VIDC_CTRL_FIFO_1_5; else vidc_ctl |= VIDC_CTRL_FIFO_0_4; switch (var->bits_per_pixel) { case 1: horiz_correction = 19; vidc_ctl |= VIDC_CTRL_1BPP; break; case 2: horiz_correction = 11; vidc_ctl |= VIDC_CTRL_2BPP; break; case 4: horiz_correction = 7; vidc_ctl |= VIDC_CTRL_4BPP; break; default: case 8: horiz_correction = 5; vidc_ctl |= VIDC_CTRL_8BPP; break; } if (var->sync & FB_SYNC_COMP_HIGH_ACT) /* should be FB_SYNC_COMP */ vidc_ctl |= VIDC_CTRL_CSYNC; else { if (!(var->sync & FB_SYNC_HOR_HIGH_ACT)) vid_ctl |= VID_CTL_HS_NHSYNC; if (!(var->sync & FB_SYNC_VERT_HIGH_ACT)) vid_ctl |= VID_CTL_VS_NVSYNC; } sync_len = var->hsync_len; display_start = sync_len + var->left_margin; display_end = display_start + var->xres; cycle = display_end + var->right_margin; /* if interlaced, then hcr/2 must be even */ is_interlaced = (var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED; if (is_interlaced) { vidc_ctl |= VIDC_CTRL_INTERLACE; if (cycle & 2) { cycle += 2; var->right_margin += 2; } } vidc.h_cycle = (cycle - 2) / 2; vidc.h_sync_width = (sync_len - 2) / 2; vidc.h_border_start = (display_start - 1) / 2; vidc.h_display_start = (display_start - horiz_correction) / 2; vidc.h_display_end = (display_end - horiz_correction) / 2; vidc.h_border_end = (display_end - 1) / 2; vidc.h_interlace = (vidc.h_cycle + 1) / 2; sync_len = var->vsync_len; display_start = sync_len + var->upper_margin; 
display_end = display_start + var->yres; cycle = display_end + var->lower_margin; if (is_interlaced) cycle = (cycle - 3) / 2; else cycle = cycle - 1; vidc.v_cycle = cycle; vidc.v_sync_width = sync_len - 1; vidc.v_border_start = display_start - 1; vidc.v_display_start = vidc.v_border_start; vidc.v_display_end = display_end - 1; vidc.v_border_end = vidc.v_display_end; if (machine_is_a5k()) __raw_writeb(vid_ctl, IOEB_VID_CTL); if (memcmp(&current_vidc, &vidc, sizeof(vidc))) { current_vidc = vidc; vidc_writel(0xe0000000 | vidc_ctl); vidc_writel(0x80000000 | (vidc.h_cycle << 14)); vidc_writel(0x84000000 | (vidc.h_sync_width << 14)); vidc_writel(0x88000000 | (vidc.h_border_start << 14)); vidc_writel(0x8c000000 | (vidc.h_display_start << 14)); vidc_writel(0x90000000 | (vidc.h_display_end << 14)); vidc_writel(0x94000000 | (vidc.h_border_end << 14)); vidc_writel(0x98000000); vidc_writel(0x9c000000 | (vidc.h_interlace << 14)); vidc_writel(0xa0000000 | (vidc.v_cycle << 14)); vidc_writel(0xa4000000 | (vidc.v_sync_width << 14)); vidc_writel(0xa8000000 | (vidc.v_border_start << 14)); vidc_writel(0xac000000 | (vidc.v_display_start << 14)); vidc_writel(0xb0000000 | (vidc.v_display_end << 14)); vidc_writel(0xb4000000 | (vidc.v_border_end << 14)); vidc_writel(0xb8000000); vidc_writel(0xbc000000); } #ifdef DEBUG_MODE_SELECTION printk(KERN_DEBUG "VIDC registers for %dx%dx%d:\n", var->xres, var->yres, var->bits_per_pixel); printk(KERN_DEBUG " H-cycle : %d\n", vidc.h_cycle); printk(KERN_DEBUG " H-sync-width : %d\n", vidc.h_sync_width); printk(KERN_DEBUG " H-border-start : %d\n", vidc.h_border_start); printk(KERN_DEBUG " H-display-start : %d\n", vidc.h_display_start); printk(KERN_DEBUG " H-display-end : %d\n", vidc.h_display_end); printk(KERN_DEBUG " H-border-end : %d\n", vidc.h_border_end); printk(KERN_DEBUG " H-interlace : %d\n", vidc.h_interlace); printk(KERN_DEBUG " V-cycle : %d\n", vidc.v_cycle); printk(KERN_DEBUG " V-sync-width : %d\n", vidc.v_sync_width); printk(KERN_DEBUG " 
V-border-start : %d\n", vidc.v_border_start); printk(KERN_DEBUG " V-display-start : %d\n", vidc.v_display_start); printk(KERN_DEBUG " V-display-end : %d\n", vidc.v_display_end); printk(KERN_DEBUG " V-border-end : %d\n", vidc.v_border_end); printk(KERN_DEBUG " VIDC Ctrl (E) : 0x%08X\n", vidc_ctl); printk(KERN_DEBUG " IOEB Ctrl : 0x%08X\n", vid_ctl); #endif } static int acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int trans, struct fb_info *info) { union palette pal; if (regno >= current_par.palette_size) return 1; pal.p = 0; pal.vidc.reg = regno; pal.vidc.red = red >> 12; pal.vidc.green = green >> 12; pal.vidc.blue = blue >> 12; current_par.palette[regno] = pal; vidc_writel(pal.p); return 0; } #endif #ifdef HAS_VIDC20 #include <mach/acornfb.h> #define MAX_SIZE 2*1024*1024 /* VIDC20 has a different set of rules from the VIDC: * hcr : must be multiple of 4 * hswr : must be even * hdsr : must be even * hder : must be even * vcr : >= 2, (interlace, must be odd) * vswr : >= 1 * vdsr : >= 1 * vder : >= vdsr */ static void acornfb_set_timing(struct fb_info *info) { struct fb_var_screeninfo *var = &info->var; struct vidc_timing vidc; u_int vcr, fsize; u_int ext_ctl, dat_ctl; u_int words_per_line; memset(&vidc, 0, sizeof(vidc)); vidc.h_sync_width = var->hsync_len - 8; vidc.h_border_start = vidc.h_sync_width + var->left_margin + 8 - 12; vidc.h_display_start = vidc.h_border_start + 12 - 18; vidc.h_display_end = vidc.h_display_start + var->xres; vidc.h_border_end = vidc.h_display_end + 18 - 12; vidc.h_cycle = vidc.h_border_end + var->right_margin + 12 - 8; vidc.h_interlace = vidc.h_cycle / 2; vidc.v_sync_width = var->vsync_len - 1; vidc.v_border_start = vidc.v_sync_width + var->upper_margin; vidc.v_display_start = vidc.v_border_start; vidc.v_display_end = vidc.v_display_start + var->yres; vidc.v_border_end = vidc.v_display_end; vidc.control = acornfb_default_control(); vcr = var->vsync_len + var->upper_margin + var->yres + var->lower_margin; if 
((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) { vidc.v_cycle = (vcr - 3) / 2; vidc.control |= VIDC20_CTRL_INT; } else vidc.v_cycle = vcr - 2; switch (var->bits_per_pixel) { case 1: vidc.control |= VIDC20_CTRL_1BPP; break; case 2: vidc.control |= VIDC20_CTRL_2BPP; break; case 4: vidc.control |= VIDC20_CTRL_4BPP; break; default: case 8: vidc.control |= VIDC20_CTRL_8BPP; break; case 16: vidc.control |= VIDC20_CTRL_16BPP; break; case 32: vidc.control |= VIDC20_CTRL_32BPP; break; } acornfb_vidc20_find_rates(&vidc, var); fsize = var->vsync_len + var->upper_margin + var->lower_margin - 1; if (memcmp(&current_vidc, &vidc, sizeof(vidc))) { current_vidc = vidc; vidc_writel(VIDC20_CTRL| vidc.control); vidc_writel(0xd0000000 | vidc.pll_ctl); vidc_writel(0x80000000 | vidc.h_cycle); vidc_writel(0x81000000 | vidc.h_sync_width); vidc_writel(0x82000000 | vidc.h_border_start); vidc_writel(0x83000000 | vidc.h_display_start); vidc_writel(0x84000000 | vidc.h_display_end); vidc_writel(0x85000000 | vidc.h_border_end); vidc_writel(0x86000000); vidc_writel(0x87000000 | vidc.h_interlace); vidc_writel(0x90000000 | vidc.v_cycle); vidc_writel(0x91000000 | vidc.v_sync_width); vidc_writel(0x92000000 | vidc.v_border_start); vidc_writel(0x93000000 | vidc.v_display_start); vidc_writel(0x94000000 | vidc.v_display_end); vidc_writel(0x95000000 | vidc.v_border_end); vidc_writel(0x96000000); vidc_writel(0x97000000); } iomd_writel(fsize, IOMD_FSIZE); ext_ctl = acornfb_default_econtrol(); if (var->sync & FB_SYNC_COMP_HIGH_ACT) /* should be FB_SYNC_COMP */ ext_ctl |= VIDC20_ECTL_HS_NCSYNC | VIDC20_ECTL_VS_NCSYNC; else { if (var->sync & FB_SYNC_HOR_HIGH_ACT) ext_ctl |= VIDC20_ECTL_HS_HSYNC; else ext_ctl |= VIDC20_ECTL_HS_NHSYNC; if (var->sync & FB_SYNC_VERT_HIGH_ACT) ext_ctl |= VIDC20_ECTL_VS_VSYNC; else ext_ctl |= VIDC20_ECTL_VS_NVSYNC; } vidc_writel(VIDC20_ECTL | ext_ctl); words_per_line = var->xres * var->bits_per_pixel / 32; if (current_par.using_vram && info->fix.smem_len == 2048*1024) 
words_per_line /= 2; /* RiscPC doesn't use the VIDC's VRAM control. */ dat_ctl = VIDC20_DCTL_VRAM_DIS | VIDC20_DCTL_SNA | words_per_line; /* The data bus width is dependent on both the type * and amount of video memory. * DRAM 32bit low * 1MB VRAM 32bit * 2MB VRAM 64bit */ if (current_par.using_vram && current_par.vram_half_sam == 2048) dat_ctl |= VIDC20_DCTL_BUS_D63_0; else dat_ctl |= VIDC20_DCTL_BUS_D31_0; vidc_writel(VIDC20_DCTL | dat_ctl); #ifdef DEBUG_MODE_SELECTION printk(KERN_DEBUG "VIDC registers for %dx%dx%d:\n", var->xres, var->yres, var->bits_per_pixel); printk(KERN_DEBUG " H-cycle : %d\n", vidc.h_cycle); printk(KERN_DEBUG " H-sync-width : %d\n", vidc.h_sync_width); printk(KERN_DEBUG " H-border-start : %d\n", vidc.h_border_start); printk(KERN_DEBUG " H-display-start : %d\n", vidc.h_display_start); printk(KERN_DEBUG " H-display-end : %d\n", vidc.h_display_end); printk(KERN_DEBUG " H-border-end : %d\n", vidc.h_border_end); printk(KERN_DEBUG " H-interlace : %d\n", vidc.h_interlace); printk(KERN_DEBUG " V-cycle : %d\n", vidc.v_cycle); printk(KERN_DEBUG " V-sync-width : %d\n", vidc.v_sync_width); printk(KERN_DEBUG " V-border-start : %d\n", vidc.v_border_start); printk(KERN_DEBUG " V-display-start : %d\n", vidc.v_display_start); printk(KERN_DEBUG " V-display-end : %d\n", vidc.v_display_end); printk(KERN_DEBUG " V-border-end : %d\n", vidc.v_border_end); printk(KERN_DEBUG " Ext Ctrl (C) : 0x%08X\n", ext_ctl); printk(KERN_DEBUG " PLL Ctrl (D) : 0x%08X\n", vidc.pll_ctl); printk(KERN_DEBUG " Ctrl (E) : 0x%08X\n", vidc.control); printk(KERN_DEBUG " Data Ctrl (F) : 0x%08X\n", dat_ctl); printk(KERN_DEBUG " Fsize : 0x%08X\n", fsize); #endif } /* * We have to take note of the VIDC20's 16-bit palette here. 
* The VIDC20 looks up a 16 bit pixel as follows: * * bits 111111 * 5432109876543210 * red ++++++++ (8 bits, 7 to 0) * green ++++++++ (8 bits, 11 to 4) * blue ++++++++ (8 bits, 15 to 8) * * We use a pixel which looks like: * * bits 111111 * 5432109876543210 * red +++++ (5 bits, 4 to 0) * green +++++ (5 bits, 9 to 5) * blue +++++ (5 bits, 14 to 10) */ static int acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int trans, struct fb_info *info) { union palette pal; if (regno >= current_par.palette_size) return 1; if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) { u32 pseudo_val; pseudo_val = regno << info->var.red.offset; pseudo_val |= regno << info->var.green.offset; pseudo_val |= regno << info->var.blue.offset; ((u32 *)info->pseudo_palette)[regno] = pseudo_val; } pal.p = 0; pal.vidc20.red = red >> 8; pal.vidc20.green = green >> 8; pal.vidc20.blue = blue >> 8; current_par.palette[regno] = pal; if (info->var.bits_per_pixel == 16) { int i; pal.p = 0; vidc_writel(0x10000000); for (i = 0; i < 256; i += 1) { pal.vidc20.red = current_par.palette[ i & 31].vidc20.red; pal.vidc20.green = current_par.palette[(i >> 1) & 31].vidc20.green; pal.vidc20.blue = current_par.palette[(i >> 2) & 31].vidc20.blue; vidc_writel(pal.p); /* Palette register pointer auto-increments */ } } else { vidc_writel(0x10000000 | regno); vidc_writel(pal.p); } return 0; } #endif /* * Before selecting the timing parameters, adjust * the resolution to fit the rules. */ static int acornfb_adjust_timing(struct fb_info *info, struct fb_var_screeninfo *var, u_int fontht) { u_int font_line_len, sam_size, min_size, size, nr_y; /* xres must be even */ var->xres = (var->xres + 1) & ~1; /* * We don't allow xres_virtual to differ from xres */ var->xres_virtual = var->xres; var->xoffset = 0; if (current_par.using_vram) sam_size = current_par.vram_half_sam * 2; else sam_size = 16; /* * Now, find a value for yres_virtual which allows * us to do ywrap scrolling. 
The value of * yres_virtual must be such that the end of the * displayable frame buffer must be aligned with * the start of a font line. */ font_line_len = var->xres * var->bits_per_pixel * fontht / 8; min_size = var->xres * var->yres * var->bits_per_pixel / 8; /* * If minimum screen size is greater than that we have * available, reject it. */ if (min_size > info->fix.smem_len) return -EINVAL; /* Find int 'y', such that y * fll == s * sam < maxsize * y = s * sam / fll; s = maxsize / sam */ for (size = info->fix.smem_len; nr_y = size / font_line_len, min_size <= size; size -= sam_size) { if (nr_y * font_line_len == size) break; } nr_y *= fontht; if (var->accel_flags & FB_ACCELF_TEXT) { if (min_size > size) { /* * failed, use ypan */ size = info->fix.smem_len; var->yres_virtual = size / (font_line_len / fontht); } else var->yres_virtual = nr_y; } else if (var->yres_virtual > nr_y) var->yres_virtual = nr_y; current_par.screen_end = info->fix.smem_start + size; /* * Fix yres & yoffset if needed. 
*/ if (var->yres > var->yres_virtual) var->yres = var->yres_virtual; if (var->vmode & FB_VMODE_YWRAP) { if (var->yoffset > var->yres_virtual) var->yoffset = var->yres_virtual; } else { if (var->yoffset + var->yres > var->yres_virtual) var->yoffset = var->yres_virtual - var->yres; } /* hsync_len must be even */ var->hsync_len = (var->hsync_len + 1) & ~1; #ifdef HAS_VIDC /* left_margin must be odd */ if ((var->left_margin & 1) == 0) { var->left_margin -= 1; var->right_margin += 1; } /* right_margin must be odd */ var->right_margin |= 1; #elif defined(HAS_VIDC20) /* left_margin must be even */ if (var->left_margin & 1) { var->left_margin += 1; var->right_margin -= 1; } /* right_margin must be even */ if (var->right_margin & 1) var->right_margin += 1; #endif if (var->vsync_len < 1) var->vsync_len = 1; return 0; } static int acornfb_validate_timing(struct fb_var_screeninfo *var, struct fb_monspecs *monspecs) { unsigned long hs, vs; /* * hs(Hz) = 10^12 / (pixclock * xtotal) * vs(Hz) = hs(Hz) / ytotal * * No need to do long long divisions or anything * like that if you factor it correctly */ hs = 1953125000 / var->pixclock; hs = hs * 512 / (var->xres + var->left_margin + var->right_margin + var->hsync_len); vs = hs / (var->yres + var->upper_margin + var->lower_margin + var->vsync_len); return (vs >= monspecs->vfmin && vs <= monspecs->vfmax && hs >= monspecs->hfmin && hs <= monspecs->hfmax) ? 
0 : -EINVAL; } static inline void acornfb_update_dma(struct fb_info *info, struct fb_var_screeninfo *var) { u_int off = var->yoffset * info->fix.line_length; #if defined(HAS_MEMC) memc_write(VDMA_INIT, off >> 2); #elif defined(HAS_IOMD) iomd_writel(info->fix.smem_start + off, IOMD_VIDINIT); #endif } static int acornfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { u_int fontht; int err; /* * FIXME: Find the font height */ fontht = 8; var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.msb_right = 0; switch (var->bits_per_pixel) { case 1: case 2: case 4: case 8: var->red.offset = 0; var->red.length = var->bits_per_pixel; var->green = var->red; var->blue = var->red; var->transp.offset = 0; var->transp.length = 0; break; #ifdef HAS_VIDC20 case 16: var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 10; var->blue.length = 5; var->transp.offset = 15; var->transp.length = 1; break; case 32: var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 4; break; #endif default: return -EINVAL; } /* * Check to see if the pixel rate is valid. */ if (!acornfb_valid_pixrate(var)) return -EINVAL; /* * Validate and adjust the resolution to * match the video generator hardware. */ err = acornfb_adjust_timing(info, var, fontht); if (err) return err; /* * Validate the timing against the * monitor hardware. 
*/ return acornfb_validate_timing(var, &info->monspecs); } static int acornfb_set_par(struct fb_info *info) { switch (info->var.bits_per_pixel) { case 1: current_par.palette_size = 2; info->fix.visual = FB_VISUAL_MONO10; break; case 2: current_par.palette_size = 4; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; break; case 4: current_par.palette_size = 16; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; break; case 8: current_par.palette_size = VIDC_PALETTE_SIZE; #ifdef HAS_VIDC info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR; #else info->fix.visual = FB_VISUAL_PSEUDOCOLOR; #endif break; #ifdef HAS_VIDC20 case 16: current_par.palette_size = 32; info->fix.visual = FB_VISUAL_DIRECTCOLOR; break; case 32: current_par.palette_size = VIDC_PALETTE_SIZE; info->fix.visual = FB_VISUAL_DIRECTCOLOR; break; #endif default: BUG(); } info->fix.line_length = (info->var.xres * info->var.bits_per_pixel) / 8; #if defined(HAS_MEMC) { unsigned long size = info->fix.smem_len - VDMA_XFERSIZE; memc_write(VDMA_START, 0); memc_write(VDMA_END, size >> 2); } #elif defined(HAS_IOMD) { unsigned long start, size; u_int control; start = info->fix.smem_start; size = current_par.screen_end; if (current_par.using_vram) { size -= current_par.vram_half_sam; control = DMA_CR_E | (current_par.vram_half_sam / 256); } else { size -= 16; control = DMA_CR_E | DMA_CR_D | 16; } iomd_writel(start, IOMD_VIDSTART); iomd_writel(size, IOMD_VIDEND); iomd_writel(control, IOMD_VIDCR); } #endif acornfb_update_dma(info, &info->var); acornfb_set_timing(info); return 0; } static int acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { u_int y_bottom = var->yoffset; if (!(var->vmode & FB_VMODE_YWRAP)) y_bottom += var->yres; BUG_ON(y_bottom > var->yres_virtual); acornfb_update_dma(info, var); return 0; } static struct fb_ops acornfb_ops = { .owner = THIS_MODULE, .fb_check_var = acornfb_check_var, .fb_set_par = acornfb_set_par, .fb_setcolreg = acornfb_setcolreg, .fb_pan_display = acornfb_pan_display, .fb_fillrect 
= cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Everything after here is initialisation!!! */ static struct fb_videomode modedb[] __devinitdata = { { /* 320x256 @ 50Hz */ NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED }, { /* 640x250 @ 50Hz, 15.6 kHz hsync */ NULL, 50, 640, 250, 62500, 185, 123, 38, 21, 76, 3, 0, FB_VMODE_NONINTERLACED }, { /* 640x256 @ 50Hz, 15.6 kHz hsync */ NULL, 50, 640, 256, 62500, 185, 123, 35, 18, 76, 3, 0, FB_VMODE_NONINTERLACED }, { /* 640x512 @ 50Hz, 26.8 kHz hsync */ NULL, 50, 640, 512, 41667, 113, 87, 18, 1, 56, 3, 0, FB_VMODE_NONINTERLACED }, { /* 640x250 @ 70Hz, 31.5 kHz hsync */ NULL, 70, 640, 250, 39722, 48, 16, 109, 88, 96, 2, 0, FB_VMODE_NONINTERLACED }, { /* 640x256 @ 70Hz, 31.5 kHz hsync */ NULL, 70, 640, 256, 39722, 48, 16, 106, 85, 96, 2, 0, FB_VMODE_NONINTERLACED }, { /* 640x352 @ 70Hz, 31.5 kHz hsync */ NULL, 70, 640, 352, 39722, 48, 16, 58, 37, 96, 2, 0, FB_VMODE_NONINTERLACED }, { /* 640x480 @ 60Hz, 31.5 kHz hsync */ NULL, 60, 640, 480, 39722, 48, 16, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED }, { /* 800x600 @ 56Hz, 35.2 kHz hsync */ NULL, 56, 800, 600, 27778, 101, 23, 22, 1, 100, 2, 0, FB_VMODE_NONINTERLACED }, { /* 896x352 @ 60Hz, 21.8 kHz hsync */ NULL, 60, 896, 352, 41667, 59, 27, 9, 0, 118, 3, 0, FB_VMODE_NONINTERLACED }, { /* 1024x 768 @ 60Hz, 48.4 kHz hsync */ NULL, 60, 1024, 768, 15385, 160, 24, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED }, { /* 1280x1024 @ 60Hz, 63.8 kHz hsync */ NULL, 60, 1280, 1024, 9090, 186, 96, 38, 1, 160, 3, 0, FB_VMODE_NONINTERLACED } }; static struct fb_videomode acornfb_default_mode __devinitdata = { .name = NULL, .refresh = 60, .xres = 640, .yres = 480, .pixclock = 39722, .left_margin = 56, .right_margin = 16, .upper_margin = 34, .lower_margin = 9, .hsync_len = 88, .vsync_len = 2, .sync = 0, .vmode = FB_VMODE_NONINTERLACED }; static void __devinit acornfb_init_fbinfo(void) { static int first = 1; if 
(!first) return; first = 0; fb_info.fbops = &acornfb_ops; fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; fb_info.pseudo_palette = current_par.pseudo_palette; strcpy(fb_info.fix.id, "Acorn"); fb_info.fix.type = FB_TYPE_PACKED_PIXELS; fb_info.fix.type_aux = 0; fb_info.fix.xpanstep = 0; fb_info.fix.ypanstep = 1; fb_info.fix.ywrapstep = 1; fb_info.fix.line_length = 0; fb_info.fix.accel = FB_ACCEL_NONE; /* * setup initial parameters */ memset(&fb_info.var, 0, sizeof(fb_info.var)); #if defined(HAS_VIDC20) fb_info.var.red.length = 8; fb_info.var.transp.length = 4; #elif defined(HAS_VIDC) fb_info.var.red.length = 4; fb_info.var.transp.length = 1; #endif fb_info.var.green = fb_info.var.red; fb_info.var.blue = fb_info.var.red; fb_info.var.nonstd = 0; fb_info.var.activate = FB_ACTIVATE_NOW; fb_info.var.height = -1; fb_info.var.width = -1; fb_info.var.vmode = FB_VMODE_NONINTERLACED; fb_info.var.accel_flags = FB_ACCELF_TEXT; current_par.dram_size = 0; current_par.montype = -1; current_par.dpms = 0; } /* * setup acornfb options: * * mon:hmin-hmax:vmin-vmax:dpms:width:height * Set monitor parameters: * hmin = horizontal minimum frequency (Hz) * hmax = horizontal maximum frequency (Hz) (optional) * vmin = vertical minimum frequency (Hz) * vmax = vertical maximum frequency (Hz) (optional) * dpms = DPMS supported? (optional) * width = width of picture in mm. (optional) * height = height of picture in mm. (optional) * * montype:type * Set RISC-OS style monitor type: * 0 (or tv) - TV frequency * 1 (or multi) - Multi frequency * 2 (or hires) - Hi-res monochrome * 3 (or vga) - VGA * 4 (or svga) - SVGA * auto, or option missing * - try hardware detect * * dram:size * Set the amount of DRAM to use for the frame buffer * (even if you have VRAM). * size can optionally be followed by 'M' or 'K' for * MB or KB respectively. 
*/ static void __devinit acornfb_parse_mon(char *opt) { char *p = opt; current_par.montype = -2; fb_info.monspecs.hfmin = simple_strtoul(p, &p, 0); if (*p == '-') fb_info.monspecs.hfmax = simple_strtoul(p + 1, &p, 0); else fb_info.monspecs.hfmax = fb_info.monspecs.hfmin; if (*p != ':') goto bad; fb_info.monspecs.vfmin = simple_strtoul(p + 1, &p, 0); if (*p == '-') fb_info.monspecs.vfmax = simple_strtoul(p + 1, &p, 0); else fb_info.monspecs.vfmax = fb_info.monspecs.vfmin; if (*p != ':') goto check_values; fb_info.monspecs.dpms = simple_strtoul(p + 1, &p, 0); if (*p != ':') goto check_values; fb_info.var.width = simple_strtoul(p + 1, &p, 0); if (*p != ':') goto check_values; fb_info.var.height = simple_strtoul(p + 1, NULL, 0); check_values: if (fb_info.monspecs.hfmax < fb_info.monspecs.hfmin || fb_info.monspecs.vfmax < fb_info.monspecs.vfmin) goto bad; return; bad: printk(KERN_ERR "Acornfb: bad monitor settings: %s\n", opt); current_par.montype = -1; } static void __devinit acornfb_parse_montype(char *opt) { current_par.montype = -2; if (strncmp(opt, "tv", 2) == 0) { opt += 2; current_par.montype = 0; } else if (strncmp(opt, "multi", 5) == 0) { opt += 5; current_par.montype = 1; } else if (strncmp(opt, "hires", 5) == 0) { opt += 5; current_par.montype = 2; } else if (strncmp(opt, "vga", 3) == 0) { opt += 3; current_par.montype = 3; } else if (strncmp(opt, "svga", 4) == 0) { opt += 4; current_par.montype = 4; } else if (strncmp(opt, "auto", 4) == 0) { opt += 4; current_par.montype = -1; } else if (isdigit(*opt)) current_par.montype = simple_strtoul(opt, &opt, 0); if (current_par.montype == -2 || current_par.montype > NR_MONTYPES) { printk(KERN_ERR "acornfb: unknown monitor type: %s\n", opt); current_par.montype = -1; } else if (opt && *opt) { if (strcmp(opt, ",dpms") == 0) current_par.dpms = 1; else printk(KERN_ERR "acornfb: unknown monitor option: %s\n", opt); } } static void __devinit acornfb_parse_dram(char *opt) { unsigned int size; size = simple_strtoul(opt, 
&opt, 0); if (opt) { switch (*opt) { case 'M': case 'm': size *= 1024; case 'K': case 'k': size *= 1024; default: break; } } current_par.dram_size = size; } static struct options { char *name; void (*parse)(char *opt); } opt_table[] __devinitdata = { { "mon", acornfb_parse_mon }, { "montype", acornfb_parse_montype }, { "dram", acornfb_parse_dram }, { NULL, NULL } }; static int __devinit acornfb_setup(char *options) { struct options *optp; char *opt; if (!options || !*options) return 0; acornfb_init_fbinfo(); while ((opt = strsep(&options, ",")) != NULL) { if (!*opt) continue; for (optp = opt_table; optp->name; optp++) { int optlen; optlen = strlen(optp->name); if (strncmp(opt, optp->name, optlen) == 0 && opt[optlen] == ':') { optp->parse(opt + optlen + 1); break; } } if (!optp->name) printk(KERN_ERR "acornfb: unknown parameter: %s\n", opt); } return 0; } /* * Detect type of monitor connected * For now, we just assume SVGA */ static int __devinit acornfb_detect_monitortype(void) { return 4; } /* * This enables the unused memory to be freed on older Acorn machines. * We are freeing memory on behalf of the architecture initialisation * code here. */ static inline void free_unused_pages(unsigned int virtual_start, unsigned int virtual_end) { int mb_freed = 0; /* * Align addresses */ virtual_start = PAGE_ALIGN(virtual_start); virtual_end = PAGE_ALIGN(virtual_end); while (virtual_start < virtual_end) { struct page *page; /* * Clear page reserved bit, * set count to 1, and free * the page. 
*/ page = virt_to_page(virtual_start); ClearPageReserved(page); init_page_count(page); free_page(virtual_start); virtual_start += PAGE_SIZE; mb_freed += PAGE_SIZE / 1024; } printk("acornfb: freed %dK memory\n", mb_freed); } static int __devinit acornfb_probe(struct platform_device *dev) { unsigned long size; u_int h_sync, v_sync; int rc, i; char *option = NULL; if (fb_get_options("acornfb", &option)) return -ENODEV; acornfb_setup(option); acornfb_init_fbinfo(); current_par.dev = &dev->dev; if (current_par.montype == -1) current_par.montype = acornfb_detect_monitortype(); if (current_par.montype == -1 || current_par.montype > NR_MONTYPES) current_par.montype = 4; if (current_par.montype >= 0) { fb_info.monspecs = monspecs[current_par.montype]; fb_info.monspecs.dpms = current_par.dpms; } /* * Try to select a suitable default mode */ for (i = 0; i < ARRAY_SIZE(modedb); i++) { unsigned long hs; hs = modedb[i].refresh * (modedb[i].yres + modedb[i].upper_margin + modedb[i].lower_margin + modedb[i].vsync_len); if (modedb[i].xres == DEFAULT_XRES && modedb[i].yres == DEFAULT_YRES && modedb[i].refresh >= fb_info.monspecs.vfmin && modedb[i].refresh <= fb_info.monspecs.vfmax && hs >= fb_info.monspecs.hfmin && hs <= fb_info.monspecs.hfmax) { acornfb_default_mode = modedb[i]; break; } } fb_info.screen_base = (char *)SCREEN_BASE; fb_info.fix.smem_start = SCREEN_START; current_par.using_vram = 0; /* * If vram_size is set, we are using VRAM in * a Risc PC. However, if the user has specified * an amount of DRAM then use that instead. */ if (vram_size && !current_par.dram_size) { size = vram_size; current_par.vram_half_sam = vram_size / 1024; current_par.using_vram = 1; } else if (current_par.dram_size) size = current_par.dram_size; else size = MAX_SIZE; /* * Limit maximum screen size. 
*/ if (size > MAX_SIZE) size = MAX_SIZE; size = PAGE_ALIGN(size); #if defined(HAS_VIDC20) if (!current_par.using_vram) { dma_addr_t handle; void *base; /* * RiscPC needs to allocate the DRAM memory * for the framebuffer if we are not using * VRAM. */ base = dma_alloc_writecombine(current_par.dev, size, &handle, GFP_KERNEL); if (base == NULL) { printk(KERN_ERR "acornfb: unable to allocate screen " "memory\n"); return -ENOMEM; } fb_info.screen_base = base; fb_info.fix.smem_start = handle; } #endif #if defined(HAS_VIDC) /* * Archimedes/A5000 machines use a fixed address for their * framebuffers. Free unused pages */ free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE); #endif fb_info.fix.smem_len = size; current_par.palette_size = VIDC_PALETTE_SIZE; /* * Lookup the timing for this resolution. If we can't * find it, then we can't restore it if we change * the resolution, so we disable this feature. */ do { rc = fb_find_mode(&fb_info.var, &fb_info, NULL, modedb, ARRAY_SIZE(modedb), &acornfb_default_mode, DEFAULT_BPP); /* * If we found an exact match, all ok. */ if (rc == 1) break; rc = fb_find_mode(&fb_info.var, &fb_info, NULL, NULL, 0, &acornfb_default_mode, DEFAULT_BPP); /* * If we found an exact match, all ok. */ if (rc == 1) break; rc = fb_find_mode(&fb_info.var, &fb_info, NULL, modedb, ARRAY_SIZE(modedb), &acornfb_default_mode, DEFAULT_BPP); if (rc) break; rc = fb_find_mode(&fb_info.var, &fb_info, NULL, NULL, 0, &acornfb_default_mode, DEFAULT_BPP); } while (0); /* * If we didn't find an exact match, try the * generic database. 
*/ if (rc == 0) { printk("Acornfb: no valid mode found\n"); return -EINVAL; } h_sync = 1953125000 / fb_info.var.pixclock; h_sync = h_sync * 512 / (fb_info.var.xres + fb_info.var.left_margin + fb_info.var.right_margin + fb_info.var.hsync_len); v_sync = h_sync / (fb_info.var.yres + fb_info.var.upper_margin + fb_info.var.lower_margin + fb_info.var.vsync_len); printk(KERN_INFO "Acornfb: %dkB %cRAM, %s, using %dx%d, " "%d.%03dkHz, %dHz\n", fb_info.fix.smem_len / 1024, current_par.using_vram ? 'V' : 'D', VIDC_NAME, fb_info.var.xres, fb_info.var.yres, h_sync / 1000, h_sync % 1000, v_sync); printk(KERN_INFO "Acornfb: Monitor: %d.%03d-%d.%03dkHz, %d-%dHz%s\n", fb_info.monspecs.hfmin / 1000, fb_info.monspecs.hfmin % 1000, fb_info.monspecs.hfmax / 1000, fb_info.monspecs.hfmax % 1000, fb_info.monspecs.vfmin, fb_info.monspecs.vfmax, fb_info.monspecs.dpms ? ", DPMS" : ""); if (fb_set_var(&fb_info, &fb_info.var)) printk(KERN_ERR "Acornfb: unable to set display parameters\n"); if (register_framebuffer(&fb_info) < 0) return -EINVAL; return 0; } static struct platform_driver acornfb_driver = { .probe = acornfb_probe, .driver = { .name = "acornfb", }, }; static int __init acornfb_init(void) { return platform_driver_register(&acornfb_driver); } module_init(acornfb_init); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("VIDC 1/1a/20 framebuffer driver"); MODULE_LICENSE("GPL");
gpl-2.0
Nothing-Dev/MaxiKernel_condor
arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
4977
2854
/* * OMAP2/3 clockdomain common data * * Copyright (C) 2008-2011 Texas Instruments, Inc. * Copyright (C) 2008-2010 Nokia Corporation * * Paul Walmsley, Jouni Högander * * This file contains clockdomains and clockdomain wakeup/sleep * dependencies for the OMAP2/3 chips. Some notes: * * A useful validation rule for struct clockdomain: Any clockdomain * referenced by a wkdep_srcs or sleepdep_srcs array must have a * dep_bit assigned. So wkdep_srcs/sleepdep_srcs are really just * software-controllable dependencies. Non-software-controllable * dependencies do exist, but they are not encoded below (yet). * * 24xx does not support programmable sleep dependencies (SLEEPDEP) * * The overly-specific dep_bit names are due to a bit name collision * with CM_FCLKEN_{DSP,IVA2}. The DSP/IVA2 PM_WKDEP and CM_SLEEPDEP shift * value are the same for all powerdomains: 2 * * XXX should dep_bit be a mask, so we can test to see if it is 0 as a * sanity check? * XXX encode hardware fixed wakeup dependencies -- esp. for 3430 CORE */ /* * To-Do List * -> Port the Sleep/Wakeup dependencies for the domains * from the Power domain framework */ #include <linux/kernel.h> #include <linux/io.h> #include "clockdomain.h" #include "prm2xxx_3xxx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-24xx.h" #include "cm-regbits-34xx.h" #include "cm-regbits-44xx.h" #include "prm-regbits-24xx.h" #include "prm-regbits-34xx.h" /* * Clockdomain dependencies for wkdeps/sleepdeps * * XXX Hardware dependencies (e.g., dependencies that cannot be * changed in software) are not included here yet, but should be. 
*/ /* Wakeup dependency source arrays */ /* 2xxx-specific possible dependencies */ /* 2xxx PM_WKDEP_GFX: CORE, MPU, WKUP */ struct clkdm_dep gfx_24xx_wkdeps[] = { { .clkdm_name = "core_l3_clkdm" }, { .clkdm_name = "core_l4_clkdm" }, { .clkdm_name = "mpu_clkdm" }, { .clkdm_name = "wkup_clkdm" }, { NULL }, }; /* 2xxx PM_WKDEP_DSP: CORE, MPU, WKUP */ struct clkdm_dep dsp_24xx_wkdeps[] = { { .clkdm_name = "core_l3_clkdm" }, { .clkdm_name = "core_l4_clkdm" }, { .clkdm_name = "mpu_clkdm" }, { .clkdm_name = "wkup_clkdm" }, { NULL }, }; /* * OMAP2/3-common clockdomains * * Even though the 2420 has a single PRCM module from the * interconnect's perspective, internally it does appear to have * separate PRM and CM clockdomains. The usual test case is * sys_clkout/sys_clkout2. */ /* This is an implicit clockdomain - it is never defined as such in TRM */ struct clockdomain wkup_common_clkdm = { .name = "wkup_clkdm", .pwrdm = { .name = "wkup_pwrdm" }, .dep_bit = OMAP_EN_WKUP_SHIFT, }; struct clockdomain prm_common_clkdm = { .name = "prm_clkdm", .pwrdm = { .name = "wkup_pwrdm" }, }; struct clockdomain cm_common_clkdm = { .name = "cm_clkdm", .pwrdm = { .name = "core_pwrdm" }, };
gpl-2.0
omnirom/android_kernel_oppo_find5
arch/xtensa/kernel/syscall.c
8561
1502
/* * arch/xtensa/kernel/syscall.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2000 Silicon Graphics, Inc. * Copyright (C) 1995 - 2000 by Ralf Baechle * * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> * Chris Zankel <chris@zankel.net> * Kevin Chea * */ #include <asm/uaccess.h> #include <asm/syscall.h> #include <asm/unistd.h> #include <linux/linkage.h> #include <linux/stringify.h> #include <linux/errno.h> #include <linux/syscalls.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/mman.h> #include <linux/shm.h> typedef void (*syscall_t)(void); syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= { [0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall, #undef __SYSCALL #define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol, #undef _XTENSA_UNISTD_H #undef __KERNEL_SYSCALLS__ #include <asm/unistd.h> }; asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg) { unsigned long ret; long err; err = do_shmat(shmid, shmaddr, shmflg, &ret); if (err) return err; return (long)ret; } asmlinkage long xtensa_fadvise64_64(int fd, int advice, unsigned long long offset, unsigned long long len) { return sys_fadvise64_64(fd, offset, len, advice); }
gpl-2.0
sdotter/GPE-5.1.0
arch/mn10300/lib/negdi2.c
13937
1821
/* More subroutines needed by GCC output code on some machines. */ /* Compile this one with gcc. */ /* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public Licence as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public Licence, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public Licence restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combine executable.) GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more details. You should have received a copy of the GNU General Public Licence along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* It is incorrect to include config.h here, because this file is being compiled for the target, and hence definitions concerning only the host do not apply. */ #include <linux/types.h> union DWunion { s64 ll; struct { s32 low; s32 high; } s; }; s64 __negdi2(s64 u) { union DWunion w; union DWunion uu; uu.ll = u; w.s.low = -uu.s.low; w.s.high = -uu.s.high - ((u32) w.s.low > 0); return w.ll; }
gpl-2.0
iodak/mako-msm
arch/mn10300/lib/ashrdi3.c
13937
1645
/* ashrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */ /* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public Licence as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more details. You should have received a copy of the GNU General Public Licence along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define BITS_PER_UNIT 8 typedef int SItype __attribute__((mode(SI))); typedef unsigned int USItype __attribute__((mode(SI))); typedef int DItype __attribute__((mode(DI))); typedef int word_type __attribute__((mode(__word__))); struct DIstruct { SItype low; SItype high; }; union DIunion { struct DIstruct s; DItype ll; }; DItype __ashrdi3(DItype u, word_type b) { union DIunion w; union DIunion uu; word_type bm; if (b == 0) return u; uu.ll = u; bm = (sizeof(SItype) * BITS_PER_UNIT) - b; if (bm <= 0) { /* w.s.high = 1..1 or 0..0 */ w.s.high = uu.s.high >> (sizeof(SItype) * BITS_PER_UNIT - 1); w.s.low = uu.s.high >> -bm; } else { USItype carries = (USItype)uu.s.high << bm; w.s.high = uu.s.high >> b; w.s.low = ((USItype)uu.s.low >> b) | carries; } return w.ll; }
gpl-2.0
jderrick/linux-block
arch/mn10300/lib/lshrdi3.c
13937
1587
/* lshrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */ /* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public Licence as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more details. You should have received a copy of the GNU General Public Licence along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define BITS_PER_UNIT 8 typedef int SItype __attribute__((mode(SI))); typedef unsigned int USItype __attribute__((mode(SI))); typedef int DItype __attribute__((mode(DI))); typedef int word_type __attribute__((mode(__word__))); struct DIstruct { SItype low; SItype high; }; union DIunion { struct DIstruct s; DItype ll; }; DItype __lshrdi3(DItype u, word_type b) { union DIunion w; word_type bm; union DIunion uu; if (b == 0) return u; uu.ll = u; bm = (sizeof(SItype) * BITS_PER_UNIT) - b; if (bm <= 0) { w.s.high = 0; w.s.low = (USItype) uu.s.high >> -bm; } else { USItype carries = (USItype) uu.s.high << bm; w.s.high = (USItype) uu.s.high >> b; w.s.low = ((USItype) uu.s.low >> b) | carries; } return w.ll; }
gpl-2.0
MoKee/android_kernel_sony_msm8974pro
arch/mn10300/lib/lshrdi3.c
13937
1587
/* lshrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */ /* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public Licence as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more details. You should have received a copy of the GNU General Public Licence along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define BITS_PER_UNIT 8 typedef int SItype __attribute__((mode(SI))); typedef unsigned int USItype __attribute__((mode(SI))); typedef int DItype __attribute__((mode(DI))); typedef int word_type __attribute__((mode(__word__))); struct DIstruct { SItype low; SItype high; }; union DIunion { struct DIstruct s; DItype ll; }; DItype __lshrdi3(DItype u, word_type b) { union DIunion w; word_type bm; union DIunion uu; if (b == 0) return u; uu.ll = u; bm = (sizeof(SItype) * BITS_PER_UNIT) - b; if (bm <= 0) { w.s.high = 0; w.s.low = (USItype) uu.s.high >> -bm; } else { USItype carries = (USItype) uu.s.high << bm; w.s.high = (USItype) uu.s.high >> b; w.s.low = ((USItype) uu.s.low >> b) | carries; } return w.ll; }
gpl-2.0
lucatib/a33_linux
sound/core/hwdep_compat.c
14961
2352
/*
 * 32bit -> 64bit ioctl wrapper for hwdep API
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

/* This file is included from hwdep.c */

#include <linux/compat.h>

/*
 * 32-bit userspace layout of struct snd_hwdep_dsp_image: the kernel
 * pointer field becomes a 32-bit handle (compat_caddr_t-sized).
 */
struct snd_hwdep_dsp_image32 {
	u32 index;
	unsigned char name[64];
	u32 image;	/* pointer */
	u32 length;
	u32 driver_data;
} /* don't set packed attribute here */;

/*
 * Rebuild a native struct snd_hwdep_dsp_image in compat user space from
 * the 32-bit layout, then hand it to the regular snd_hwdep_dsp_load().
 * Returns 0 on success or a negative error code.
 */
static int snd_hwdep_dsp_load_compat(struct snd_hwdep *hw,
				     struct snd_hwdep_dsp_image32 __user *src)
{
	struct snd_hwdep_dsp_image __user *dst;
	compat_caddr_t ptr;
	u32 val;

	dst = compat_alloc_user_space(sizeof(*dst));

	/* index and name: 4 + 64 bytes share the same layout in both ABIs,
	 * so they can be copied verbatim */
	if (copy_in_user(dst, src, 4 + 64))
		return -EFAULT;
	/* widen the 32-bit image handle into a real user pointer */
	if (get_user(ptr, &src->image) ||
	    put_user(compat_ptr(ptr), &dst->image))
		return -EFAULT;
	if (get_user(val, &src->length) ||
	    put_user(val, &dst->length))
		return -EFAULT;
	if (get_user(val, &src->driver_data) ||
	    put_user(val, &dst->driver_data))
		return -EFAULT;

	return snd_hwdep_dsp_load(hw, dst);
}

/* compat ioctl number differs from the native one only in the struct size */
enum {
	SNDRV_HWDEP_IOCTL_DSP_LOAD32 = _IOW('H', 0x03, struct snd_hwdep_dsp_image32)
};

/*
 * compat_ioctl entry point for hwdep devices.
 *
 * Commands whose payload is layout-identical in both ABIs are forwarded
 * straight to the native handler; DSP_LOAD32 needs translation; anything
 * else is offered to the driver's own ioctl_compat hook, if present.
 */
static long snd_hwdep_ioctl_compat(struct file * file, unsigned int cmd,
				   unsigned long arg)
{
	struct snd_hwdep *hw = file->private_data;
	void __user *argp = compat_ptr(arg);
	switch (cmd) {
	case SNDRV_HWDEP_IOCTL_PVERSION:
	case SNDRV_HWDEP_IOCTL_INFO:
	case SNDRV_HWDEP_IOCTL_DSP_STATUS:
		/* same binary layout for 32- and 64-bit callers */
		return snd_hwdep_ioctl(file, cmd, (unsigned long)argp);
	case SNDRV_HWDEP_IOCTL_DSP_LOAD32:
		return snd_hwdep_dsp_load_compat(hw, argp);
	}
	if (hw->ops.ioctl_compat)
		return hw->ops.ioctl_compat(hw, file, cmd, arg);
	return -ENOIOCTLCMD;
}
gpl-2.0
BPI-SINOVOIP/BPI-Mainline-kernel
linux-4.14/arch/arm/mach-bcm/platsmp.c
114
8309
/* * Copyright (C) 2014-2015 Broadcom Corporation * Copyright 2014 Linaro Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/cpumask.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/smp.h> #include <asm/cacheflush.h> #include <asm/smp.h> #include <asm/smp_plat.h> #include <asm/smp_scu.h> /* Size of mapped Cortex A9 SCU address space */ #define CORTEX_A9_SCU_SIZE 0x58 #define SECONDARY_TIMEOUT_NS NSEC_PER_MSEC /* 1 msec (in nanoseconds) */ #define BOOT_ADDR_CPUID_MASK 0x3 /* Name of device node property defining secondary boot register location */ #define OF_SECONDARY_BOOT "secondary-boot-reg" #define MPIDR_CPUID_BITMASK 0x3 /* * Enable the Cortex A9 Snoop Control Unit * * By the time this is called we already know there are multiple * cores present. We assume we're running on a Cortex A9 processor, * so any trouble getting the base address register or getting the * SCU base is a problem. * * Return 0 if successful or an error code otherwise. 
*/ static int __init scu_a9_enable(void) { unsigned long config_base; void __iomem *scu_base; if (!scu_a9_has_base()) { pr_err("no configuration base address register!\n"); return -ENXIO; } /* Config base address register value is zero for uniprocessor */ config_base = scu_a9_get_base(); if (!config_base) { pr_err("hardware reports only one core\n"); return -ENOENT; } scu_base = ioremap((phys_addr_t)config_base, CORTEX_A9_SCU_SIZE); if (!scu_base) { pr_err("failed to remap config base (%lu/%u) for SCU\n", config_base, CORTEX_A9_SCU_SIZE); return -ENOMEM; } scu_enable(scu_base); iounmap(scu_base); /* That's the last we'll need of this */ return 0; } static u32 secondary_boot_addr_for(unsigned int cpu) { u32 secondary_boot_addr = 0; struct device_node *cpu_node = of_get_cpu_node(cpu, NULL); if (!cpu_node) { pr_err("Failed to find device tree node for CPU%u\n", cpu); return 0; } if (of_property_read_u32(cpu_node, OF_SECONDARY_BOOT, &secondary_boot_addr)) pr_err("required secondary boot register not specified for CPU%u\n", cpu); of_node_put(cpu_node); return secondary_boot_addr; } static int nsp_write_lut(unsigned int cpu) { void __iomem *sku_rom_lut; phys_addr_t secondary_startup_phy; const u32 secondary_boot_addr = secondary_boot_addr_for(cpu); if (!secondary_boot_addr) return -EINVAL; sku_rom_lut = ioremap_nocache((phys_addr_t)secondary_boot_addr, sizeof(phys_addr_t)); if (!sku_rom_lut) { pr_warn("unable to ioremap SKU-ROM LUT register for cpu %u\n", cpu); return -ENOMEM; } secondary_startup_phy = __pa_symbol(secondary_startup); BUG_ON(secondary_startup_phy > (phys_addr_t)U32_MAX); writel_relaxed(secondary_startup_phy, sku_rom_lut); /* Ensure the write is visible to the secondary core */ smp_wmb(); iounmap(sku_rom_lut); return 0; } static void __init bcm_smp_prepare_cpus(unsigned int max_cpus) { const cpumask_t only_cpu_0 = { CPU_BITS_CPU0 }; /* Enable the SCU on Cortex A9 based SoCs */ if (scu_a9_enable()) { /* Update the CPU present map to reflect uniprocessor 
mode */ pr_warn("failed to enable A9 SCU - disabling SMP\n"); init_cpu_present(&only_cpu_0); } } /* * The ROM code has the secondary cores looping, waiting for an event. * When an event occurs each core examines the bottom two bits of the * secondary boot register. When a core finds those bits contain its * own core id, it performs initialization, including computing its boot * address by clearing the boot register value's bottom two bits. The * core signals that it is beginning its execution by writing its boot * address back to the secondary boot register, and finally jumps to * that address. * * So to start a core executing we need to: * - Encode the (hardware) CPU id with the bottom bits of the secondary * start address. * - Write that value into the secondary boot register. * - Generate an event to wake up the secondary CPU(s). * - Wait for the secondary boot register to be re-written, which * indicates the secondary core has started. */ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle) { void __iomem *boot_reg; phys_addr_t boot_func; u64 start_clock; u32 cpu_id; u32 boot_val; bool timeout = false; const u32 secondary_boot_addr = secondary_boot_addr_for(cpu); cpu_id = cpu_logical_map(cpu); if (cpu_id & ~BOOT_ADDR_CPUID_MASK) { pr_err("bad cpu id (%u > %u)\n", cpu_id, BOOT_ADDR_CPUID_MASK); return -EINVAL; } if (!secondary_boot_addr) return -EINVAL; boot_reg = ioremap_nocache((phys_addr_t)secondary_boot_addr, sizeof(phys_addr_t)); if (!boot_reg) { pr_err("unable to map boot register for cpu %u\n", cpu_id); return -ENOMEM; } /* * Secondary cores will start in secondary_startup(), * defined in "arch/arm/kernel/head.S" */ boot_func = __pa_symbol(secondary_startup); BUG_ON(boot_func & BOOT_ADDR_CPUID_MASK); BUG_ON(boot_func > (phys_addr_t)U32_MAX); /* The core to start is encoded in the low bits */ boot_val = (u32)boot_func | cpu_id; writel_relaxed(boot_val, boot_reg); sev(); /* The low bits will be cleared once the core has started */ 
start_clock = local_clock(); while (!timeout && readl_relaxed(boot_reg) == boot_val) timeout = local_clock() - start_clock > SECONDARY_TIMEOUT_NS; iounmap(boot_reg); if (!timeout) return 0; pr_err("timeout waiting for cpu %u to start\n", cpu_id); return -ENXIO; } /* Cluster Dormant Control command to bring CPU into a running state */ #define CDC_CMD 6 #define CDC_CMD_OFFSET 0 #define CDC_CMD_REG(cpu) (CDC_CMD_OFFSET + 4*(cpu)) /* * BCM23550 has a Cluster Dormant Control block that keeps the core in * idle state. A command needs to be sent to the block to bring the CPU * into running state. */ static int bcm23550_boot_secondary(unsigned int cpu, struct task_struct *idle) { void __iomem *cdc_base; struct device_node *dn; char *name; int ret; /* Make sure a CDC node exists before booting the * secondary core. */ name = "brcm,bcm23550-cdc"; dn = of_find_compatible_node(NULL, NULL, name); if (!dn) { pr_err("unable to find cdc node\n"); return -ENODEV; } cdc_base = of_iomap(dn, 0); of_node_put(dn); if (!cdc_base) { pr_err("unable to remap cdc base register\n"); return -ENOMEM; } /* Boot the secondary core */ ret = kona_boot_secondary(cpu, idle); if (ret) goto out; /* Bring this CPU to RUN state so that nIRQ nFIQ * signals are unblocked. */ writel_relaxed(CDC_CMD, cdc_base + CDC_CMD_REG(cpu)); out: iounmap(cdc_base); return ret; } static int nsp_boot_secondary(unsigned int cpu, struct task_struct *idle) { int ret; /* * After wake up, secondary core branches to the startup * address programmed at SKU ROM LUT location. 
*/ ret = nsp_write_lut(cpu); if (ret) { pr_err("unable to write startup addr to SKU ROM LUT\n"); goto out; } /* Send a CPU wakeup interrupt to the secondary core */ arch_send_wakeup_ipi_mask(cpumask_of(cpu)); out: return ret; } static const struct smp_operations kona_smp_ops __initconst = { .smp_prepare_cpus = bcm_smp_prepare_cpus, .smp_boot_secondary = kona_boot_secondary, }; CPU_METHOD_OF_DECLARE(bcm_smp_bcm281xx, "brcm,bcm11351-cpu-method", &kona_smp_ops); static const struct smp_operations bcm23550_smp_ops __initconst = { .smp_boot_secondary = bcm23550_boot_secondary, }; CPU_METHOD_OF_DECLARE(bcm_smp_bcm23550, "brcm,bcm23550", &bcm23550_smp_ops); static const struct smp_operations nsp_smp_ops __initconst = { .smp_prepare_cpus = bcm_smp_prepare_cpus, .smp_boot_secondary = nsp_boot_secondary, }; CPU_METHOD_OF_DECLARE(bcm_smp_nsp, "brcm,bcm-nsp-smp", &nsp_smp_ops);
gpl-2.0
acassis/linux_kernel_ssd1935
sound/pci/echoaudio/darla20_dsp.c
114
3473
/*************************************************************************** Copyright Echo Digital Audio Corporation (c) 1998 - 2004 All rights reserved www.echoaudio.com This file is part of Echo Digital Audio's generic driver library. Echo Digital Audio's generic driver library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ************************************************************************* Translation from C++ and adaptation for use in ALSA-Driver were made by Giuliano Pochini <pochini@shiny.it> ****************************************************************************/ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id) { int err; DE_INIT(("init_hw() - Darla20\n")); snd_assert((subdevice_id & 0xfff0) == DARLA20, return -ENODEV); if ((err = init_dsp_comm_page(chip))) { DE_INIT(("init_hw - could not initialize DSP comm page\n")); return err; } chip->device_id = device_id; chip->subdevice_id = subdevice_id; chip->bad_board = TRUE; chip->dsp_code_to_load = &card_fw[FW_DARLA20_DSP]; chip->spdif_status = GD_SPDIF_STATUS_UNDEF; chip->clock_state = GD_CLOCK_UNDEF; /* Since this card has no ASIC, mark it as loaded so everything works OK */ chip->asic_loaded = TRUE; chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL; if ((err = load_firmware(chip)) < 0) return err; chip->bad_board = FALSE; if ((err = init_line_levels(chip)) < 0) return err; DE_INIT(("init_hw done\n")); return err; } /* The Darla20 has no 
external clock sources */ static u32 detect_input_clocks(const struct echoaudio *chip) { return ECHO_CLOCK_BIT_INTERNAL; } /* The Darla20 has no ASIC. Just do nothing */ static int load_asic(struct echoaudio *chip) { return 0; } static int set_sample_rate(struct echoaudio *chip, u32 rate) { u8 clock_state, spdif_status; if (wait_handshake(chip)) return -EIO; switch (rate) { case 44100: clock_state = GD_CLOCK_44; spdif_status = GD_SPDIF_STATUS_44; break; case 48000: clock_state = GD_CLOCK_48; spdif_status = GD_SPDIF_STATUS_48; break; default: clock_state = GD_CLOCK_NOCHANGE; spdif_status = GD_SPDIF_STATUS_NOCHANGE; break; } if (chip->clock_state == clock_state) clock_state = GD_CLOCK_NOCHANGE; if (spdif_status == chip->spdif_status) spdif_status = GD_SPDIF_STATUS_NOCHANGE; chip->comm_page->sample_rate = cpu_to_le32(rate); chip->comm_page->gd_clock_state = clock_state; chip->comm_page->gd_spdif_status = spdif_status; chip->comm_page->gd_resampler_state = 3; /* magic number - should always be 3 */ /* Save the new audio state if it changed */ if (clock_state != GD_CLOCK_NOCHANGE) chip->clock_state = clock_state; if (spdif_status != GD_SPDIF_STATUS_NOCHANGE) chip->spdif_status = spdif_status; chip->sample_rate = rate; clear_handshake(chip); return send_vector(chip, DSP_VC_SET_GD_AUDIO_STATE); }
gpl-2.0
parc-wifi/kernel
drivers/pinctrl/pinctrl-imx28.c
114
11316
/*
 * Copyright 2012 Freescale Semiconductor, Inc.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-mxs.h"

/*
 * i.MX28 pad IDs, encoded as PINID(bank, pin).  Gaps in the pin numbers
 * within a bank mirror reserved positions in the chip's pad registers.
 */
enum imx28_pin_enum {
	GPMI_D00	= PINID(0, 0),
	GPMI_D01	= PINID(0, 1),
	GPMI_D02	= PINID(0, 2),
	GPMI_D03	= PINID(0, 3),
	GPMI_D04	= PINID(0, 4),
	GPMI_D05	= PINID(0, 5),
	GPMI_D06	= PINID(0, 6),
	GPMI_D07	= PINID(0, 7),
	GPMI_CE0N	= PINID(0, 16),
	GPMI_CE1N	= PINID(0, 17),
	GPMI_CE2N	= PINID(0, 18),
	GPMI_CE3N	= PINID(0, 19),
	GPMI_RDY0	= PINID(0, 20),
	GPMI_RDY1	= PINID(0, 21),
	GPMI_RDY2	= PINID(0, 22),
	GPMI_RDY3	= PINID(0, 23),
	GPMI_RDN	= PINID(0, 24),
	GPMI_WRN	= PINID(0, 25),
	GPMI_ALE	= PINID(0, 26),
	GPMI_CLE	= PINID(0, 27),
	GPMI_RESETN	= PINID(0, 28),
	LCD_D00		= PINID(1, 0),
	LCD_D01		= PINID(1, 1),
	LCD_D02		= PINID(1, 2),
	LCD_D03		= PINID(1, 3),
	LCD_D04		= PINID(1, 4),
	LCD_D05		= PINID(1, 5),
	LCD_D06		= PINID(1, 6),
	LCD_D07		= PINID(1, 7),
	LCD_D08		= PINID(1, 8),
	LCD_D09		= PINID(1, 9),
	LCD_D10		= PINID(1, 10),
	LCD_D11		= PINID(1, 11),
	LCD_D12		= PINID(1, 12),
	LCD_D13		= PINID(1, 13),
	LCD_D14		= PINID(1, 14),
	LCD_D15		= PINID(1, 15),
	LCD_D16		= PINID(1, 16),
	LCD_D17		= PINID(1, 17),
	LCD_D18		= PINID(1, 18),
	LCD_D19		= PINID(1, 19),
	LCD_D20		= PINID(1, 20),
	LCD_D21		= PINID(1, 21),
	LCD_D22		= PINID(1, 22),
	LCD_D23		= PINID(1, 23),
	LCD_RD_E	= PINID(1, 24),
	LCD_WR_RWN	= PINID(1, 25),
	LCD_RS		= PINID(1, 26),
	LCD_CS		= PINID(1, 27),
	LCD_VSYNC	= PINID(1, 28),
	LCD_HSYNC	= PINID(1, 29),
	LCD_DOTCLK	= PINID(1, 30),
	LCD_ENABLE	= PINID(1, 31),
	SSP0_DATA0	= PINID(2, 0),
	SSP0_DATA1	= PINID(2, 1),
	SSP0_DATA2	= PINID(2, 2),
	SSP0_DATA3	= PINID(2, 3),
	SSP0_DATA4	= PINID(2, 4),
	SSP0_DATA5	= PINID(2, 5),
	SSP0_DATA6	= PINID(2, 6),
	SSP0_DATA7	= PINID(2, 7),
	SSP0_CMD	= PINID(2, 8),
	SSP0_DETECT	= PINID(2, 9),
	SSP0_SCK	= PINID(2, 10),
	SSP1_SCK	= PINID(2, 12),
	SSP1_CMD	= PINID(2, 13),
	SSP1_DATA0	= PINID(2, 14),
	SSP1_DATA3	= PINID(2, 15),
	SSP2_SCK	= PINID(2, 16),
	SSP2_MOSI	= PINID(2, 17),
	SSP2_MISO	= PINID(2, 18),
	SSP2_SS0	= PINID(2, 19),
	SSP2_SS1	= PINID(2, 20),
	SSP2_SS2	= PINID(2, 21),
	SSP3_SCK	= PINID(2, 24),
	SSP3_MOSI	= PINID(2, 25),
	SSP3_MISO	= PINID(2, 26),
	SSP3_SS0	= PINID(2, 27),
	AUART0_RX	= PINID(3, 0),
	AUART0_TX	= PINID(3, 1),
	AUART0_CTS	= PINID(3, 2),
	AUART0_RTS	= PINID(3, 3),
	AUART1_RX	= PINID(3, 4),
	AUART1_TX	= PINID(3, 5),
	AUART1_CTS	= PINID(3, 6),
	AUART1_RTS	= PINID(3, 7),
	AUART2_RX	= PINID(3, 8),
	AUART2_TX	= PINID(3, 9),
	AUART2_CTS	= PINID(3, 10),
	AUART2_RTS	= PINID(3, 11),
	AUART3_RX	= PINID(3, 12),
	AUART3_TX	= PINID(3, 13),
	AUART3_CTS	= PINID(3, 14),
	AUART3_RTS	= PINID(3, 15),
	PWM0		= PINID(3, 16),
	PWM1		= PINID(3, 17),
	PWM2		= PINID(3, 18),
	SAIF0_MCLK	= PINID(3, 20),
	SAIF0_LRCLK	= PINID(3, 21),
	SAIF0_BITCLK	= PINID(3, 22),
	SAIF0_SDATA0	= PINID(3, 23),
	I2C0_SCL	= PINID(3, 24),
	I2C0_SDA	= PINID(3, 25),
	SAIF1_SDATA0	= PINID(3, 26),
	SPDIF		= PINID(3, 27),
	PWM3		= PINID(3, 28),
	PWM4		= PINID(3, 29),
	LCD_RESET	= PINID(3, 30),
	ENET0_MDC	= PINID(4, 0),
	ENET0_MDIO	= PINID(4, 1),
	ENET0_RX_EN	= PINID(4, 2),
	ENET0_RXD0	= PINID(4, 3),
	ENET0_RXD1	= PINID(4, 4),
	ENET0_TX_CLK	= PINID(4, 5),
	ENET0_TX_EN	= PINID(4, 6),
	ENET0_TXD0	= PINID(4, 7),
	ENET0_TXD1	= PINID(4, 8),
	ENET0_RXD2	= PINID(4, 9),
	ENET0_RXD3	= PINID(4, 10),
	ENET0_TXD2	= PINID(4, 11),
	ENET0_TXD3	= PINID(4, 12),
	ENET0_RX_CLK	= PINID(4, 13),
	ENET0_COL	= PINID(4, 14),
	ENET0_CRS	= PINID(4, 15),
	ENET_CLK	= PINID(4, 16),
	JTAG_RTCK	= PINID(4, 20),
	EMI_D00		= PINID(5, 0),
	EMI_D01		= PINID(5, 1),
	EMI_D02		= PINID(5, 2),
	EMI_D03		= PINID(5, 3),
	EMI_D04		= PINID(5, 4),
	EMI_D05		= PINID(5, 5),
	EMI_D06		= PINID(5, 6),
	EMI_D07		= PINID(5, 7),
	EMI_D08		= PINID(5, 8),
	EMI_D09		= PINID(5, 9),
	EMI_D10		= PINID(5, 10),
	EMI_D11		= PINID(5, 11),
	EMI_D12		= PINID(5, 12),
	EMI_D13		= PINID(5, 13),
	EMI_D14		= PINID(5, 14),
	EMI_D15		= PINID(5, 15),
	EMI_ODT0	= PINID(5, 16),
	EMI_DQM0	= PINID(5, 17),
	EMI_ODT1	= PINID(5, 18),
	EMI_DQM1	= PINID(5, 19),
	EMI_DDR_OPEN_FB	= PINID(5, 20),
	EMI_CLK		= PINID(5, 21),
	EMI_DQS0	= PINID(5, 22),
	EMI_DQS1	= PINID(5, 23),
	EMI_DDR_OPEN	= PINID(5, 26),
	EMI_A00		= PINID(6, 0),
	EMI_A01		= PINID(6, 1),
	EMI_A02		= PINID(6, 2),
	EMI_A03		= PINID(6, 3),
	EMI_A04		= PINID(6, 4),
	EMI_A05		= PINID(6, 5),
	EMI_A06		= PINID(6, 6),
	EMI_A07		= PINID(6, 7),
	EMI_A08		= PINID(6, 8),
	EMI_A09		= PINID(6, 9),
	EMI_A10		= PINID(6, 10),
	EMI_A11		= PINID(6, 11),
	EMI_A12		= PINID(6, 12),
	EMI_A13		= PINID(6, 13),
	EMI_A14		= PINID(6, 14),
	EMI_BA0		= PINID(6, 16),
	EMI_BA1		= PINID(6, 17),
	EMI_BA2		= PINID(6, 18),
	EMI_CASN	= PINID(6, 19),
	EMI_RASN	= PINID(6, 20),
	EMI_WEN		= PINID(6, 21),
	EMI_CE0N	= PINID(6, 22),
	EMI_CE1N	= PINID(6, 23),
	EMI_CKE		= PINID(6, 24),
};

/* Pin descriptor table registered with the pinctrl core, one entry per pad. */
static const struct pinctrl_pin_desc imx28_pins[] = {
	MXS_PINCTRL_PIN(GPMI_D00),
	MXS_PINCTRL_PIN(GPMI_D01),
	MXS_PINCTRL_PIN(GPMI_D02),
	MXS_PINCTRL_PIN(GPMI_D03),
	MXS_PINCTRL_PIN(GPMI_D04),
	MXS_PINCTRL_PIN(GPMI_D05),
	MXS_PINCTRL_PIN(GPMI_D06),
	MXS_PINCTRL_PIN(GPMI_D07),
	MXS_PINCTRL_PIN(GPMI_CE0N),
	MXS_PINCTRL_PIN(GPMI_CE1N),
	MXS_PINCTRL_PIN(GPMI_CE2N),
	MXS_PINCTRL_PIN(GPMI_CE3N),
	MXS_PINCTRL_PIN(GPMI_RDY0),
	MXS_PINCTRL_PIN(GPMI_RDY1),
	MXS_PINCTRL_PIN(GPMI_RDY2),
	MXS_PINCTRL_PIN(GPMI_RDY3),
	MXS_PINCTRL_PIN(GPMI_RDN),
	MXS_PINCTRL_PIN(GPMI_WRN),
	MXS_PINCTRL_PIN(GPMI_ALE),
	MXS_PINCTRL_PIN(GPMI_CLE),
	MXS_PINCTRL_PIN(GPMI_RESETN),
	MXS_PINCTRL_PIN(LCD_D00),
	MXS_PINCTRL_PIN(LCD_D01),
	MXS_PINCTRL_PIN(LCD_D02),
	MXS_PINCTRL_PIN(LCD_D03),
	MXS_PINCTRL_PIN(LCD_D04),
	MXS_PINCTRL_PIN(LCD_D05),
	MXS_PINCTRL_PIN(LCD_D06),
	MXS_PINCTRL_PIN(LCD_D07),
	MXS_PINCTRL_PIN(LCD_D08),
	MXS_PINCTRL_PIN(LCD_D09),
	MXS_PINCTRL_PIN(LCD_D10),
	MXS_PINCTRL_PIN(LCD_D11),
	MXS_PINCTRL_PIN(LCD_D12),
	MXS_PINCTRL_PIN(LCD_D13),
	MXS_PINCTRL_PIN(LCD_D14),
	MXS_PINCTRL_PIN(LCD_D15),
	MXS_PINCTRL_PIN(LCD_D16),
	MXS_PINCTRL_PIN(LCD_D17),
	MXS_PINCTRL_PIN(LCD_D18),
	MXS_PINCTRL_PIN(LCD_D19),
	MXS_PINCTRL_PIN(LCD_D20),
	MXS_PINCTRL_PIN(LCD_D21),
	MXS_PINCTRL_PIN(LCD_D22),
	MXS_PINCTRL_PIN(LCD_D23),
	MXS_PINCTRL_PIN(LCD_RD_E),
	MXS_PINCTRL_PIN(LCD_WR_RWN),
	MXS_PINCTRL_PIN(LCD_RS),
	MXS_PINCTRL_PIN(LCD_CS),
	MXS_PINCTRL_PIN(LCD_VSYNC),
	MXS_PINCTRL_PIN(LCD_HSYNC),
	MXS_PINCTRL_PIN(LCD_DOTCLK),
	MXS_PINCTRL_PIN(LCD_ENABLE),
	MXS_PINCTRL_PIN(SSP0_DATA0),
	MXS_PINCTRL_PIN(SSP0_DATA1),
	MXS_PINCTRL_PIN(SSP0_DATA2),
	MXS_PINCTRL_PIN(SSP0_DATA3),
	MXS_PINCTRL_PIN(SSP0_DATA4),
	MXS_PINCTRL_PIN(SSP0_DATA5),
	MXS_PINCTRL_PIN(SSP0_DATA6),
	MXS_PINCTRL_PIN(SSP0_DATA7),
	MXS_PINCTRL_PIN(SSP0_CMD),
	MXS_PINCTRL_PIN(SSP0_DETECT),
	MXS_PINCTRL_PIN(SSP0_SCK),
	MXS_PINCTRL_PIN(SSP1_SCK),
	MXS_PINCTRL_PIN(SSP1_CMD),
	MXS_PINCTRL_PIN(SSP1_DATA0),
	MXS_PINCTRL_PIN(SSP1_DATA3),
	MXS_PINCTRL_PIN(SSP2_SCK),
	MXS_PINCTRL_PIN(SSP2_MOSI),
	MXS_PINCTRL_PIN(SSP2_MISO),
	MXS_PINCTRL_PIN(SSP2_SS0),
	MXS_PINCTRL_PIN(SSP2_SS1),
	MXS_PINCTRL_PIN(SSP2_SS2),
	MXS_PINCTRL_PIN(SSP3_SCK),
	MXS_PINCTRL_PIN(SSP3_MOSI),
	MXS_PINCTRL_PIN(SSP3_MISO),
	MXS_PINCTRL_PIN(SSP3_SS0),
	MXS_PINCTRL_PIN(AUART0_RX),
	MXS_PINCTRL_PIN(AUART0_TX),
	MXS_PINCTRL_PIN(AUART0_CTS),
	MXS_PINCTRL_PIN(AUART0_RTS),
	MXS_PINCTRL_PIN(AUART1_RX),
	MXS_PINCTRL_PIN(AUART1_TX),
	MXS_PINCTRL_PIN(AUART1_CTS),
	MXS_PINCTRL_PIN(AUART1_RTS),
	MXS_PINCTRL_PIN(AUART2_RX),
	MXS_PINCTRL_PIN(AUART2_TX),
	MXS_PINCTRL_PIN(AUART2_CTS),
	MXS_PINCTRL_PIN(AUART2_RTS),
	MXS_PINCTRL_PIN(AUART3_RX),
	MXS_PINCTRL_PIN(AUART3_TX),
	MXS_PINCTRL_PIN(AUART3_CTS),
	MXS_PINCTRL_PIN(AUART3_RTS),
	MXS_PINCTRL_PIN(PWM0),
	MXS_PINCTRL_PIN(PWM1),
	MXS_PINCTRL_PIN(PWM2),
	MXS_PINCTRL_PIN(SAIF0_MCLK),
	MXS_PINCTRL_PIN(SAIF0_LRCLK),
	MXS_PINCTRL_PIN(SAIF0_BITCLK),
	MXS_PINCTRL_PIN(SAIF0_SDATA0),
	MXS_PINCTRL_PIN(I2C0_SCL),
	MXS_PINCTRL_PIN(I2C0_SDA),
	MXS_PINCTRL_PIN(SAIF1_SDATA0),
	MXS_PINCTRL_PIN(SPDIF),
	MXS_PINCTRL_PIN(PWM3),
	MXS_PINCTRL_PIN(PWM4),
	MXS_PINCTRL_PIN(LCD_RESET),
	MXS_PINCTRL_PIN(ENET0_MDC),
	MXS_PINCTRL_PIN(ENET0_MDIO),
	MXS_PINCTRL_PIN(ENET0_RX_EN),
	MXS_PINCTRL_PIN(ENET0_RXD0),
	MXS_PINCTRL_PIN(ENET0_RXD1),
	MXS_PINCTRL_PIN(ENET0_TX_CLK),
	MXS_PINCTRL_PIN(ENET0_TX_EN),
	MXS_PINCTRL_PIN(ENET0_TXD0),
	MXS_PINCTRL_PIN(ENET0_TXD1),
	MXS_PINCTRL_PIN(ENET0_RXD2),
	MXS_PINCTRL_PIN(ENET0_RXD3),
	MXS_PINCTRL_PIN(ENET0_TXD2),
	MXS_PINCTRL_PIN(ENET0_TXD3),
	MXS_PINCTRL_PIN(ENET0_RX_CLK),
	MXS_PINCTRL_PIN(ENET0_COL),
	MXS_PINCTRL_PIN(ENET0_CRS),
	MXS_PINCTRL_PIN(ENET_CLK),
	MXS_PINCTRL_PIN(JTAG_RTCK),
	MXS_PINCTRL_PIN(EMI_D00),
	MXS_PINCTRL_PIN(EMI_D01),
	MXS_PINCTRL_PIN(EMI_D02),
	MXS_PINCTRL_PIN(EMI_D03),
	MXS_PINCTRL_PIN(EMI_D04),
	MXS_PINCTRL_PIN(EMI_D05),
	MXS_PINCTRL_PIN(EMI_D06),
	MXS_PINCTRL_PIN(EMI_D07),
	MXS_PINCTRL_PIN(EMI_D08),
	MXS_PINCTRL_PIN(EMI_D09),
	MXS_PINCTRL_PIN(EMI_D10),
	MXS_PINCTRL_PIN(EMI_D11),
	MXS_PINCTRL_PIN(EMI_D12),
	MXS_PINCTRL_PIN(EMI_D13),
	MXS_PINCTRL_PIN(EMI_D14),
	MXS_PINCTRL_PIN(EMI_D15),
	MXS_PINCTRL_PIN(EMI_ODT0),
	MXS_PINCTRL_PIN(EMI_DQM0),
	MXS_PINCTRL_PIN(EMI_ODT1),
	MXS_PINCTRL_PIN(EMI_DQM1),
	MXS_PINCTRL_PIN(EMI_DDR_OPEN_FB),
	MXS_PINCTRL_PIN(EMI_CLK),
	MXS_PINCTRL_PIN(EMI_DQS0),
	MXS_PINCTRL_PIN(EMI_DQS1),
	MXS_PINCTRL_PIN(EMI_DDR_OPEN),
	MXS_PINCTRL_PIN(EMI_A00),
	MXS_PINCTRL_PIN(EMI_A01),
	MXS_PINCTRL_PIN(EMI_A02),
	MXS_PINCTRL_PIN(EMI_A03),
	MXS_PINCTRL_PIN(EMI_A04),
	MXS_PINCTRL_PIN(EMI_A05),
	MXS_PINCTRL_PIN(EMI_A06),
	MXS_PINCTRL_PIN(EMI_A07),
	MXS_PINCTRL_PIN(EMI_A08),
	MXS_PINCTRL_PIN(EMI_A09),
	MXS_PINCTRL_PIN(EMI_A10),
	MXS_PINCTRL_PIN(EMI_A11),
	MXS_PINCTRL_PIN(EMI_A12),
	MXS_PINCTRL_PIN(EMI_A13),
	MXS_PINCTRL_PIN(EMI_A14),
	MXS_PINCTRL_PIN(EMI_BA0),
	MXS_PINCTRL_PIN(EMI_BA1),
	MXS_PINCTRL_PIN(EMI_BA2),
	MXS_PINCTRL_PIN(EMI_CASN),
	MXS_PINCTRL_PIN(EMI_RASN),
	MXS_PINCTRL_PIN(EMI_WEN),
	MXS_PINCTRL_PIN(EMI_CE0N),
	MXS_PINCTRL_PIN(EMI_CE1N),
	MXS_PINCTRL_PIN(EMI_CKE),
};

/* i.MX28 PINCTRL block register offsets (muxsel/drive/pull banks). */
static struct mxs_regs imx28_regs = {
	.muxsel = 0x100,
	.drive = 0x300,
	.pull = 0x600,
};

/* SoC description handed to the shared mxs pinctrl core. */
static struct mxs_pinctrl_soc_data imx28_pinctrl_data = {
	.regs = &imx28_regs,
	.pins = imx28_pins,
	.npins = ARRAY_SIZE(imx28_pins),
};

/* Thin probe wrapper: all real work happens in the shared mxs core. */
static int __devinit imx28_pinctrl_probe(struct platform_device *pdev)
{
	return mxs_pinctrl_probe(pdev, &imx28_pinctrl_data);
}

/*
 * FIX(review): this table was marked __devinitdata, but the driver core
 * keeps dereferencing .of_match_table well after init (e.g. for module
 * autoloading via the OF modalias), so it must not live in discardable
 * init memory.  Keep it in normal (const) data instead.
 */
static const struct of_device_id imx28_pinctrl_of_match[] = {
	{ .compatible = "fsl,imx28-pinctrl", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx28_pinctrl_of_match);

static struct platform_driver imx28_pinctrl_driver = {
	.driver = {
		.name = "imx28-pinctrl",
		.owner = THIS_MODULE,
		.of_match_table = imx28_pinctrl_of_match,
	},
	.probe = imx28_pinctrl_probe,
	.remove = __devexit_p(mxs_pinctrl_remove),
};

/*
 * Registered at postcore level (not module_init) so the pin controller
 * is available before ordinary device drivers start probing.
 */
static int __init imx28_pinctrl_init(void)
{
	return platform_driver_register(&imx28_pinctrl_driver);
}
postcore_initcall(imx28_pinctrl_init);

static void __exit imx28_pinctrl_exit(void)
{
	platform_driver_unregister(&imx28_pinctrl_driver);
}
module_exit(imx28_pinctrl_exit);

MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Freescale i.MX28 pinctrl driver");
MODULE_LICENSE("GPL v2");
gpl-2.0