repo_name
string
path
string
copies
string
size
string
content
string
license
string
losfair/MiracleKernel
drivers/infiniband/hw/ipath/ipath_verbs.c
3946
63388
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <rdma/ib_mad.h> #include <rdma/ib_user_verbs.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/utsname.h> #include <linux/rculist.h> #include "ipath_kernel.h" #include "ipath_verbs.h" #include "ipath_common.h" static unsigned int ib_ipath_qp_table_size = 251; module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); MODULE_PARM_DESC(qp_table_size, "QP table size"); unsigned int ib_ipath_lkey_table_size = 12; module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint, S_IRUGO); MODULE_PARM_DESC(lkey_table_size, "LKEY table size in bits (2^n, 1 <= n <= 23)"); static unsigned int ib_ipath_max_pds = 0xFFFF; module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_pds, "Maximum number of protection domains to support"); static unsigned int ib_ipath_max_ahs = 0xFFFF; module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); unsigned int ib_ipath_max_cqes = 0x2FFFF; module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_cqes, "Maximum number of completion queue entries to support"); unsigned int ib_ipath_max_cqs = 0x1FFFF; module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); unsigned int ib_ipath_max_qp_wrs = 0x3FFF; module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); unsigned int ib_ipath_max_qps = 16384; module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); unsigned int ib_ipath_max_sges = 0x60; module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); unsigned int ib_ipath_max_mcast_grps = 16384; 
module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_mcast_grps, "Maximum number of multicast groups to support"); unsigned int ib_ipath_max_mcast_qp_attached = 16; module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_mcast_qp_attached, "Maximum number of attached QPs to support"); unsigned int ib_ipath_max_srqs = 1024; module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); unsigned int ib_ipath_max_srq_sges = 128; module_param_named(max_srq_sges, ib_ipath_max_srq_sges, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); unsigned int ib_ipath_max_srq_wrs = 0x1FFFF; module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); static unsigned int ib_ipath_disable_sma; module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(disable_sma, "Disable the SMA"); /* * Note that it is OK to post send work requests in the SQE and ERR * states; ipath_do_send() will process them and generate error * completions as per IB 1.2 C10-96. 
*/ const int ib_ipath_state_ops[IB_QPS_ERR + 1] = { [IB_QPS_RESET] = 0, [IB_QPS_INIT] = IPATH_POST_RECV_OK, [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK, [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK | IPATH_PROCESS_NEXT_SEND_OK, [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK, [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | IPATH_POST_SEND_OK | IPATH_FLUSH_SEND, [IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV | IPATH_POST_SEND_OK | IPATH_FLUSH_SEND, }; struct ipath_ucontext { struct ib_ucontext ibucontext; }; static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext *ibucontext) { return container_of(ibucontext, struct ipath_ucontext, ibucontext); } /* * Translate ib_wr_opcode into ib_wc_opcode. */ const enum ib_wc_opcode ib_ipath_wc_opcode[] = { [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, [IB_WR_SEND] = IB_WC_SEND, [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD }; /* * System image GUID. 
*/
/* System image GUID reported in device queries; set at registration time. */
static __be64 sys_image_guid;

/**
 * ipath_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 *
 * Walks the scatter/gather list in @ss, copying @length bytes from @data
 * into the SGE buffers and advancing the SGE state as it goes.
 */
void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		/* Clamp the chunk to both the remaining request and the
		 * space left in the current SGE. */
		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			/* Current SGE exhausted: advance to the next list
			 * entry, if any remain. */
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			/* End of the current MR segment: step to the next
			 * segment (and map page, if needed) of the region. */
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 *
 * Same SGE walk as ipath_copy_sge() but advances state without copying.
 */
void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the ipath_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sg_list = ss->sg_list;
	/* Walk a local copy of the SGE so the caller's state is untouched. */
	struct ipath_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		/* A misaligned address, or a non-final chunk whose length
		 * is not a multiple of 4 bytes, cannot be DMA'd: return 0
		 * so the caller falls back to the copy path. */
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr != NULL) {
			if (++sge.n >= IPATH_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}

/*
 * Copy from the SGEs to the data buffer.
 */
static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
				u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * ipath_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 *
 * Validates @wr against the QP's state, type and limits, then appends it
 * to the QP's send work queue.  Runs under qp->s_lock with IRQs disabled.
 */
static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	int ret;
	unsigned long flags;
	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Only SMI QPs may post sends while the link is not ACTIVE. */
	if
(qp->ibqp.qp_type != IB_QPT_SMI && !(dd->ipath_flags & IPATH_LINKACTIVE)) { ret = -ENETDOWN; goto bail; } /* Check that state is OK to post send. */ if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) goto bail_inval; /* IB spec says that num_sge == 0 is OK. */ if (wr->num_sge > qp->s_max_sge) goto bail_inval; /* * Don't allow RDMA reads or atomic operations on UC or * undefined operations. * Make sure buffer is large enough to hold the result for atomics. */ if (qp->ibqp.qp_type == IB_QPT_UC) { if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) goto bail_inval; } else if (qp->ibqp.qp_type == IB_QPT_UD) { /* Check UD opcode */ if (wr->opcode != IB_WR_SEND && wr->opcode != IB_WR_SEND_WITH_IMM) goto bail_inval; /* Check UD destination address PD */ if (qp->ibqp.pd != wr->wr.ud.ah->pd) goto bail_inval; } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) goto bail_inval; else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && (wr->num_sge == 0 || wr->sg_list[0].length < sizeof(u64) || wr->sg_list[0].addr & (sizeof(u64) - 1))) goto bail_inval; else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) goto bail_inval; next = qp->s_head + 1; if (next >= qp->s_size) next = 0; if (next == qp->s_last) { ret = -ENOMEM; goto bail; } wqe = get_swqe_ptr(qp, qp->s_head); wqe->wr = *wr; wqe->length = 0; if (wr->num_sge) { acc = wr->opcode >= IB_WR_RDMA_READ ? 
IB_ACCESS_LOCAL_WRITE : 0; for (i = 0, j = 0; i < wr->num_sge; i++) { u32 length = wr->sg_list[i].length; int ok; if (length == 0) continue; ok = ipath_lkey_ok(qp, &wqe->sg_list[j], &wr->sg_list[i], acc); if (!ok) goto bail_inval; wqe->length += length; j++; } wqe->wr.num_sge = j; } if (qp->ibqp.qp_type == IB_QPT_UC || qp->ibqp.qp_type == IB_QPT_RC) { if (wqe->length > 0x80000000U) goto bail_inval; } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu) goto bail_inval; wqe->ssn = qp->s_ssn++; qp->s_head = next; ret = 0; goto bail; bail_inval: ret = -EINVAL; bail: spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } /** * ipath_post_send - post a send on a QP * @ibqp: the QP to post the send on * @wr: the list of work requests to post * @bad_wr: the first bad WR is put here * * This may be called from interrupt context. */ static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct ipath_qp *qp = to_iqp(ibqp); int err = 0; for (; wr; wr = wr->next) { err = ipath_post_one_send(qp, wr); if (err) { *bad_wr = wr; goto bail; } } /* Try to do the send work in the caller's context. */ ipath_do_send((unsigned long) qp); bail: return err; } /** * ipath_post_receive - post a receive on a QP * @ibqp: the QP to post the receive on * @wr: the WR to post * @bad_wr: the first bad WR is put here * * This may be called from interrupt context. */ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct ipath_qp *qp = to_iqp(ibqp); struct ipath_rwq *wq = qp->r_rq.wq; unsigned long flags; int ret; /* Check that state is OK to post receive. 
*/ if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) { *bad_wr = wr; ret = -EINVAL; goto bail; } for (; wr; wr = wr->next) { struct ipath_rwqe *wqe; u32 next; int i; if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { *bad_wr = wr; ret = -EINVAL; goto bail; } spin_lock_irqsave(&qp->r_rq.lock, flags); next = wq->head + 1; if (next >= qp->r_rq.size) next = 0; if (next == wq->tail) { spin_unlock_irqrestore(&qp->r_rq.lock, flags); *bad_wr = wr; ret = -ENOMEM; goto bail; } wqe = get_rwqe_ptr(&qp->r_rq, wq->head); wqe->wr_id = wr->wr_id; wqe->num_sge = wr->num_sge; for (i = 0; i < wr->num_sge; i++) wqe->sg_list[i] = wr->sg_list[i]; /* Make sure queue entry is written before the head index. */ smp_wmb(); wq->head = next; spin_unlock_irqrestore(&qp->r_rq.lock, flags); } ret = 0; bail: return ret; } /** * ipath_qp_rcv - processing an incoming packet on a QP * @dev: the device the packet came on * @hdr: the packet header * @has_grh: true if the packet has a GRH * @data: the packet data * @tlen: the packet length * @qp: the QP the packet came on * * This is called from ipath_ib_rcv() to process an incoming packet * for the given QP. * Called at interrupt level. */ static void ipath_qp_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) { /* Check for valid receive state. 
*/ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { dev->n_pkt_drops++; return; } switch (qp->ibqp.qp_type) { case IB_QPT_SMI: case IB_QPT_GSI: if (ib_ipath_disable_sma) break; /* FALLTHROUGH */ case IB_QPT_UD: ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp); break; case IB_QPT_RC: ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp); break; case IB_QPT_UC: ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp); break; default: break; } } /** * ipath_ib_rcv - process an incoming packet * @arg: the device pointer * @rhdr: the header of the packet * @data: the packet data * @tlen: the packet length * * This is called from ipath_kreceive() to process an incoming packet at * interrupt level. Tlen is the length of the header + data + CRC in bytes. */ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data, u32 tlen) { struct ipath_ib_header *hdr = rhdr; struct ipath_other_headers *ohdr; struct ipath_qp *qp; u32 qp_num; int lnh; u8 opcode; u16 lid; if (unlikely(dev == NULL)) goto bail; if (unlikely(tlen < 24)) { /* LRH+BTH+CRC */ dev->rcv_errors++; goto bail; } /* Check for a valid destination LID (see ch. 7.11.1). */ lid = be16_to_cpu(hdr->lrh[1]); if (lid < IPATH_MULTICAST_LID_BASE) { lid &= ~((1 << dev->dd->ipath_lmc) - 1); if (unlikely(lid != dev->dd->ipath_lid)) { dev->rcv_errors++; goto bail; } } /* Check for GRH */ lnh = be16_to_cpu(hdr->lrh[0]) & 3; if (lnh == IPATH_LRH_BTH) ohdr = &hdr->u.oth; else if (lnh == IPATH_LRH_GRH) ohdr = &hdr->u.l.oth; else { dev->rcv_errors++; goto bail; } opcode = be32_to_cpu(ohdr->bth[0]) >> 24; dev->opstats[opcode].n_bytes += tlen; dev->opstats[opcode].n_packets++; /* Get the destination QP number. 
*/ qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK; if (qp_num == IPATH_MULTICAST_QPN) { struct ipath_mcast *mcast; struct ipath_mcast_qp *p; if (lnh != IPATH_LRH_GRH) { dev->n_pkt_drops++; goto bail; } mcast = ipath_mcast_find(&hdr->u.l.grh.dgid); if (mcast == NULL) { dev->n_pkt_drops++; goto bail; } dev->n_multicast_rcv++; list_for_each_entry_rcu(p, &mcast->qp_list, list) ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp); /* * Notify ipath_multicast_detach() if it is waiting for us * to finish. */ if (atomic_dec_return(&mcast->refcount) <= 1) wake_up(&mcast->wait); } else { qp = ipath_lookup_qpn(&dev->qp_table, qp_num); if (qp) { dev->n_unicast_rcv++; ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data, tlen, qp); /* * Notify ipath_destroy_qp() if it is waiting * for us to finish. */ if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); } else dev->n_pkt_drops++; } bail:; } /** * ipath_ib_timer - verbs timer * @arg: the device pointer * * This is called from ipath_do_rcv_timer() at interrupt level to check for * QPs which need retransmits and to collect performance numbers. */ static void ipath_ib_timer(struct ipath_ibdev *dev) { struct ipath_qp *resend = NULL; struct ipath_qp *rnr = NULL; struct list_head *last; struct ipath_qp *qp; unsigned long flags; if (dev == NULL) return; spin_lock_irqsave(&dev->pending_lock, flags); /* Start filling the next pending queue. */ if (++dev->pending_index >= ARRAY_SIZE(dev->pending)) dev->pending_index = 0; /* Save any requests still in the new queue, they have timed out. 
*/ last = &dev->pending[dev->pending_index]; while (!list_empty(last)) { qp = list_entry(last->next, struct ipath_qp, timerwait); list_del_init(&qp->timerwait); qp->timer_next = resend; resend = qp; atomic_inc(&qp->refcount); } last = &dev->rnrwait; if (!list_empty(last)) { qp = list_entry(last->next, struct ipath_qp, timerwait); if (--qp->s_rnr_timeout == 0) { do { list_del_init(&qp->timerwait); qp->timer_next = rnr; rnr = qp; atomic_inc(&qp->refcount); if (list_empty(last)) break; qp = list_entry(last->next, struct ipath_qp, timerwait); } while (qp->s_rnr_timeout == 0); } } /* * We should only be in the started state if pma_sample_start != 0 */ if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED && --dev->pma_sample_start == 0) { dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; ipath_snapshot_counters(dev->dd, &dev->ipath_sword, &dev->ipath_rword, &dev->ipath_spkts, &dev->ipath_rpkts, &dev->ipath_xmit_wait); } if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) { if (dev->pma_sample_interval == 0) { u64 ta, tb, tc, td, te; dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE; ipath_snapshot_counters(dev->dd, &ta, &tb, &tc, &td, &te); dev->ipath_sword = ta - dev->ipath_sword; dev->ipath_rword = tb - dev->ipath_rword; dev->ipath_spkts = tc - dev->ipath_spkts; dev->ipath_rpkts = td - dev->ipath_rpkts; dev->ipath_xmit_wait = te - dev->ipath_xmit_wait; } else dev->pma_sample_interval--; } spin_unlock_irqrestore(&dev->pending_lock, flags); /* XXX What if timer fires again while this is running? */ while (resend != NULL) { qp = resend; resend = qp->timer_next; spin_lock_irqsave(&qp->s_lock, flags); if (qp->s_last != qp->s_tail && ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) { dev->n_timeouts++; ipath_restart_rc(qp, qp->s_last_psn + 1); } spin_unlock_irqrestore(&qp->s_lock, flags); /* Notify ipath_destroy_qp() if it is waiting. 
		 */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	/* Second pass: QPs whose RNR timers expired; reschedule their sends. */
	while (rnr != NULL) {
		qp = rnr;
		rnr = qp->timer_next;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		/* Notify ipath_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * Advance the SGE state in @ss past @length bytes.  Unlike the copy/skip
 * helpers above, @length must fit within the current SGE.
 */
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

/*
 * Endianness helpers for assembling 32-bit words from unaligned source
 * bytes in copy_io() below.  "Upper" refers to the bytes that occupy the
 * later positions in the output word, so the shifts invert between the
 * little- and big-endian builds.
 */
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

/* Keep @n bytes of @data, shifted to byte offset @off; clear the rest. */
static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

/*
 * Copy @length bytes described by @ss into the PIO buffer @piobuf as whole
 * 32-bit words, handling unaligned source addresses and odd lengths.  The
 * final word is held back in 'last' and written by the caller's epilogue
 * (the trigger word), with write-combining flushes when @flush_wc is set.
 */
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;	/* number of leftover bytes accumulated in 'data' */
	u32 data = 0;	/* partially assembled output word */
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it.
*/ off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); if (off) { u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & ~(sizeof(u32) - 1)); u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE); u32 y; y = sizeof(u32) - off; if (len > y) len = y; if (len + extra >= sizeof(u32)) { data |= set_upper_bits(v, extra * BITS_PER_BYTE); len = sizeof(u32) - extra; if (len == length) { last = data; break; } __raw_writel(data, piobuf); piobuf++; extra = 0; data = 0; } else { /* Clear unused upper bytes */ data |= clear_upper_bytes(v, len, extra); if (len == length) { last = data; break; } extra += len; } } else if (extra) { /* Source address is aligned. */ u32 *addr = (u32 *) ss->sge.vaddr; int shift = extra * BITS_PER_BYTE; int ushift = 32 - shift; u32 l = len; while (l >= sizeof(u32)) { u32 v = *addr; data |= set_upper_bits(v, shift); __raw_writel(data, piobuf); data = get_upper_bits(v, ushift); piobuf++; addr++; l -= sizeof(u32); } /* * We still have 'extra' number of bytes leftover. */ if (l) { u32 v = *addr; if (l + extra >= sizeof(u32)) { data |= set_upper_bits(v, shift); len -= l + extra - sizeof(u32); if (len == length) { last = data; break; } __raw_writel(data, piobuf); piobuf++; extra = 0; data = 0; } else { /* Clear unused upper bytes */ data |= clear_upper_bytes(v, l, extra); if (len == length) { last = data; break; } extra += l; } } else if (len == length) { last = data; break; } } else if (len == length) { u32 w; /* * Need to round up for the last dword in the * packet. */ w = (len + 3) >> 2; __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); piobuf += w - 1; last = ((u32 *) ss->sge.vaddr)[w - 1]; break; } else { u32 w = len >> 2; __iowrite32_copy(piobuf, ss->sge.vaddr, w); piobuf += w; extra = len & (sizeof(u32) - 1); if (extra) { u32 v = ((u32 *) ss->sge.vaddr)[w]; /* Clear unused upper bytes */ data = clear_upper_bytes(v, extra, 0); } } update_sge(ss, len); length -= len; } /* Update address before sending packet. 
	 */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		ipath_flush_wc();
	} else
		__raw_writel(last, piobuf);
}

/*
 * Convert IB rate to delay multiplier.
 * The multiplier is relative to the fastest supported rate (4x DDR),
 * so slower rates get proportionally larger inter-packet delays.
 */
unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS:
		return 8;
	case IB_RATE_5_GBPS:
		return 4;
	case IB_RATE_10_GBPS:
		return 2;
	case IB_RATE_20_GBPS:
		return 1;
	default:
		/* unknown/unsupported rate: no delay scaling */
		return 0;
	}
}

/*
 * Convert delay multiplier to IB rate (inverse of the function above).
 */
static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
{
	switch (mult) {
	case 8:
		return IB_RATE_2_5_GBPS;
	case 4:
		return IB_RATE_5_GBPS;
	case 2:
		return IB_RATE_10_GBPS;
	case 1:
		return IB_RATE_20_GBPS;
	default:
		return IB_RATE_PORT_CURRENT;
	}
}

/*
 * Take a pre-allocated SDMA transmit request off the device free list,
 * or return NULL if none are available.  Caller returns it via put_txreq().
 */
static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
{
	struct ipath_verbs_txreq *tx = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	return tx;
}

/* Return a transmit request obtained from get_txreq() to the free list. */
static inline void put_txreq(struct ipath_ibdev *dev,
			     struct ipath_verbs_txreq *tx)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	list_add(&tx->txreq.list, &dev->txreq_free);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/*
 * SDMA completion callback: generate the send completion for the WQE (if
 * any), wake anyone waiting on DMA drain, free the bounce buffer when we
 * own it, recycle the txreq and drop the QP reference taken at submit.
 */
static void sdma_complete(void *cookie, int status)
{
	struct ipath_verbs_txreq *tx = cookie;
	struct ipath_qp *qp = tx->qp;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR; if (atomic_dec_and_test(&qp->s_dma_busy)) { spin_lock_irqsave(&qp->s_lock, flags); if (tx->wqe) ipath_send_complete(qp, tx->wqe, ibs); if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND && qp->s_last != qp->s_head) || (qp->s_flags & IPATH_S_WAIT_DMA)) ipath_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); wake_up(&qp->wait_dma); } else if (tx->wqe) { spin_lock_irqsave(&qp->s_lock, flags); ipath_send_complete(qp, tx->wqe, ibs); spin_unlock_irqrestore(&qp->s_lock, flags); } if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF) kfree(tx->txreq.map_addr); put_txreq(dev, tx); if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); } static void decrement_dma_busy(struct ipath_qp *qp) { unsigned long flags; if (atomic_dec_and_test(&qp->s_dma_busy)) { spin_lock_irqsave(&qp->s_lock, flags); if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND && qp->s_last != qp->s_head) || (qp->s_flags & IPATH_S_WAIT_DMA)) ipath_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); wake_up(&qp->wait_dma); } } /* * Compute the number of clock cycles of delay before sending the next packet. * The multipliers reflect the number of clocks for the fastest rate so * one tick at 4xDDR is 8 ticks at 1xSDR. * If the destination port will take longer to receive a packet than * the outgoing link can send it, we need to delay sending the next packet * by the difference in time it takes the receiver to receive and the sender * to send this packet. * Note that this delay is always correct for UC and RC but not always * optimal for UD. For UD, the destination HCA can be different for each * packet, in which case, we could send packets to a different destination * while "waiting" for the delay. The overhead for doing this without * HW support is more than just paying the cost of delaying some packets * unnecessarily. */ static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult) { return (rcv_mult > snd_mult) ? 
(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0; } static int ipath_verbs_send_dma(struct ipath_qp *qp, struct ipath_ib_header *hdr, u32 hdrwords, struct ipath_sge_state *ss, u32 len, u32 plen, u32 dwords) { struct ipath_ibdev *dev = to_idev(qp->ibqp.device); struct ipath_devdata *dd = dev->dd; struct ipath_verbs_txreq *tx; u32 *piobuf; u32 control; u32 ndesc; int ret; tx = qp->s_tx; if (tx) { qp->s_tx = NULL; /* resend previously constructed packet */ atomic_inc(&qp->s_dma_busy); ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx); if (ret) { qp->s_tx = tx; decrement_dma_busy(qp); } goto bail; } tx = get_txreq(dev); if (!tx) { ret = -EBUSY; goto bail; } /* * Get the saved delay count we computed for the previous packet * and save the delay count for this packet to be used next time * we get here. */ control = qp->s_pkt_delay; qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult); tx->qp = qp; atomic_inc(&qp->refcount); tx->wqe = qp->s_wqe; tx->txreq.callback = sdma_complete; tx->txreq.callback_cookie = tx; tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST | IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC; if (plen + 1 >= IPATH_SMALLBUF_DWORDS) tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF; /* VL15 packets bypass credit check */ if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) { control |= 1ULL << 31; tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15; } if (len) { /* * Don't try to DMA if it takes more descriptors than * the queue holds. 
*/ ndesc = ipath_count_sge(ss, len); if (ndesc >= dd->ipath_sdma_descq_cnt) ndesc = 0; } else ndesc = 1; if (ndesc) { tx->hdr.pbc[0] = cpu_to_le32(plen); tx->hdr.pbc[1] = cpu_to_le32(control); memcpy(&tx->hdr.hdr, hdr, hdrwords << 2); tx->txreq.sg_count = ndesc; tx->map_len = (hdrwords + 2) << 2; tx->txreq.map_addr = &tx->hdr; atomic_inc(&qp->s_dma_busy); ret = ipath_sdma_verbs_send(dd, ss, dwords, tx); if (ret) { /* save ss and length in dwords */ tx->ss = ss; tx->len = dwords; qp->s_tx = tx; decrement_dma_busy(qp); } goto bail; } /* Allocate a buffer and copy the header and payload to it. */ tx->map_len = (plen + 1) << 2; piobuf = kmalloc(tx->map_len, GFP_ATOMIC); if (unlikely(piobuf == NULL)) { ret = -EBUSY; goto err_tx; } tx->txreq.map_addr = piobuf; tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF; tx->txreq.sg_count = 1; *piobuf++ = (__force u32) cpu_to_le32(plen); *piobuf++ = (__force u32) cpu_to_le32(control); memcpy(piobuf, hdr, hdrwords << 2); ipath_copy_from_sge(piobuf + hdrwords, ss, len); atomic_inc(&qp->s_dma_busy); ret = ipath_sdma_verbs_send(dd, NULL, 0, tx); /* * If we couldn't queue the DMA request, save the info * and try again later rather than destroying the * buffer and undoing the side effects of the copy. 
*/ if (ret) { tx->ss = NULL; tx->len = 0; qp->s_tx = tx; decrement_dma_busy(qp); } dev->n_unaligned++; goto bail; err_tx: if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); put_txreq(dev, tx); bail: return ret; } static int ipath_verbs_send_pio(struct ipath_qp *qp, struct ipath_ib_header *ibhdr, u32 hdrwords, struct ipath_sge_state *ss, u32 len, u32 plen, u32 dwords) { struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; u32 *hdr = (u32 *) ibhdr; u32 __iomem *piobuf; unsigned flush_wc; u32 control; int ret; unsigned long flags; piobuf = ipath_getpiobuf(dd, plen, NULL); if (unlikely(piobuf == NULL)) { ret = -EBUSY; goto bail; } /* * Get the saved delay count we computed for the previous packet * and save the delay count for this packet to be used next time * we get here. */ control = qp->s_pkt_delay; qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult); /* VL15 packets bypass credit check */ if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15) control |= 1ULL << 31; /* * Write the length to the control qword plus any needed flags. * We have to flush after the PBC for correctness on some cpus * or WC buffer can be written out of order. */ writeq(((u64) control << 32) | plen, piobuf); piobuf += 2; flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC; if (len == 0) { /* * If there is just the header portion, must flush before * writing last word of header for correctness, and after * the last header word (trigger word). */ if (flush_wc) { ipath_flush_wc(); __iowrite32_copy(piobuf, hdr, hdrwords - 1); ipath_flush_wc(); __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); ipath_flush_wc(); } else __iowrite32_copy(piobuf, hdr, hdrwords); goto done; } if (flush_wc) ipath_flush_wc(); __iowrite32_copy(piobuf, hdr, hdrwords); piobuf += hdrwords; /* The common case is aligned and contained in one segment. 
*/ if (likely(ss->num_sge == 1 && len <= ss->sge.length && !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { u32 *addr = (u32 *) ss->sge.vaddr; /* Update address before sending packet. */ update_sge(ss, len); if (flush_wc) { __iowrite32_copy(piobuf, addr, dwords - 1); /* must flush early everything before trigger word */ ipath_flush_wc(); __raw_writel(addr[dwords - 1], piobuf + dwords - 1); /* be sure trigger word is written */ ipath_flush_wc(); } else __iowrite32_copy(piobuf, addr, dwords); goto done; } copy_io(piobuf, ss, len, flush_wc); done: if (qp->s_wqe) { spin_lock_irqsave(&qp->s_lock, flags); ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); spin_unlock_irqrestore(&qp->s_lock, flags); } ret = 0; bail: return ret; } /** * ipath_verbs_send - send a packet * @qp: the QP to send on * @hdr: the packet header * @hdrwords: the number of 32-bit words in the header * @ss: the SGE to send * @len: the length of the packet in bytes */ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr, u32 hdrwords, struct ipath_sge_state *ss, u32 len) { struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; u32 plen; int ret; u32 dwords = (len + 3) >> 2; /* * Calculate the send buffer trigger address. * The +1 counts for the pbc control dword following the pbc length. */ plen = hdrwords + dwords + 1; /* * VL15 packets (IB_QPT_SMI) will always use PIO, so we * can defer SDMA restart until link goes ACTIVE without * worrying about just how we got there. */ if (qp->ibqp.qp_type == IB_QPT_SMI || !(dd->ipath_flags & IPATH_HAS_SEND_DMA)) ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len, plen, dwords); else ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len, plen, dwords); return ret; } int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords, u64 *rwords, u64 *spkts, u64 *rpkts, u64 *xmit_wait) { int ret; if (!(dd->ipath_flags & IPATH_INITTED)) { /* no hardware, freeze, etc. 
*/
		ret = -EINVAL;
		goto bail;
	}

	/* Snapshot the five chip counters the caller asked for. */
	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 * Returns 0 on success, or -EINVAL if the hardware is not initialized
 * (no hardware, freeze mode, etc.).
 */
int ipath_get_counters(struct ipath_devdata *dd,
		       struct ipath_verbs_counters *cntrs)
{
	struct ipath_cregs const *crp = dd->ipath_cregs;
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
	cntrs->link_error_recovery_counter =
		ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
*/ cntrs->link_downed_counter = ipath_snap_cntr(dd, crp->cr_iblinkdowncnt); cntrs->port_rcv_errors = ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) + ipath_snap_cntr(dd, crp->cr_rcvovflcnt) + ipath_snap_cntr(dd, crp->cr_portovflcnt) + ipath_snap_cntr(dd, crp->cr_err_rlencnt) + ipath_snap_cntr(dd, crp->cr_invalidrlencnt) + ipath_snap_cntr(dd, crp->cr_errlinkcnt) + ipath_snap_cntr(dd, crp->cr_erricrccnt) + ipath_snap_cntr(dd, crp->cr_errvcrccnt) + ipath_snap_cntr(dd, crp->cr_errlpcrccnt) + ipath_snap_cntr(dd, crp->cr_badformatcnt) + dd->ipath_rxfc_unsupvl_errs; if (crp->cr_rxotherlocalphyerrcnt) cntrs->port_rcv_errors += ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt); if (crp->cr_rxvlerrcnt) cntrs->port_rcv_errors += ipath_snap_cntr(dd, crp->cr_rxvlerrcnt); cntrs->port_rcv_remphys_errors = ipath_snap_cntr(dd, crp->cr_rcvebpcnt); cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt); cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt); cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt); cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt); cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt); cntrs->local_link_integrity_errors = crp->cr_locallinkintegrityerrcnt ? ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) : ((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ? dd->ipath_lli_errs : dd->ipath_lli_errors); cntrs->excessive_buffer_overrun_errors = crp->cr_excessbufferovflcnt ? ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) : dd->ipath_overrun_thresh_errs; cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ? ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0; ret = 0; bail: return ret; } /** * ipath_ib_piobufavail - callback when a PIO buffer is available * @arg: the device pointer * * This is called from ipath_intr() at interrupt level when a PIO buffer is * available after ipath_verbs_send() returned an error that no buffers were * available. 
Return 1 if we consumed all the PIO buffers and we still have * QPs waiting for buffers (for now, just restart the send tasklet and * return zero). */ int ipath_ib_piobufavail(struct ipath_ibdev *dev) { struct list_head *list; struct ipath_qp *qplist; struct ipath_qp *qp; unsigned long flags; if (dev == NULL) goto bail; list = &dev->piowait; qplist = NULL; spin_lock_irqsave(&dev->pending_lock, flags); while (!list_empty(list)) { qp = list_entry(list->next, struct ipath_qp, piowait); list_del_init(&qp->piowait); qp->pio_next = qplist; qplist = qp; atomic_inc(&qp->refcount); } spin_unlock_irqrestore(&dev->pending_lock, flags); while (qplist != NULL) { qp = qplist; qplist = qp->pio_next; spin_lock_irqsave(&qp->s_lock, flags); if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ipath_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); /* Notify ipath_destroy_qp() if it is waiting. */ if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); } bail: return 0; } static int ipath_query_device(struct ib_device *ibdev, struct ib_device_attr *props) { struct ipath_ibdev *dev = to_idev(ibdev); memset(props, 0, sizeof(*props)); props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE; props->page_size_cap = PAGE_SIZE; props->vendor_id = IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3; props->vendor_part_id = dev->dd->ipath_deviceid; props->hw_ver = dev->dd->ipath_pcirev; props->sys_image_guid = dev->sys_image_guid; props->max_mr_size = ~0ull; props->max_qp = ib_ipath_max_qps; props->max_qp_wr = ib_ipath_max_qp_wrs; props->max_sge = ib_ipath_max_sges; props->max_cq = ib_ipath_max_cqs; props->max_ah = ib_ipath_max_ahs; props->max_cqe = ib_ipath_max_cqes; props->max_mr = dev->lk_table.max; props->max_fmr = dev->lk_table.max; props->max_map_per_fmr = 32767; props->max_pd = ib_ipath_max_pds; 
props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC; props->max_qp_init_rd_atom = 255; /* props->max_res_rd_atom */ props->max_srq = ib_ipath_max_srqs; props->max_srq_wr = ib_ipath_max_srq_wrs; props->max_srq_sge = ib_ipath_max_srq_sges; /* props->local_ca_ack_delay */ props->atomic_cap = IB_ATOMIC_GLOB; props->max_pkeys = ipath_get_npkeys(dev->dd); props->max_mcast_grp = ib_ipath_max_mcast_grps; props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; return 0; } const u8 ipath_cvt_physportstate[32] = { [INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, [INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN, [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = IB_PHYSPORTSTATE_CFG_TRAIN, [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = IB_PHYSPORTSTATE_CFG_TRAIN, [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN, [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER, [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER, [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER, [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN }; u32 ipath_get_cr_errpkey(struct ipath_devdata *dd) { return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey); } static int ipath_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { struct ipath_ibdev 
*dev = to_idev(ibdev); struct ipath_devdata *dd = dev->dd; enum ib_mtu mtu; u16 lid = dd->ipath_lid; u64 ibcstat; memset(props, 0, sizeof(*props)); props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE); props->lmc = dd->ipath_lmc; props->sm_lid = dev->sm_lid; props->sm_sl = dev->sm_sl; ibcstat = dd->ipath_lastibcstat; /* map LinkState to IB portinfo values. */ props->state = ipath_ib_linkstate(dd, ibcstat) + 1; /* See phys_state_show() */ props->phys_state = /* MEA: assumes shift == 0 */ ipath_cvt_physportstate[dd->ipath_lastibcstat & dd->ibcs_lts_mask]; props->port_cap_flags = dev->port_cap_flags; props->gid_tbl_len = 1; props->max_msg_sz = 0x80000000; props->pkey_tbl_len = ipath_get_npkeys(dd); props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) - dev->z_pkey_violations; props->qkey_viol_cntr = dev->qkey_violations; props->active_width = dd->ipath_link_width_active; /* See rate_show() */ props->active_speed = dd->ipath_link_speed_active; props->max_vl_num = 1; /* VLCap = VL0 */ props->init_type_reply = 0; props->max_mtu = ipath_mtu4096 ? 
IB_MTU_4096 : IB_MTU_2048;
	/* Map the chip MTU (in bytes) onto the IB enum for the active MTU. */
	switch (dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		/* fall back to 2048 for any unrecognized chip MTU value */
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = dev->subnet_timeout;

	return 0;
}

/*
 * Modify device attributes.  Only the system image GUID and the node
 * description can be changed; any other bit in the mask is rejected
 * with -EOPNOTSUPP.
 */
static int ipath_modify_device(struct ib_device *device,
			       int device_modify_mask,
			       struct ib_device_modify *device_modify)
{
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
		memcpy(device->node_desc, device_modify->node_desc, 64);

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		to_idev(device)->sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);

	ret = 0;

bail:
	return ret;
}

/*
 * Modify port attributes: update the capability-flag mask, optionally
 * take the link down (IB_PORT_SHUTDOWN) and/or clear the QKey
 * violation counter (IB_PORT_RESET_QKEY_CNTR).
 */
static int ipath_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	dev->port_cap_flags |= props->set_port_cap_mask;
	dev->port_cap_flags &= ~props->clr_port_cap_mask;
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		dev->qkey_violations = 0;
	return 0;
}

/*
 * Query a GID.  Only a single GID (index 0) is supported: the device
 * GUID with the subnet prefix; any other index returns -EINVAL.
 */
static int ipath_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	if (index >= 1) {
		ret = -EINVAL;
		goto bail;
	}
	gid->global.subnet_prefix = dev->gid_prefix;
	gid->global.interface_id = dev->dd->ipath_guid;

	ret = 0;

bail:
	return ret;
}

static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_pd *pd;
	struct ib_pd *ret;

	/*
	 * This is actually totally arbitrary.  Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
* We don't actually have this limit, but we fail the test if * we allow allocations of more than we report for this value. */ pd = kmalloc(sizeof *pd, GFP_KERNEL); if (!pd) { ret = ERR_PTR(-ENOMEM); goto bail; } spin_lock(&dev->n_pds_lock); if (dev->n_pds_allocated == ib_ipath_max_pds) { spin_unlock(&dev->n_pds_lock); kfree(pd); ret = ERR_PTR(-ENOMEM); goto bail; } dev->n_pds_allocated++; spin_unlock(&dev->n_pds_lock); /* ib_alloc_pd() will initialize pd->ibpd. */ pd->user = udata != NULL; ret = &pd->ibpd; bail: return ret; } static int ipath_dealloc_pd(struct ib_pd *ibpd) { struct ipath_pd *pd = to_ipd(ibpd); struct ipath_ibdev *dev = to_idev(ibpd->device); spin_lock(&dev->n_pds_lock); dev->n_pds_allocated--; spin_unlock(&dev->n_pds_lock); kfree(pd); return 0; } /** * ipath_create_ah - create an address handle * @pd: the protection domain * @ah_attr: the attributes of the AH * * This may be called from interrupt context. */ static struct ib_ah *ipath_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { struct ipath_ah *ah; struct ib_ah *ret; struct ipath_ibdev *dev = to_idev(pd->device); unsigned long flags; /* A multicast address requires a GRH (see ch. 8.4.1). */ if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && ah_attr->dlid != IPATH_PERMISSIVE_LID && !(ah_attr->ah_flags & IB_AH_GRH)) { ret = ERR_PTR(-EINVAL); goto bail; } if (ah_attr->dlid == 0) { ret = ERR_PTR(-EINVAL); goto bail; } if (ah_attr->port_num < 1 || ah_attr->port_num > pd->device->phys_port_cnt) { ret = ERR_PTR(-EINVAL); goto bail; } ah = kmalloc(sizeof *ah, GFP_ATOMIC); if (!ah) { ret = ERR_PTR(-ENOMEM); goto bail; } spin_lock_irqsave(&dev->n_ahs_lock, flags); if (dev->n_ahs_allocated == ib_ipath_max_ahs) { spin_unlock_irqrestore(&dev->n_ahs_lock, flags); kfree(ah); ret = ERR_PTR(-ENOMEM); goto bail; } dev->n_ahs_allocated++; spin_unlock_irqrestore(&dev->n_ahs_lock, flags); /* ib_create_ah() will initialize ah->ibah. 
*/
	ah->attr = *ah_attr;
	/* Store the rate as the chip's internal multiplier, not the IB enum. */
	ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);

	ret = &ah->ibah;

bail:
	return ret;
}

/**
 * ipath_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int ipath_destroy_ah(struct ib_ah *ibah)
{
	struct ipath_ibdev *dev = to_idev(ibah->device);
	struct ipath_ah *ah = to_iah(ibah);
	unsigned long flags;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}

/*
 * Return the stored AH attributes, converting the static rate back
 * from the internal multiplier to the IB rate enum.
 */
static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct ipath_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;
	ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);

	return 0;
}

/**
 * ipath_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}

/**
 * ipath_get_pkey - return the indexed PKEY from the port PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 *
 * Returns 0 for an out-of-range index.
 */
unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	unsigned ret;

	/* always a kernel port, no locking needed */
	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		ret = 0;
	else
		ret = dd->ipath_pd[0]->port_pkeys[index];

	return ret;
}

static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	if (index >= ipath_get_npkeys(dev->dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = ipath_get_pkey(dev->dd, index);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the InfiniPath driver
 */
static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct ipath_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret =
ERR_PTR(-ENOMEM); goto bail; } ret = &context->ibucontext; bail: return ret; } static int ipath_dealloc_ucontext(struct ib_ucontext *context) { kfree(to_iucontext(context)); return 0; } static int ipath_verbs_register_sysfs(struct ib_device *dev); static void __verbs_timer(unsigned long arg) { struct ipath_devdata *dd = (struct ipath_devdata *) arg; /* Handle verbs layer timeouts. */ ipath_ib_timer(dd->verbs_dev); mod_timer(&dd->verbs_timer, jiffies + 1); } static int enable_timer(struct ipath_devdata *dd) { /* * Early chips had a design flaw where the chip and kernel idea * of the tail register don't always agree, and therefore we won't * get an interrupt on the next packet received. * If the board supports per packet receive interrupts, use it. * Otherwise, the timer function periodically checks for packets * to cover this case. * Either way, the timer is needed for verbs layer related * processing. */ if (dd->ipath_flags & IPATH_GPIO_INTR) { ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, 0x2074076542310ULL); /* Enable GPIO bit 2 interrupt */ dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT); ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, dd->ipath_gpio_mask); } init_timer(&dd->verbs_timer); dd->verbs_timer.function = __verbs_timer; dd->verbs_timer.data = (unsigned long)dd; dd->verbs_timer.expires = jiffies + 1; add_timer(&dd->verbs_timer); return 0; } static int disable_timer(struct ipath_devdata *dd) { /* Disable GPIO bit 2 interrupt */ if (dd->ipath_flags & IPATH_GPIO_INTR) { /* Disable GPIO bit 2 interrupt */ dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT)); ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, dd->ipath_gpio_mask); /* * We might want to undo changes to debugportselect, * but how? */ } del_timer_sync(&dd->verbs_timer); return 0; } /** * ipath_register_ib_device - register our device with the infiniband core * @dd: the device data structure * Return the allocated ipath_ibdev pointer or NULL on error. 
*/ int ipath_register_ib_device(struct ipath_devdata *dd) { struct ipath_verbs_counters cntrs; struct ipath_ibdev *idev; struct ib_device *dev; struct ipath_verbs_txreq *tx; unsigned i; int ret; idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev); if (idev == NULL) { ret = -ENOMEM; goto bail; } dev = &idev->ibdev; if (dd->ipath_sdma_descq_cnt) { tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx, GFP_KERNEL); if (tx == NULL) { ret = -ENOMEM; goto err_tx; } } else tx = NULL; idev->txreq_bufs = tx; /* Only need to initialize non-zero fields. */ spin_lock_init(&idev->n_pds_lock); spin_lock_init(&idev->n_ahs_lock); spin_lock_init(&idev->n_cqs_lock); spin_lock_init(&idev->n_qps_lock); spin_lock_init(&idev->n_srqs_lock); spin_lock_init(&idev->n_mcast_grps_lock); spin_lock_init(&idev->qp_table.lock); spin_lock_init(&idev->lk_table.lock); idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); /* Set the prefix to the default value (see ch. 4.1.1) */ idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL); ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size); if (ret) goto err_qp; /* * The top ib_ipath_lkey_table_size bits are used to index the * table. The lower 8 bits can be owned by the user (copied from * the LKEY). The remaining bits act as a generation number or tag. 
*/ idev->lk_table.max = 1 << ib_ipath_lkey_table_size; idev->lk_table.table = kzalloc(idev->lk_table.max * sizeof(*idev->lk_table.table), GFP_KERNEL); if (idev->lk_table.table == NULL) { ret = -ENOMEM; goto err_lk; } INIT_LIST_HEAD(&idev->pending_mmaps); spin_lock_init(&idev->pending_lock); idev->mmap_offset = PAGE_SIZE; spin_lock_init(&idev->mmap_offset_lock); INIT_LIST_HEAD(&idev->pending[0]); INIT_LIST_HEAD(&idev->pending[1]); INIT_LIST_HEAD(&idev->pending[2]); INIT_LIST_HEAD(&idev->piowait); INIT_LIST_HEAD(&idev->rnrwait); INIT_LIST_HEAD(&idev->txreq_free); idev->pending_index = 0; idev->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP; if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY) idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP; idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; /* Snapshot current HW counters to "clear" them. 
*/ ipath_get_counters(dd, &cntrs); idev->z_symbol_error_counter = cntrs.symbol_error_counter; idev->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; idev->z_link_downed_counter = cntrs.link_downed_counter; idev->z_port_rcv_errors = cntrs.port_rcv_errors; idev->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; idev->z_port_xmit_discards = cntrs.port_xmit_discards; idev->z_port_xmit_data = cntrs.port_xmit_data; idev->z_port_rcv_data = cntrs.port_rcv_data; idev->z_port_xmit_packets = cntrs.port_xmit_packets; idev->z_port_rcv_packets = cntrs.port_rcv_packets; idev->z_local_link_integrity_errors = cntrs.local_link_integrity_errors; idev->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; idev->z_vl15_dropped = cntrs.vl15_dropped; for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++) list_add(&tx->txreq.list, &idev->txreq_free); /* * The system image GUID is supposed to be the same for all * IB HCAs in a single system but since there can be other * device types in the system, we can't be sure this is unique. 
*/ if (!sys_image_guid) sys_image_guid = dd->ipath_guid; idev->sys_image_guid = sys_image_guid; idev->ib_unit = dd->ipath_unit; idev->dd = dd; strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); dev->owner = THIS_MODULE; dev->node_guid = dd->ipath_guid; dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; dev->uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | (1ull << IB_USER_VERBS_CMD_CREATE_AH) | (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | (1ull << IB_USER_VERBS_CMD_QUERY_AH) | (1ull << IB_USER_VERBS_CMD_REG_MR) | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_POST_SEND) | (1ull << IB_USER_VERBS_CMD_POST_RECV) | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); dev->node_type = RDMA_NODE_IB_CA; dev->phys_port_cnt = 1; dev->num_comp_vectors = 1; dev->dma_device = &dd->pcidev->dev; dev->query_device = ipath_query_device; dev->modify_device = ipath_modify_device; dev->query_port = ipath_query_port; dev->modify_port = ipath_modify_port; dev->query_pkey = ipath_query_pkey; dev->query_gid = ipath_query_gid; dev->alloc_ucontext = ipath_alloc_ucontext; dev->dealloc_ucontext = ipath_dealloc_ucontext; dev->alloc_pd = ipath_alloc_pd; dev->dealloc_pd 
= ipath_dealloc_pd; dev->create_ah = ipath_create_ah; dev->destroy_ah = ipath_destroy_ah; dev->query_ah = ipath_query_ah; dev->create_srq = ipath_create_srq; dev->modify_srq = ipath_modify_srq; dev->query_srq = ipath_query_srq; dev->destroy_srq = ipath_destroy_srq; dev->create_qp = ipath_create_qp; dev->modify_qp = ipath_modify_qp; dev->query_qp = ipath_query_qp; dev->destroy_qp = ipath_destroy_qp; dev->post_send = ipath_post_send; dev->post_recv = ipath_post_receive; dev->post_srq_recv = ipath_post_srq_receive; dev->create_cq = ipath_create_cq; dev->destroy_cq = ipath_destroy_cq; dev->resize_cq = ipath_resize_cq; dev->poll_cq = ipath_poll_cq; dev->req_notify_cq = ipath_req_notify_cq; dev->get_dma_mr = ipath_get_dma_mr; dev->reg_phys_mr = ipath_reg_phys_mr; dev->reg_user_mr = ipath_reg_user_mr; dev->dereg_mr = ipath_dereg_mr; dev->alloc_fmr = ipath_alloc_fmr; dev->map_phys_fmr = ipath_map_phys_fmr; dev->unmap_fmr = ipath_unmap_fmr; dev->dealloc_fmr = ipath_dealloc_fmr; dev->attach_mcast = ipath_multicast_attach; dev->detach_mcast = ipath_multicast_detach; dev->process_mad = ipath_process_mad; dev->mmap = ipath_mmap; dev->dma_ops = &ipath_dma_mapping_ops; snprintf(dev->node_desc, sizeof(dev->node_desc), IPATH_IDSTR " %s", init_utsname()->nodename); ret = ib_register_device(dev, NULL); if (ret) goto err_reg; if (ipath_verbs_register_sysfs(dev)) goto err_class; enable_timer(dd); goto bail; err_class: ib_unregister_device(dev); err_reg: kfree(idev->lk_table.table); err_lk: kfree(idev->qp_table.table); err_qp: kfree(idev->txreq_bufs); err_tx: ib_dealloc_device(dev); ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret); idev = NULL; bail: dd->verbs_dev = idev; return ret; } void ipath_unregister_ib_device(struct ipath_ibdev *dev) { struct ib_device *ibdev = &dev->ibdev; u32 qps_inuse; ib_unregister_device(ibdev); disable_timer(dev->dd); if (!list_empty(&dev->pending[0]) || !list_empty(&dev->pending[1]) || !list_empty(&dev->pending[2])) ipath_dev_err(dev->dd, "pending 
list not empty!\n"); if (!list_empty(&dev->piowait)) ipath_dev_err(dev->dd, "piowait list not empty!\n"); if (!list_empty(&dev->rnrwait)) ipath_dev_err(dev->dd, "rnrwait list not empty!\n"); if (!ipath_mcast_tree_empty()) ipath_dev_err(dev->dd, "multicast table memory leak!\n"); /* * Note that ipath_unregister_ib_device() can be called before all * the QPs are destroyed! */ qps_inuse = ipath_free_all_qps(&dev->qp_table); if (qps_inuse) ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n", qps_inuse); kfree(dev->qp_table.table); kfree(dev->lk_table.table); kfree(dev->txreq_bufs); ib_dealloc_device(ibdev); } static ssize_t show_rev(struct device *device, struct device_attribute *attr, char *buf) { struct ipath_ibdev *dev = container_of(device, struct ipath_ibdev, ibdev.dev); return sprintf(buf, "%x\n", dev->dd->ipath_pcirev); } static ssize_t show_hca(struct device *device, struct device_attribute *attr, char *buf) { struct ipath_ibdev *dev = container_of(device, struct ipath_ibdev, ibdev.dev); int ret; ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128); if (ret < 0) goto bail; strcat(buf, "\n"); ret = strlen(buf); bail: return ret; } static ssize_t show_stats(struct device *device, struct device_attribute *attr, char *buf) { struct ipath_ibdev *dev = container_of(device, struct ipath_ibdev, ibdev.dev); int i; int len; len = sprintf(buf, "RC resends %d\n" "RC no QACK %d\n" "RC ACKs %d\n" "RC SEQ NAKs %d\n" "RC RDMA seq %d\n" "RC RNR NAKs %d\n" "RC OTH NAKs %d\n" "RC timeouts %d\n" "RC RDMA dup %d\n" "piobuf wait %d\n" "unaligned %d\n" "PKT drops %d\n" "WQE errs %d\n", dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, dev->n_other_naks, dev->n_timeouts, dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned, dev->n_pkt_drops, dev->n_wqe_errs); for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { const struct ipath_opcode_stats *si = &dev->opstats[i]; if (!si->n_packets && !si->n_bytes) continue; len += 
sprintf(buf + len, "%02x %llu/%llu\n", i, (unsigned long long) si->n_packets, (unsigned long long) si->n_bytes); } return len; } static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL); static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL); static struct device_attribute *ipath_class_attributes[] = { &dev_attr_hw_rev, &dev_attr_hca_type, &dev_attr_board_id, &dev_attr_stats }; static int ipath_verbs_register_sysfs(struct ib_device *dev) { int i; int ret; for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) if (device_create_file(&dev->dev, ipath_class_attributes[i])) { ret = 1; goto bail; } ret = 0; bail: return ret; }
gpl-2.0
blackbox87/zte_skate_gb_kernel
drivers/media/dvb/frontends/tua6100.c
4202
5405
/** * Driver for Infineon tua6100 pll. * * (c) 2006 Andrew de Quincey * * Based on code found in budget-av.c, which has the following: * Compiled from various sources by Michael Hunold <michael@mihu.de> * * CI interface support (c) 2004 Olivier Gournet <ogournet@anevia.com> & * Andrew de Quincey <adq_dvb@lidskialf.net> * * Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de> * * Copyright (C) 1999-2002 Ralph Metzler * & Marcus Metzler for convergence integrated media GmbH * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/module.h> #include <linux/dvb/frontend.h> #include <asm/types.h> #include "tua6100.h" struct tua6100_priv { /* i2c details */ int i2c_address; struct i2c_adapter *i2c; u32 frequency; }; static int tua6100_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int tua6100_sleep(struct dvb_frontend *fe) { struct tua6100_priv *priv = fe->tuner_priv; int ret; u8 reg0[] = { 0x00, 0x00 }; struct i2c_msg msg = { .addr = priv->i2c_address, .flags = 0, .buf = reg0, .len = 2 }; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if ((ret = i2c_transfer (priv->i2c, &msg, 1)) != 1) { printk("%s: i2c error\n", __func__); } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); return (ret == 1) ? 
0 : ret;
}

/*
 * Program the PLL for the requested frequency.  params->frequency is
 * in kHz.  Writes three registers over I2C: reg0, reg1 (divider and
 * band-select bits) and reg2 (reference divider R).  The exact bit
 * meanings come from the TUA6100 datasheet — confirm there before
 * changing any of the threshold constants below.
 */
static int tua6100_set_params(struct dvb_frontend *fe,
			      struct dvb_frontend_parameters *params)
{
	struct tua6100_priv *priv = fe->tuner_priv;
	u32 div;
	u32 prediv;
	u8 reg0[] = { 0x00, 0x00 };
	u8 reg1[] = { 0x01, 0x00, 0x00, 0x00 };
	u8 reg2[] = { 0x02, 0x00, 0x00 };
	struct i2c_msg msg0 = { .addr = priv->i2c_address, .flags = 0,
				.buf = reg0, .len = 2 };
	struct i2c_msg msg1 = { .addr = priv->i2c_address, .flags = 0,
				.buf = reg1, .len = 4 };
	struct i2c_msg msg2 = { .addr = priv->i2c_address, .flags = 0,
				.buf = reg2, .len = 3 };

	/* _R = reference divider, _P = prescaler, _ri = reference input (Hz) */
#define _R 4
#define _P 32
#define _ri 4000000

	// setup register 0
	if (params->frequency < 2000000) {
		reg0[1] = 0x03;
	} else {
		reg0[1] = 0x07;
	}

	// setup register 1
	if (params->frequency < 1630000) {
		reg1[1] = 0x2c;
	} else {
		reg1[1] = 0x0c;
	}
	if (_P == 64)
		reg1[1] |= 0x40;
	if (params->frequency >= 1525000)
		reg1[1] |= 0x80;

	// register 2
	reg2[1] = (_R >> 8) & 0x03;
	reg2[2] = _R;
	if (params->frequency < 1455000) {
		reg2[1] |= 0x1c;
	} else if (params->frequency < 1630000) {
		reg2[1] |= 0x0c;
	} else {
		reg2[1] |= 0x1c;
	}

	// The N divisor ratio (note: params->frequency is in kHz, but we need it in Hz)
	prediv = (params->frequency * _R) / (_ri / 1000);
	div = prediv / _P;
	reg1[1] |= (div >> 9) & 0x03;
	reg1[2] = div >> 1;
	reg1[3] = (div << 7);
	/* Report the frequency the PLL will actually lock to (rounded). */
	priv->frequency = ((div * _P) * (_ri / 1000)) / _R;

	// Finally, calculate and store the value for A
	reg1[3] |= (prediv - (div*_P)) & 0x7f;

#undef _R
#undef _P
#undef _ri

	/*
	 * Write the three registers, opening the I2C gate before each
	 * transfer and closing it only after the last one succeeds.
	 * NOTE(review): on an -EIO return the gate is left open —
	 * matches upstream behavior; confirm before "fixing".
	 */
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(priv->i2c, &msg0, 1) != 1)
		return -EIO;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(priv->i2c, &msg2, 1) != 1)
		return -EIO;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(priv->i2c, &msg1, 1) != 1)
		return -EIO;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);
	return 0;
}

/* Report the last programmed (rounded) frequency, in kHz. */
static int tua6100_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
	struct tua6100_priv *priv = fe->tuner_priv;
	*frequency =
priv->frequency;
	return 0;
}

static struct dvb_tuner_ops tua6100_tuner_ops = {
	.info = {
		.name = "Infineon TUA6100",
		.frequency_min = 950000,
		.frequency_max = 2150000,
		.frequency_step = 1000,
	},
	.release = tua6100_release,
	.sleep = tua6100_sleep,
	.set_params = tua6100_set_params,
	.get_frequency = tua6100_get_frequency,
};

/*
 * Probe for a TUA6100 at @addr on @i2c and, on success, install this
 * driver's tuner ops into the frontend.  Returns @fe on success, or
 * NULL if the chip does not respond or allocation fails.
 */
struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr,
				    struct i2c_adapter *i2c)
{
	struct tua6100_priv *priv = NULL;
	u8 b1 [] = { 0x80 };
	u8 b2 [] = { 0x00 };
	struct i2c_msg msg [] = {
		{ .addr = addr, .flags = 0, .buf = b1, .len = 1 },
		{ .addr = addr, .flags = I2C_M_RD, .buf = b2, .len = 1 }
	};
	int ret;

	/* Write 0x80 and read one byte back to check the chip is there. */
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	ret = i2c_transfer (i2c, msg, 2);
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);
	if (ret != 2)
		return NULL;

	priv = kzalloc(sizeof(struct tua6100_priv), GFP_KERNEL);
	if (priv == NULL)
		return NULL;

	priv->i2c_address = addr;
	priv->i2c = i2c;

	memcpy(&fe->ops.tuner_ops, &tua6100_tuner_ops,
	       sizeof(struct dvb_tuner_ops));
	fe->tuner_priv = priv;
	return fe;
}
EXPORT_SYMBOL(tua6100_attach);

MODULE_DESCRIPTION("DVB tua6100 driver");
MODULE_AUTHOR("Andrew de Quincey");
MODULE_LICENSE("GPL");
gpl-2.0
SciAps/android-dm3730-kernel
drivers/parport/parport_mfc3.c
4202
10989
/* Low-level parallel port routines for the Multiface 3 card * * Author: Joerg Dorchain <joerg@dorchain.net> * * (C) The elitist m68k Users(TM) * * based on the existing parport_amiga and lp_mfc * * * From the MFC3 documentation: * * Miscellaneous PIA Details * ------------------------- * * The two open-drain interrupt outputs /IRQA and /IRQB are routed to * /INT2 of the Z2 bus. * * The CPU data bus of the PIA (D0-D7) is connected to D8-D15 on the Z2 * bus. This means that any PIA registers are accessed at even addresses. * * Centronics Pin Connections for the PIA * -------------------------------------- * * The following table shows the connections between the PIA and the * Centronics interface connector. These connections implement a single, but * very complete, Centronics type interface. The Pin column gives the pin * numbers of the PIA. The Centronics pin numbers can be found in the section * "Parallel Connectors". * * * Pin | PIA | Dir | Centronics Names * -------+-----+-----+--------------------------------------------------------- * 19 | CB2 | --> | /STROBE (aka /DRDY) * 10-17 | PBx | <-> | DATA0 - DATA7 * 18 | CB1 | <-- | /ACK * 40 | CA1 | <-- | BUSY * 3 | PA1 | <-- | PAPER-OUT (aka POUT) * 4 | PA2 | <-- | SELECTED (aka SEL) * 9 | PA7 | --> | /INIT (aka /RESET or /INPUT-PRIME) * 6 | PA4 | <-- | /ERROR (aka /FAULT) * 7 | PA5 | --> | DIR (aka /SELECT-IN) * 8 | PA6 | --> | /AUTO-FEED-XT * 39 | CA2 | --> | open * 5 | PA3 | <-- | /ACK (same as CB1!) * 2 | PA0 | <-- | BUSY (same as CA1!) * -------+-----+-----+--------------------------------------------------------- * * Should be enough to understand some of the driver. * * Per convention for normal use the port registers are visible. * If you need the data direction registers, restore the value in the * control register. 
*/ #include "multiface.h" #include <linux/module.h> #include <linux/init.h> #include <linux/parport.h> #include <linux/delay.h> #include <linux/mc6821.h> #include <linux/zorro.h> #include <linux/interrupt.h> #include <asm/setup.h> #include <asm/amigahw.h> #include <asm/irq.h> #include <asm/amigaints.h> /* Maximum Number of Cards supported */ #define MAX_MFC 5 #undef DEBUG #ifdef DEBUG #define DPRINTK printk #else static inline int DPRINTK(void *nothing, ...) {return 0;} #endif static struct parport *this_port[MAX_MFC] = {NULL, }; static volatile int dummy; /* for trigger readds */ #define pia(dev) ((struct pia *)(dev->base)) static struct parport_operations pp_mfc3_ops; static void mfc3_write_data(struct parport *p, unsigned char data) { DPRINTK(KERN_DEBUG "write_data %c\n",data); dummy = pia(p)->pprb; /* clears irq bit */ /* Triggers also /STROBE.*/ pia(p)->pprb = data; } static unsigned char mfc3_read_data(struct parport *p) { /* clears interrupt bit. Triggers also /STROBE. */ return pia(p)->pprb; } static unsigned char control_pc_to_mfc3(unsigned char control) { unsigned char ret = 32|64; if (control & PARPORT_CONTROL_SELECT) /* XXX: What is SELECP? 
*/ ret &= ~32; /* /SELECT_IN */ if (control & PARPORT_CONTROL_INIT) /* INITP */ ret |= 128; if (control & PARPORT_CONTROL_AUTOFD) /* AUTOLF */ ret &= ~64; if (control & PARPORT_CONTROL_STROBE) /* Strobe */ /* Handled directly by hardware */; return ret; } static unsigned char control_mfc3_to_pc(unsigned char control) { unsigned char ret = PARPORT_CONTROL_STROBE | PARPORT_CONTROL_AUTOFD | PARPORT_CONTROL_SELECT; if (control & 128) /* /INITP */ ret |= PARPORT_CONTROL_INIT; if (control & 64) /* /AUTOLF */ ret &= ~PARPORT_CONTROL_AUTOFD; if (control & 32) /* /SELECT_IN */ ret &= ~PARPORT_CONTROL_SELECT; return ret; } static void mfc3_write_control(struct parport *p, unsigned char control) { DPRINTK(KERN_DEBUG "write_control %02x\n",control); pia(p)->ppra = (pia(p)->ppra & 0x1f) | control_pc_to_mfc3(control); } static unsigned char mfc3_read_control( struct parport *p) { DPRINTK(KERN_DEBUG "read_control \n"); return control_mfc3_to_pc(pia(p)->ppra & 0xe0); } static unsigned char mfc3_frob_control( struct parport *p, unsigned char mask, unsigned char val) { unsigned char old; DPRINTK(KERN_DEBUG "frob_control mask %02x, value %02x\n",mask,val); old = mfc3_read_control(p); mfc3_write_control(p, (old & ~mask) ^ val); return old; } #if 0 /* currently unused */ static unsigned char status_pc_to_mfc3(unsigned char status) { unsigned char ret = 1; if (status & PARPORT_STATUS_BUSY) /* Busy */ ret &= ~1; if (status & PARPORT_STATUS_ACK) /* Ack */ ret |= 8; if (status & PARPORT_STATUS_PAPEROUT) /* PaperOut */ ret |= 2; if (status & PARPORT_STATUS_SELECT) /* select */ ret |= 4; if (status & PARPORT_STATUS_ERROR) /* error */ ret |= 16; return ret; } #endif static unsigned char status_mfc3_to_pc(unsigned char status) { unsigned char ret = PARPORT_STATUS_BUSY; if (status & 1) /* Busy */ ret &= ~PARPORT_STATUS_BUSY; if (status & 2) /* PaperOut */ ret |= PARPORT_STATUS_PAPEROUT; if (status & 4) /* Selected */ ret |= PARPORT_STATUS_SELECT; if (status & 8) /* Ack */ ret |= 
PARPORT_STATUS_ACK; if (status & 16) /* /ERROR */ ret |= PARPORT_STATUS_ERROR; return ret; } #if 0 /* currently unused */ static void mfc3_write_status( struct parport *p, unsigned char status) { DPRINTK(KERN_DEBUG "write_status %02x\n",status); pia(p)->ppra = (pia(p)->ppra & 0xe0) | status_pc_to_mfc3(status); } #endif static unsigned char mfc3_read_status(struct parport *p) { unsigned char status; status = status_mfc3_to_pc(pia(p)->ppra & 0x1f); DPRINTK(KERN_DEBUG "read_status %02x\n", status); return status; } #if 0 /* currently unused */ static void mfc3_change_mode( struct parport *p, int m) { /* XXX: This port only has one mode, and I am not sure about the corresponding PC-style mode*/ } #endif static int use_cnt = 0; static irqreturn_t mfc3_interrupt(int irq, void *dev_id) { int i; for( i = 0; i < MAX_MFC; i++) if (this_port[i] != NULL) if (pia(this_port[i])->crb & 128) { /* Board caused interrupt */ dummy = pia(this_port[i])->pprb; /* clear irq bit */ parport_generic_irq(this_port[i]); } return IRQ_HANDLED; } static void mfc3_enable_irq(struct parport *p) { pia(p)->crb |= PIA_C1_ENABLE_IRQ; } static void mfc3_disable_irq(struct parport *p) { pia(p)->crb &= ~PIA_C1_ENABLE_IRQ; } static void mfc3_data_forward(struct parport *p) { DPRINTK(KERN_DEBUG "forward\n"); pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */ pia(p)->pddrb = 255; /* all pins output */ pia(p)->crb |= PIA_DDR; /* make data register visible - default */ } static void mfc3_data_reverse(struct parport *p) { DPRINTK(KERN_DEBUG "reverse\n"); pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */ pia(p)->pddrb = 0; /* all pins input */ pia(p)->crb |= PIA_DDR; /* make data register visible - default */ } static void mfc3_init_state(struct pardevice *dev, struct parport_state *s) { s->u.amiga.data = 0; s->u.amiga.datadir = 255; s->u.amiga.status = 0; s->u.amiga.statusdir = 0xe0; } static void mfc3_save_state(struct parport *p, struct parport_state *s) { s->u.amiga.data = 
pia(p)->pprb; pia(p)->crb &= ~PIA_DDR; s->u.amiga.datadir = pia(p)->pddrb; pia(p)->crb |= PIA_DDR; s->u.amiga.status = pia(p)->ppra; pia(p)->cra &= ~PIA_DDR; s->u.amiga.statusdir = pia(p)->pddrb; pia(p)->cra |= PIA_DDR; } static void mfc3_restore_state(struct parport *p, struct parport_state *s) { pia(p)->pprb = s->u.amiga.data; pia(p)->crb &= ~PIA_DDR; pia(p)->pddrb = s->u.amiga.datadir; pia(p)->crb |= PIA_DDR; pia(p)->ppra = s->u.amiga.status; pia(p)->cra &= ~PIA_DDR; pia(p)->pddrb = s->u.amiga.statusdir; pia(p)->cra |= PIA_DDR; } static struct parport_operations pp_mfc3_ops = { .write_data = mfc3_write_data, .read_data = mfc3_read_data, .write_control = mfc3_write_control, .read_control = mfc3_read_control, .frob_control = mfc3_frob_control, .read_status = mfc3_read_status, .enable_irq = mfc3_enable_irq, .disable_irq = mfc3_disable_irq, .data_forward = mfc3_data_forward, .data_reverse = mfc3_data_reverse, .init_state = mfc3_init_state, .save_state = mfc3_save_state, .restore_state = mfc3_restore_state, .epp_write_data = parport_ieee1284_epp_write_data, .epp_read_data = parport_ieee1284_epp_read_data, .epp_write_addr = parport_ieee1284_epp_write_addr, .epp_read_addr = parport_ieee1284_epp_read_addr, .ecp_write_data = parport_ieee1284_ecp_write_data, .ecp_read_data = parport_ieee1284_ecp_read_data, .ecp_write_addr = parport_ieee1284_ecp_write_addr, .compat_write_data = parport_ieee1284_write_compat, .nibble_read_data = parport_ieee1284_read_nibble, .byte_read_data = parport_ieee1284_read_byte, .owner = THIS_MODULE, }; /* ----------- Initialisation code --------------------------------- */ static int __init parport_mfc3_init(void) { struct parport *p; int pias = 0; struct pia *pp; struct zorro_dev *z = NULL; if (!MACH_IS_AMIGA) return -ENODEV; while ((z = zorro_find_device(ZORRO_PROD_BSC_MULTIFACE_III, z))) { unsigned long piabase = z->resource.start+PIABASE; if (!request_mem_region(piabase, sizeof(struct pia), "PIA")) continue; pp = (struct pia 
*)ZTWO_VADDR(piabase); pp->crb = 0; pp->pddrb = 255; /* all data pins output */ pp->crb = PIA_DDR|32|8; dummy = pp->pddrb; /* reading clears interrupt */ pp->cra = 0; pp->pddra = 0xe0; /* /RESET, /DIR ,/AUTO-FEED output */ pp->cra = PIA_DDR; pp->ppra = 0; /* reset printer */ udelay(10); pp->ppra = 128; p = parport_register_port((unsigned long)pp, IRQ_AMIGA_PORTS, PARPORT_DMA_NONE, &pp_mfc3_ops); if (!p) goto out_port; if (p->irq != PARPORT_IRQ_NONE) { if (use_cnt++ == 0) if (request_irq(IRQ_AMIGA_PORTS, mfc3_interrupt, IRQF_SHARED, p->name, &pp_mfc3_ops)) goto out_irq; } p->dev = &z->dev; this_port[pias++] = p; printk(KERN_INFO "%s: Multiface III port using irq\n", p->name); /* XXX: set operating mode */ p->private_data = (void *)piabase; parport_announce_port (p); if (pias >= MAX_MFC) break; continue; out_irq: parport_put_port(p); out_port: release_mem_region(piabase, sizeof(struct pia)); } return pias ? 0 : -ENODEV; } static void __exit parport_mfc3_exit(void) { int i; for (i = 0; i < MAX_MFC; i++) { if (!this_port[i]) continue; parport_remove_port(this_port[i]); if (this_port[i]->irq != PARPORT_IRQ_NONE) { if (--use_cnt == 0) free_irq(IRQ_AMIGA_PORTS, &pp_mfc3_ops); } release_mem_region(ZTWO_PADDR(this_port[i]->private_data), sizeof(struct pia)); parport_put_port(this_port[i]); } } MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Paralllel Port"); MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port"); MODULE_LICENSE("GPL"); module_init(parport_mfc3_init) module_exit(parport_mfc3_exit)
gpl-2.0
sinoory/linux3.19
lib/mpi/mpi-pow.c
4202
8464
/* mpi-pow.c - MPI functions * Copyright (C) 1994, 1996, 1998, 2000 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. * The GNU MP Library itself is published under the LGPL; * however I decided to publish this code under the plain GPL. 
*/ #include <linux/string.h> #include "mpi-internal.h" #include "longlong.h" /**************** * RES = BASE ^ EXP mod MOD */ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) { mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL; mpi_ptr_t xp_marker = NULL; mpi_ptr_t tspace = NULL; mpi_ptr_t rp, ep, mp, bp; mpi_size_t esize, msize, bsize, rsize; int esign, msign, bsign, rsign; mpi_size_t size; int mod_shift_cnt; int negative_result; int assign_rp = 0; mpi_size_t tsize = 0; /* to avoid compiler warning */ /* fixme: we should check that the warning is void */ int rc = -ENOMEM; esize = exp->nlimbs; msize = mod->nlimbs; size = 2 * msize; esign = exp->sign; msign = mod->sign; rp = res->d; ep = exp->d; if (!msize) return -EINVAL; if (!esize) { /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 * depending on if MOD equals 1. */ rp[0] = 1; res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; res->sign = 0; goto leave; } /* Normalize MOD (i.e. make its most significant bit set) as required by * mpn_divrem. This will make the intermediate values in the calculation * slightly larger, but the correct result is obtained after a final * reduction using the original MOD value. */ mp = mp_marker = mpi_alloc_limb_space(msize); if (!mp) goto enomem; mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]); if (mod_shift_cnt) mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt); else MPN_COPY(mp, mod->d, msize); bsize = base->nlimbs; bsign = base->sign; if (bsize > msize) { /* The base is larger than the module. Reduce it. */ /* Allocate (BSIZE + 1) with space for remainder and quotient. * (The quotient is (bsize - msize + 1) limbs.) */ bp = bp_marker = mpi_alloc_limb_space(bsize + 1); if (!bp) goto enomem; MPN_COPY(bp, base->d, bsize); /* We don't care about the quotient, store it above the remainder, * at BP + MSIZE. */ mpihelp_divrem(bp + msize, 0, bp, bsize, mp, msize); bsize = msize; /* Canonicalize the base, since we are going to multiply with it * quite a few times. 
*/ MPN_NORMALIZE(bp, bsize); } else bp = base->d; if (!bsize) { res->nlimbs = 0; res->sign = 0; goto leave; } if (res->alloced < size) { /* We have to allocate more space for RES. If any of the input * parameters are identical to RES, defer deallocation of the old * space. */ if (rp == ep || rp == mp || rp == bp) { rp = mpi_alloc_limb_space(size); if (!rp) goto enomem; assign_rp = 1; } else { if (mpi_resize(res, size) < 0) goto enomem; rp = res->d; } } else { /* Make BASE, EXP and MOD not overlap with RES. */ if (rp == bp) { /* RES and BASE are identical. Allocate temp. space for BASE. */ BUG_ON(bp_marker); bp = bp_marker = mpi_alloc_limb_space(bsize); if (!bp) goto enomem; MPN_COPY(bp, rp, bsize); } if (rp == ep) { /* RES and EXP are identical. Allocate temp. space for EXP. */ ep = ep_marker = mpi_alloc_limb_space(esize); if (!ep) goto enomem; MPN_COPY(ep, rp, esize); } if (rp == mp) { /* RES and MOD are identical. Allocate temporary space for MOD. */ BUG_ON(mp_marker); mp = mp_marker = mpi_alloc_limb_space(msize); if (!mp) goto enomem; MPN_COPY(mp, rp, msize); } } MPN_COPY(rp, bp, bsize); rsize = bsize; rsign = bsign; { mpi_size_t i; mpi_ptr_t xp; int c; mpi_limb_t e; mpi_limb_t carry_limb; struct karatsuba_ctx karactx; xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1)); if (!xp) goto enomem; memset(&karactx, 0, sizeof karactx); negative_result = (ep[0] & 1) && base->sign; i = esize - 1; e = ep[i]; c = count_leading_zeros(e); e = (e << c) << 1; /* shift the exp bits to the left, lose msb */ c = BITS_PER_MPI_LIMB - 1 - c; /* Main loop. * * Make the result be pointed to alternately by XP and RP. This * helps us avoid block copying, which would otherwise be necessary * with the overlap restrictions of mpihelp_divmod. With 50% probability * the result after this loop will be in the area originally pointed * by RP (==RES->d), and with 50% probability in the area originally * pointed to by XP. 
*/ for (;;) { while (c) { mpi_ptr_t tp; mpi_size_t xsize; /*if (mpihelp_mul_n(xp, rp, rp, rsize) < 0) goto enomem */ if (rsize < KARATSUBA_THRESHOLD) mpih_sqr_n_basecase(xp, rp, rsize); else { if (!tspace) { tsize = 2 * rsize; tspace = mpi_alloc_limb_space(tsize); if (!tspace) goto enomem; } else if (tsize < (2 * rsize)) { mpi_free_limb_space(tspace); tsize = 2 * rsize; tspace = mpi_alloc_limb_space(tsize); if (!tspace) goto enomem; } mpih_sqr_n(xp, rp, rsize, tspace); } xsize = 2 * rsize; if (xsize > msize) { mpihelp_divrem(xp + msize, 0, xp, xsize, mp, msize); xsize = msize; } tp = rp; rp = xp; xp = tp; rsize = xsize; if ((mpi_limb_signed_t) e < 0) { /*mpihelp_mul( xp, rp, rsize, bp, bsize ); */ if (bsize < KARATSUBA_THRESHOLD) { mpi_limb_t tmp; if (mpihelp_mul (xp, rp, rsize, bp, bsize, &tmp) < 0) goto enomem; } else { if (mpihelp_mul_karatsuba_case (xp, rp, rsize, bp, bsize, &karactx) < 0) goto enomem; } xsize = rsize + bsize; if (xsize > msize) { mpihelp_divrem(xp + msize, 0, xp, xsize, mp, msize); xsize = msize; } tp = rp; rp = xp; xp = tp; rsize = xsize; } e <<= 1; c--; } i--; if (i < 0) break; e = ep[i]; c = BITS_PER_MPI_LIMB; } /* We shifted MOD, the modulo reduction argument, left MOD_SHIFT_CNT * steps. Adjust the result by reducing it with the original MOD. * * Also make sure the result is put in RES->d (where it already * might be, see above). */ if (mod_shift_cnt) { carry_limb = mpihelp_lshift(res->d, rp, rsize, mod_shift_cnt); rp = res->d; if (carry_limb) { rp[rsize] = carry_limb; rsize++; } } else { MPN_COPY(res->d, rp, rsize); rp = res->d; } if (rsize >= msize) { mpihelp_divrem(rp + msize, 0, rp, rsize, mp, msize); rsize = msize; } /* Remove any leading zero words from the result. 
*/ if (mod_shift_cnt) mpihelp_rshift(rp, rp, rsize, mod_shift_cnt); MPN_NORMALIZE(rp, rsize); mpihelp_release_karatsuba_ctx(&karactx); } if (negative_result && rsize) { if (mod_shift_cnt) mpihelp_rshift(mp, mp, msize, mod_shift_cnt); mpihelp_sub(rp, mp, msize, rp, rsize); rsize = msize; rsign = msign; MPN_NORMALIZE(rp, rsize); } res->nlimbs = rsize; res->sign = rsign; leave: rc = 0; enomem: if (assign_rp) mpi_assign_limb_space(res, rp, size); if (mp_marker) mpi_free_limb_space(mp_marker); if (bp_marker) mpi_free_limb_space(bp_marker); if (ep_marker) mpi_free_limb_space(ep_marker); if (xp_marker) mpi_free_limb_space(xp_marker); if (tspace) mpi_free_limb_space(tspace); return rc; } EXPORT_SYMBOL_GPL(mpi_powm);
gpl-2.0
hobit26/h3_linux
arch/tile/kernel/signal.c
4458
12642
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/personality.h> #include <linux/suspend.h> #include <linux/ptrace.h> #include <linux/elf.h> #include <linux/compat.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include <asm/processor.h> #include <asm/ucontext.h> #include <asm/sigframe.h> #include <asm/syscalls.h> #include <arch/interrupts.h> #define DEBUG_SIG 0 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss, struct pt_regs *, regs) { return do_sigaltstack(uss, uoss, regs->sp); } /* * Do a signal return; undo the signal stack. */ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; int i; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* * Enforce that sigcontext is like pt_regs, and doesn't mess * up our stack alignment rules. */ BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs)); BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0); for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) err |= __get_user(regs->regs[i], &sc->gregs[i]); /* Ensure that the PL is always set to USER_PL. 
*/ regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1)); regs->faultnum = INT_SWINT_1_SIGRETURN; return err; } void signal_fault(const char *type, struct pt_regs *regs, void __user *frame, int sig) { trace_unhandled_signal(type, regs, (unsigned long)frame, SIGSEGV); force_sigsegv(sig, current); } /* The assembly shim for this function arranges to ignore the return value. */ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs) { struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->sp); sigset_t set; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) goto badframe; return 0; badframe: signal_fault("bad sigreturn frame", regs, frame, 0); return 0; } /* * Set up a signal frame. */ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { int i, err = 0; for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) err |= __put_user(regs->regs[i], &sc->gregs[i]); return err; } /* * Determine which stack to use.. */ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long sp; /* Default to using normal stack */ sp = regs->sp; /* * If we are on the alternate signal stack and would overflow * it, don't. Return an always-bogus address instead so we * will die with SIGSEGV. */ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) return (void __user __force *)-1UL; /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (sas_ss_flags(sp) == 0) sp = current->sas_ss_sp + current->sas_ss_size; } sp -= frame_size; /* * Align the stack pointer according to the TILE ABI, * i.e. so that on function entry (sp & 15) == 0. 
*/ sp &= -16UL; return (void __user *) sp; } static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { unsigned long restorer; struct rt_sigframe __user *frame; int err = 0; int usig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; usig = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; /* Always write at least the signal number for the stack backtracer. */ if (ka->sa.sa_flags & SA_SIGINFO) { /* At sigreturn time, restore the callee-save registers too. */ err |= copy_siginfo_to_user(&frame->info, info); regs->flags |= PT_FLAGS_RESTORE_REGS; } else { err |= __put_user(info->si_signo, &frame->info.si_signo); } /* Create the ucontext. */ err |= __clear_user(&frame->save_area, sizeof(frame->save_area)); err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __put_user((void __user *)(current->sas_ss_sp), &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; restorer = VDSO_BASE; if (ka->sa.sa_flags & SA_RESTORER) restorer = (unsigned long) ka->sa.sa_restorer; /* * Set up registers for signal handler. * Registers that we don't modify keep the value they had from * user-space at the time we took the signal. * We always pass siginfo and mcontext, regardless of SA_SIGINFO, * since some things rely on this (e.g. glibc's debug/segfault.c). 
*/ regs->pc = (unsigned long) ka->sa.sa_handler; regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ regs->sp = (unsigned long) frame; regs->lr = restorer; regs->regs[0] = (unsigned long) usig; regs->regs[1] = (unsigned long) &frame->info; regs->regs[2] = (unsigned long) &frame->uc; regs->flags |= PT_FLAGS_CALLER_SAVES; /* * Notify any tracer that was single-stepping it. * The tracer may want to single-step inside the * handler too. */ if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); return 0; give_sigsegv: signal_fault("bad setup frame", regs, frame, sig); return -EFAULT; } /* * OK, we're invoking a handler */ static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) { int ret; /* Are we from a system call? */ if (regs->faultnum == INT_SWINT_1) { /* If so, check system call restarting.. */ switch (regs->regs[0]) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->regs[0] = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { regs->regs[0] = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: /* Reload caller-saves to restore r0..r5 and r10. */ regs->flags |= PT_FLAGS_CALLER_SAVES; regs->regs[0] = regs->orig_r0; regs->pc -= 8; } } /* Set up the stack frame */ #ifdef CONFIG_COMPAT if (is_compat_task()) ret = compat_setup_rt_frame(sig, ka, info, oldset, regs); else #endif ret = setup_rt_frame(sig, ka, info, oldset, regs); if (ret == 0) { /* This code is only called from system calls or from * the work_pending path in the return-to-user code, and * either way we can re-enable interrupts unconditionally. */ block_sigmask(ka, sig); } return ret; } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. 
*/ void do_signal(struct pt_regs *regs) { siginfo_t info; int signr; struct k_sigaction ka; sigset_t *oldset; /* * i386 will check if we're coming from kernel mode and bail out * here. In my experience this just turns weird crashes into * weird spin-hangs. But if we find a case where this seems * helpful, we can reinstate the check on "!user_mode(regs)". */ if (current_thread_info()->status & TS_RESTORE_SIGMASK) oldset = &current->saved_sigmask; else oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TS_RESTORE_SIGMASK flag. */ current_thread_info()->status &= ~TS_RESTORE_SIGMASK; } goto done; } /* Did we come from a system call? */ if (regs->faultnum == INT_SWINT_1) { /* Restart the system call - no handlers present */ switch (regs->regs[0]) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->flags |= PT_FLAGS_CALLER_SAVES; regs->regs[0] = regs->orig_r0; regs->pc -= 8; break; case -ERESTART_RESTARTBLOCK: regs->flags |= PT_FLAGS_CALLER_SAVES; regs->regs[TREG_SYSCALL_NR] = __NR_restart_syscall; regs->pc -= 8; break; } } /* If there's no signal to deliver, just put the saved sigmask back. */ if (current_thread_info()->status & TS_RESTORE_SIGMASK) { current_thread_info()->status &= ~TS_RESTORE_SIGMASK; sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } done: /* Avoid double syscall restart if there are nested signals. 
*/ regs->faultnum = INT_SWINT_1_SIGRETURN; } int show_unhandled_signals = 1; static int __init crashinfo(char *str) { unsigned long val; const char *word; if (*str == '\0') val = 2; else if (*str != '=' || strict_strtoul(++str, 0, &val) != 0) return 0; show_unhandled_signals = val; switch (show_unhandled_signals) { case 0: word = "No"; break; case 1: word = "One-line"; break; default: word = "Detailed"; break; } pr_info("%s crash reports will be generated on the console\n", word); return 1; } __setup("crashinfo", crashinfo); static void dump_mem(void __user *address) { void __user *addr; enum { region_size = 256, bytes_per_line = 16 }; int i, j, k; int found_readable_mem = 0; pr_err("\n"); if (!access_ok(VERIFY_READ, address, 1)) { pr_err("Not dumping at address 0x%lx (kernel address)\n", (unsigned long)address); return; } addr = (void __user *) (((unsigned long)address & -bytes_per_line) - region_size/2); if (addr > address) addr = NULL; for (i = 0; i < region_size; addr += bytes_per_line, i += bytes_per_line) { unsigned char buf[bytes_per_line]; char line[100]; if (copy_from_user(buf, addr, bytes_per_line)) continue; if (!found_readable_mem) { pr_err("Dumping memory around address 0x%lx:\n", (unsigned long)address); found_readable_mem = 1; } j = sprintf(line, REGFMT":", (unsigned long)addr); for (k = 0; k < bytes_per_line; ++k) j += sprintf(&line[j], " %02x", buf[k]); pr_err("%s\n", line); } if (!found_readable_mem) pr_err("No readable memory around address 0x%lx\n", (unsigned long)address); } void trace_unhandled_signal(const char *type, struct pt_regs *regs, unsigned long address, int sig) { struct task_struct *tsk = current; if (show_unhandled_signals == 0) return; /* If the signal is handled, don't show it here. */ if (!is_global_init(tsk)) { void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; if (handler != SIG_IGN && handler != SIG_DFL) return; } /* Rate-limit the one-line output, not the detailed output. 
*/ if (show_unhandled_signals <= 1 && !printk_ratelimit()) return; printk("%s%s[%d]: %s at %lx pc "REGFMT" signal %d", task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, tsk->comm, task_pid_nr(tsk), type, address, regs->pc, sig); print_vma_addr(KERN_CONT " in ", regs->pc); printk(KERN_CONT "\n"); if (show_unhandled_signals > 1) { switch (sig) { case SIGILL: case SIGFPE: case SIGSEGV: case SIGBUS: pr_err("User crash: signal %d," " trap %ld, address 0x%lx\n", sig, regs->faultnum, address); show_regs(regs); dump_mem((void __user *)address); break; default: pr_err("User crash: signal %d, trap %ld\n", sig, regs->faultnum); break; } } }
gpl-2.0
1nv4d3r5/android_kernel_oppo_find5
arch/arm/mach-omap2/powerdomains3xxx_data.c
4714
8056
/* * OMAP3 powerdomain definitions * * Copyright (C) 2007-2008, 2011 Texas Instruments, Inc. * Copyright (C) 2007-2011 Nokia Corporation * * Paul Walmsley, Jouni Högander * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/bug.h> #include <plat/cpu.h> #include "powerdomain.h" #include "powerdomains2xxx_3xxx_data.h" #include "prcm-common.h" #include "prm2xxx_3xxx.h" #include "prm-regbits-34xx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-34xx.h" /* * 34XX-specific powerdomains, dependencies */ /* * Powerdomains */ static struct powerdomain iva2_pwrdm = { .name = "iva2_pwrdm", .prcm_offs = OMAP3430_IVA2_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 4, .pwrsts_mem_ret = { [0] = PWRSTS_OFF_RET, [1] = PWRSTS_OFF_RET, [2] = PWRSTS_OFF_RET, [3] = PWRSTS_OFF_RET, }, .pwrsts_mem_on = { [0] = PWRSTS_ON, [1] = PWRSTS_ON, [2] = PWRSTS_OFF_ON, [3] = PWRSTS_ON, }, .voltdm = { .name = "mpu_iva" }, }; static struct powerdomain mpu_3xxx_pwrdm = { .name = "mpu_pwrdm", .prcm_offs = MPU_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .flags = PWRDM_HAS_MPU_QUIRK, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_OFF_RET, }, .pwrsts_mem_on = { [0] = PWRSTS_OFF_ON, }, .voltdm = { .name = "mpu_iva" }, }; /* * The USBTLL Save-and-Restore mechanism is broken on * 3430s up to ES3.0 and 3630ES1.0. Hence this feature * needs to be disabled on these chips. * Refer: 3430 errata ID i459 and 3630 errata ID i579 * * Note: setting the SAR flag could help for errata ID i478 * which applies to 3430 <= ES3.1, but since the SAR feature * is broken, do not use it. 
*/

/*
 * CORE powerdomain for OMAP3430 revisions before ES3.1.
 * Two memory banks, both capable of OFF and RET.
 */
static struct powerdomain core_3xxx_pre_es3_1_pwrdm = {
	.name		  = "core_pwrdm",
	.prcm_offs	  = CORE_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 2,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/*
 * CORE powerdomain for OMAP3430 ES3.1 and later.  Identical to the
 * pre-ES3.1 variant except for the hardware save-and-restore flag
 * added for the erratum noted below.
 */
static struct powerdomain core_3xxx_es3_1_pwrdm = {
	.name		  = "core_pwrdm",
	.prcm_offs	  = CORE_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	/*
	 * Setting the SAR flag for errata ID i478 which applies
	 * to 3430 <= ES3.1
	 */
	.flags		  = PWRDM_HAS_HDWR_SAR, /* for USBTLL only */
	.banks		  = 2,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/* Display subsystem powerdomain; single memory bank. */
static struct powerdomain dss_pwrdm = {
	.name		  = "dss_pwrdm",
	.prcm_offs	  = OMAP3430_DSS_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,  /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/*
 * Although the 34XX TRM Rev K Table 4-371 notes that retention is a
 * possible SGX powerstate, the SGX device itself does not support
 * retention.
 */
static struct powerdomain sgx_pwrdm = {
	.name		  = "sgx_pwrdm",
	.prcm_offs	  = OMAP3430ES2_SGX_MOD,
	/* XXX This is accurate for 3430 SGX, but what about GFX? */
	.pwrsts		  = PWRSTS_OFF_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,  /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/* Camera subsystem powerdomain. */
static struct powerdomain cam_pwrdm = {
	.name		  = "cam_pwrdm",
	.prcm_offs	  = OMAP3430_CAM_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,  /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/* PER (peripheral) powerdomain. */
static struct powerdomain per_pwrdm = {
	.name		  = "per_pwrdm",
	.prcm_offs	  = OMAP3430_PER_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,  /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/* Emulation powerdomain: no power states declared, only the PRCM offset. */
static struct powerdomain emu_pwrdm = {
	.name		= "emu_pwrdm",
	.prcm_offs	= OMAP3430_EMU_MOD,
	.voltdm		= { .name = "core" },
};

/* NEON coprocessor powerdomain; shares the MPU/IVA voltage domain. */
static struct powerdomain neon_pwrdm = {
	.name		  = "neon_pwrdm",
	.prcm_offs	  = OMAP3430_NEON_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.voltdm		  = { .name = "mpu_iva" },
};

/* USB host powerdomain (ES2+ only; see the es2/es3 lists below). */
static struct powerdomain usbhost_pwrdm = {
	.name		  = "usbhost_pwrdm",
	.prcm_offs	  = OMAP3430ES2_USBHOST_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	/*
	 * REVISIT: Enabling usb host save and restore mechanism seems to
	 * leave the usb host domain permanently in ACTIVE mode after
	 * changing the usb host power domain state from OFF to active once.
	 * Disabling for now.
	 */
	/*.flags	  = PWRDM_HAS_HDWR_SAR,*/ /* for USBHOST ctrlr only */
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,  /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/* DPLL powerdomains: PRCM offset and voltage domain only. */
static struct powerdomain dpll1_pwrdm = {
	.name		= "dpll1_pwrdm",
	.prcm_offs	= MPU_MOD,
	.voltdm		= { .name = "mpu_iva" },
};

static struct powerdomain dpll2_pwrdm = {
	.name		= "dpll2_pwrdm",
	.prcm_offs	= OMAP3430_IVA2_MOD,
	.voltdm		= { .name = "mpu_iva" },
};

static struct powerdomain dpll3_pwrdm = {
	.name		= "dpll3_pwrdm",
	.prcm_offs	= PLL_MOD,
	.voltdm		= { .name = "core" },
};

static struct powerdomain dpll4_pwrdm = {
	.name		= "dpll4_pwrdm",
	.prcm_offs	= PLL_MOD,
	.voltdm		= { .name = "core" },
};

static struct powerdomain dpll5_pwrdm = {
	.name		= "dpll5_pwrdm",
	.prcm_offs	= PLL_MOD,
	.voltdm		= { .name = "core" },
};

/* As powerdomains are added or removed above, this list must also be changed */

/* Powerdomains common to every OMAP3430 revision. */
static struct powerdomain *powerdomains_omap3430_common[] __initdata = {
	&wkup_omap2_pwrdm,
	&iva2_pwrdm,
	&mpu_3xxx_pwrdm,
	&neon_pwrdm,
	&cam_pwrdm,
	&dss_pwrdm,
	&per_pwrdm,
	&emu_pwrdm,
	&dpll1_pwrdm,
	&dpll2_pwrdm,
	&dpll3_pwrdm,
	&dpll4_pwrdm,
	NULL
};

/* Extra powerdomains present only on OMAP3430 ES1.0. */
static struct powerdomain *powerdomains_omap3430es1[] __initdata = {
	&gfx_omap2_pwrdm,
	&core_3xxx_pre_es3_1_pwrdm,
	NULL
};

/* also includes 3630ES1.0 */
static struct powerdomain *powerdomains_omap3430es2_es3_0[] __initdata = {
	&core_3xxx_pre_es3_1_pwrdm,
	&sgx_pwrdm,
	&usbhost_pwrdm,
	&dpll5_pwrdm,
	NULL
};

/* also includes 3630ES1.1+ */
static struct powerdomain *powerdomains_omap3430es3_1plus[] __initdata = {
	&core_3xxx_es3_1_pwrdm,
	&sgx_pwrdm,
	&usbhost_pwrdm,
	&dpll5_pwrdm,
	NULL
};

/*
 * Register the OMAP3 powerdomain operations and the common powerdomain
 * list with the powerdomain core, then register the revision-specific
 * list selected by omap_rev().  No-op on non-OMAP34xx chips; unknown
 * revisions trigger a WARN.
 */
void __init omap3xxx_powerdomains_init(void)
{
	unsigned int rev;

	if (!cpu_is_omap34xx())
		return;

	pwrdm_register_platform_funcs(&omap3_pwrdm_operations);
	pwrdm_register_pwrdms(powerdomains_omap3430_common);

	rev = omap_rev();

	if (rev == OMAP3430_REV_ES1_0)
		pwrdm_register_pwrdms(powerdomains_omap3430es1);
	else if (rev == OMAP3430_REV_ES2_0 ||
		 rev == OMAP3430_REV_ES2_1 ||
		 rev == OMAP3430_REV_ES3_0 ||
		 rev == OMAP3630_REV_ES1_0)
		pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0);
	else if (rev == OMAP3430_REV_ES3_1 ||
		 rev == OMAP3430_REV_ES3_1_2 ||
		 rev == OMAP3517_REV_ES1_0 ||
		 rev == OMAP3517_REV_ES1_1 ||
		 rev == OMAP3630_REV_ES1_1 ||
		 rev == OMAP3630_REV_ES1_2)
		pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus);
	else
		WARN(1, "OMAP3 powerdomain init: unknown chip type\n");

	pwrdm_complete_init();
}
gpl-2.0
Myself5/android_kernel_sony_msm8974
drivers/leds/leds-qci-backlight.c
4714
1940
/* Quanta I2C Backlight Driver * * Copyright (C) 2009 Quanta Computer Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * * The Driver with I/O communications via the I2C Interface for ST15 platform. * And it is only working on the nuvoTon WPCE775x Embedded Controller. * */ #include <linux/module.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/wpce775x.h> #define EC_CMD_SET_BACKLIGHT 0xB1 static void qci_backlight_store(struct led_classdev *led_cdev, enum led_brightness val); static struct platform_device *bl_pdev; static struct led_classdev lcd_backlight = { .name = "lcd-backlight", .brightness = 147, .brightness_set = qci_backlight_store, }; static void qci_backlight_store(struct led_classdev *led_cdev, enum led_brightness val) { u16 value = val; wpce_smbus_write_word_data(EC_CMD_SET_BACKLIGHT, value); msleep(10); dev_dbg(&bl_pdev->dev, "[backlight_store] : value = %d\n", value); } static int __init qci_backlight_init(void) { int err = 0; bl_pdev = platform_device_register_simple("backlight", 0, NULL, 0); err = led_classdev_register(&bl_pdev->dev, &lcd_backlight); return err; } static void __exit qci_backlight_exit(void) { led_classdev_unregister(&lcd_backlight); platform_device_unregister(bl_pdev); } module_init(qci_backlight_init); module_exit(qci_backlight_exit); MODULE_AUTHOR("Quanta Computer Inc."); MODULE_DESCRIPTION("Quanta Embedded Controller I2C Backlight Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
imnuts/sch-i510_kernel
sound/drivers/opl3/opl3_midi.c
4714
22751
/* * Copyright (c) by Uros Bizjak <uros@kss-loka.si> * * Midi synth routines for OPL2/OPL3/OPL4 FM * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #undef DEBUG_ALLOC #undef DEBUG_MIDI #include "opl3_voice.h" #include <sound/asoundef.h> extern char snd_opl3_regmap[MAX_OPL2_VOICES][4]; extern int use_internal_drums; static void snd_opl3_note_off_unsafe(void *p, int note, int vel, struct snd_midi_channel *chan); /* * The next table looks magical, but it certainly is not. Its values have * been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception * for i=0. This log-table converts a linear volume-scaling (0..127) to a * logarithmic scaling as present in the FM-synthesizer chips. so : Volume * 64 = 0 db = relative volume 0 and: Volume 32 = -6 db = relative * volume -8 it was implemented as a table because it is only 128 bytes and * it saves a lot of log() calculations. 
(Rob Hooft <hooft@chem.ruu.nl>) */ static char opl3_volume_table[128] = { -63, -48, -40, -35, -32, -29, -27, -26, -24, -23, -21, -20, -19, -18, -18, -17, -16, -15, -15, -14, -13, -13, -12, -12, -11, -11, -10, -10, -10, -9, -9, -8, -8, -8, -7, -7, -7, -6, -6, -6, -5, -5, -5, -5, -4, -4, -4, -4, -3, -3, -3, -3, -2, -2, -2, -2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8 }; void snd_opl3_calc_volume(unsigned char *volbyte, int vel, struct snd_midi_channel *chan) { int oldvol, newvol, n; int volume; volume = (vel * chan->gm_volume * chan->gm_expression) / (127*127); if (volume > 127) volume = 127; oldvol = OPL3_TOTAL_LEVEL_MASK - (*volbyte & OPL3_TOTAL_LEVEL_MASK); newvol = opl3_volume_table[volume] + oldvol; if (newvol > OPL3_TOTAL_LEVEL_MASK) newvol = OPL3_TOTAL_LEVEL_MASK; else if (newvol < 0) newvol = 0; n = OPL3_TOTAL_LEVEL_MASK - (newvol & OPL3_TOTAL_LEVEL_MASK); *volbyte = (*volbyte & OPL3_KSL_MASK) | (n & OPL3_TOTAL_LEVEL_MASK); } /* * Converts the note frequency to block and fnum values for the FM chip */ static short opl3_note_table[16] = { 305, 323, /* for pitch bending, -2 semitones */ 343, 363, 385, 408, 432, 458, 485, 514, 544, 577, 611, 647, 686, 726 /* for pitch bending, +2 semitones */ }; static void snd_opl3_calc_pitch(unsigned char *fnum, unsigned char *blocknum, int note, struct snd_midi_channel *chan) { int block = ((note / 12) & 0x07) - 1; int idx = (note % 12) + 2; int freq; if (chan->midi_pitchbend) { int pitchbend = chan->midi_pitchbend; int segment; if (pitchbend > 0x1FFF) pitchbend = 0x1FFF; segment = pitchbend / 0x1000; freq = opl3_note_table[idx+segment]; freq += ((opl3_note_table[idx+segment+1] - freq) * (pitchbend % 0x1000)) / 0x1000; } else { freq = opl3_note_table[idx]; } *fnum = (unsigned char) freq; *blocknum = ((freq >> 8) & OPL3_FNUM_HIGH_MASK) | ((block << 2) 
& OPL3_BLOCKNUM_MASK); } #ifdef DEBUG_ALLOC static void debug_alloc(struct snd_opl3 *opl3, char *s, int voice) { int i; char *str = "x.24"; printk(KERN_DEBUG "time %.5i: %s [%.2i]: ", opl3->use_time, s, voice); for (i = 0; i < opl3->max_voices; i++) printk("%c", *(str + opl3->voices[i].state + 1)); printk("\n"); } #endif /* * Get a FM voice (channel) to play a note on. */ static int opl3_get_voice(struct snd_opl3 *opl3, int instr_4op, struct snd_midi_channel *chan) { int chan_4op_1; /* first voice for 4op instrument */ int chan_4op_2; /* second voice for 4op instrument */ struct snd_opl3_voice *vp, *vp2; unsigned int voice_time; int i; #ifdef DEBUG_ALLOC char *alloc_type[3] = { "FREE ", "CHEAP ", "EXPENSIVE" }; #endif /* This is our "allocation cost" table */ enum { FREE = 0, CHEAP, EXPENSIVE, END }; /* Keeps track of what we are finding */ struct best { unsigned int time; int voice; } best[END]; struct best *bp; for (i = 0; i < END; i++) { best[i].time = (unsigned int)(-1); /* XXX MAX_?INT really */; best[i].voice = -1; } /* Look through all the channels for the most suitable. */ for (i = 0; i < opl3->max_voices; i++) { vp = &opl3->voices[i]; if (vp->state == SNDRV_OPL3_ST_NOT_AVAIL) /* skip unavailable channels, allocated by drum voices or by bounded 4op voices) */ continue; voice_time = vp->time; bp = best; chan_4op_1 = ((i < 3) || (i > 8 && i < 12)); chan_4op_2 = ((i > 2 && i < 6) || (i > 11 && i < 15)); if (instr_4op) { /* allocate 4op voice */ /* skip channels unavailable to 4op instrument */ if (!chan_4op_1) continue; if (vp->state) /* kill one voice, CHEAP */ bp++; /* get state of bounded 2op channel to be allocated for 4op instrument */ vp2 = &opl3->voices[i + 3]; if (vp2->state == SNDRV_OPL3_ST_ON_2OP) { /* kill two voices, EXPENSIVE */ bp++; voice_time = (voice_time > vp->time) ? 
voice_time : vp->time; } } else { /* allocate 2op voice */ if ((chan_4op_1) || (chan_4op_2)) /* use bounded channels for 2op, CHEAP */ bp++; else if (vp->state) /* kill one voice on 2op channel, CHEAP */ bp++; /* raise kill cost to EXPENSIVE for all channels */ if (vp->state) bp++; } if (voice_time < bp->time) { bp->time = voice_time; bp->voice = i; } } for (i = 0; i < END; i++) { if (best[i].voice >= 0) { #ifdef DEBUG_ALLOC printk(KERN_DEBUG "%s %iop allocation on voice %i\n", alloc_type[i], instr_4op ? 4 : 2, best[i].voice); #endif return best[i].voice; } } /* not found */ return -1; } /* ------------------------------ */ /* * System timer interrupt function */ void snd_opl3_timer_func(unsigned long data) { struct snd_opl3 *opl3 = (struct snd_opl3 *)data; unsigned long flags; int again = 0; int i; spin_lock_irqsave(&opl3->voice_lock, flags); for (i = 0; i < opl3->max_voices; i++) { struct snd_opl3_voice *vp = &opl3->voices[i]; if (vp->state > 0 && vp->note_off_check) { if (vp->note_off == jiffies) snd_opl3_note_off_unsafe(opl3, vp->note, 0, vp->chan); else again++; } } spin_unlock_irqrestore(&opl3->voice_lock, flags); spin_lock_irqsave(&opl3->sys_timer_lock, flags); if (again) { opl3->tlist.expires = jiffies + 1; /* invoke again */ add_timer(&opl3->tlist); } else { opl3->sys_timer_status = 0; } spin_unlock_irqrestore(&opl3->sys_timer_lock, flags); } /* * Start system timer */ static void snd_opl3_start_timer(struct snd_opl3 *opl3) { unsigned long flags; spin_lock_irqsave(&opl3->sys_timer_lock, flags); if (! opl3->sys_timer_status) { opl3->tlist.expires = jiffies + 1; add_timer(&opl3->tlist); opl3->sys_timer_status = 1; } spin_unlock_irqrestore(&opl3->sys_timer_lock, flags); } /* ------------------------------ */ static int snd_opl3_oss_map[MAX_OPL3_VOICES] = { 0, 1, 2, 9, 10, 11, 6, 7, 8, 15, 16, 17, 3, 4 ,5, 12, 13, 14 }; /* * Start a note. 
*/ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan) { struct snd_opl3 *opl3; int instr_4op; int voice; struct snd_opl3_voice *vp, *vp2; unsigned short connect_mask; unsigned char connection; unsigned char vol_op[4]; int extra_prg = 0; unsigned short reg_side; unsigned char op_offset; unsigned char voice_offset; unsigned short opl3_reg; unsigned char reg_val; unsigned char prg, bank; int key = note; unsigned char fnum, blocknum; int i; struct fm_patch *patch; struct fm_instrument *fm; unsigned long flags; opl3 = p; #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG "Note on, ch %i, inst %i, note %i, vel %i\n", chan->number, chan->midi_program, note, vel); #endif /* in SYNTH mode, application takes care of voices */ /* in SEQ mode, drum voice numbers are notes on drum channel */ if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) { if (chan->drum_channel) { /* percussion instruments are located in bank 128 */ bank = 128; prg = note; } else { bank = chan->gm_bank_select; prg = chan->midi_program; } } else { /* Prepare for OSS mode */ if (chan->number >= MAX_OPL3_VOICES) return; /* OSS instruments are located in bank 127 */ bank = 127; prg = chan->midi_program; } spin_lock_irqsave(&opl3->voice_lock, flags); if (use_internal_drums) { snd_opl3_drum_switch(opl3, note, vel, 1, chan); spin_unlock_irqrestore(&opl3->voice_lock, flags); return; } __extra_prg: patch = snd_opl3_find_patch(opl3, prg, bank, 0); if (!patch) { spin_unlock_irqrestore(&opl3->voice_lock, flags); return; } fm = &patch->inst; switch (patch->type) { case FM_PATCH_OPL2: instr_4op = 0; break; case FM_PATCH_OPL3: if (opl3->hardware >= OPL3_HW_OPL3) { instr_4op = 1; break; } default: spin_unlock_irqrestore(&opl3->voice_lock, flags); return; } #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " --> OPL%i instrument: %s\n", instr_4op ? 
3 : 2, patch->name); #endif /* in SYNTH mode, application takes care of voices */ /* in SEQ mode, allocate voice on free OPL3 channel */ if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) { voice = opl3_get_voice(opl3, instr_4op, chan); } else { /* remap OSS voice */ voice = snd_opl3_oss_map[chan->number]; } if (voice < MAX_OPL2_VOICES) { /* Left register block for voices 0 .. 8 */ reg_side = OPL3_LEFT; voice_offset = voice; connect_mask = (OPL3_LEFT_4OP_0 << voice_offset) & 0x07; } else { /* Right register block for voices 9 .. 17 */ reg_side = OPL3_RIGHT; voice_offset = voice - MAX_OPL2_VOICES; connect_mask = (OPL3_RIGHT_4OP_0 << voice_offset) & 0x38; } /* kill voice on channel */ vp = &opl3->voices[voice]; if (vp->state > 0) { opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset); reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT; opl3->command(opl3, opl3_reg, reg_val); } if (instr_4op) { vp2 = &opl3->voices[voice + 3]; if (vp->state > 0) { opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset + 3); reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT; opl3->command(opl3, opl3_reg, reg_val); } } /* set connection register */ if (instr_4op) { if ((opl3->connection_reg ^ connect_mask) & connect_mask) { opl3->connection_reg |= connect_mask; /* set connection bit */ opl3_reg = OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT; opl3->command(opl3, opl3_reg, opl3->connection_reg); } } else { if ((opl3->connection_reg ^ ~connect_mask) & connect_mask) { opl3->connection_reg &= ~connect_mask; /* clear connection bit */ opl3_reg = OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT; opl3->command(opl3, opl3_reg, opl3->connection_reg); } } #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " --> setting OPL3 connection: 0x%x\n", opl3->connection_reg); #endif /* * calculate volume depending on connection * between FM operators (see include/opl3.h) */ for (i = 0; i < (instr_4op ? 
4 : 2); i++) vol_op[i] = fm->op[i].ksl_level; connection = fm->feedback_connection[0] & 0x01; if (instr_4op) { connection <<= 1; connection |= fm->feedback_connection[1] & 0x01; snd_opl3_calc_volume(&vol_op[3], vel, chan); switch (connection) { case 0x03: snd_opl3_calc_volume(&vol_op[2], vel, chan); /* fallthru */ case 0x02: snd_opl3_calc_volume(&vol_op[0], vel, chan); break; case 0x01: snd_opl3_calc_volume(&vol_op[1], vel, chan); } } else { snd_opl3_calc_volume(&vol_op[1], vel, chan); if (connection) snd_opl3_calc_volume(&vol_op[0], vel, chan); } /* Program the FM voice characteristics */ for (i = 0; i < (instr_4op ? 4 : 2); i++) { #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " --> programming operator %i\n", i); #endif op_offset = snd_opl3_regmap[voice_offset][i]; /* Set OPL3 AM_VIB register of requested voice/operator */ reg_val = fm->op[i].am_vib; opl3_reg = reg_side | (OPL3_REG_AM_VIB + op_offset); opl3->command(opl3, opl3_reg, reg_val); /* Set OPL3 KSL_LEVEL register of requested voice/operator */ reg_val = vol_op[i]; opl3_reg = reg_side | (OPL3_REG_KSL_LEVEL + op_offset); opl3->command(opl3, opl3_reg, reg_val); /* Set OPL3 ATTACK_DECAY register of requested voice/operator */ reg_val = fm->op[i].attack_decay; opl3_reg = reg_side | (OPL3_REG_ATTACK_DECAY + op_offset); opl3->command(opl3, opl3_reg, reg_val); /* Set OPL3 SUSTAIN_RELEASE register of requested voice/operator */ reg_val = fm->op[i].sustain_release; opl3_reg = reg_side | (OPL3_REG_SUSTAIN_RELEASE + op_offset); opl3->command(opl3, opl3_reg, reg_val); /* Select waveform */ reg_val = fm->op[i].wave_select; opl3_reg = reg_side | (OPL3_REG_WAVE_SELECT + op_offset); opl3->command(opl3, opl3_reg, reg_val); } /* Set operator feedback and 2op inter-operator connection */ reg_val = fm->feedback_connection[0]; /* Set output voice connection */ reg_val |= OPL3_STEREO_BITS; if (chan->gm_pan < 43) reg_val &= ~OPL3_VOICE_TO_RIGHT; if (chan->gm_pan > 85) reg_val &= ~OPL3_VOICE_TO_LEFT; opl3_reg = reg_side | 
(OPL3_REG_FEEDBACK_CONNECTION + voice_offset); opl3->command(opl3, opl3_reg, reg_val); if (instr_4op) { /* Set 4op inter-operator connection */ reg_val = fm->feedback_connection[1] & OPL3_CONNECTION_BIT; /* Set output voice connection */ reg_val |= OPL3_STEREO_BITS; if (chan->gm_pan < 43) reg_val &= ~OPL3_VOICE_TO_RIGHT; if (chan->gm_pan > 85) reg_val &= ~OPL3_VOICE_TO_LEFT; opl3_reg = reg_side | (OPL3_REG_FEEDBACK_CONNECTION + voice_offset + 3); opl3->command(opl3, opl3_reg, reg_val); } /* * Special treatment of percussion notes for fm: * Requested pitch is really program, and pitch for * device is whatever was specified in the patch library. */ if (fm->fix_key) note = fm->fix_key; /* * use transpose if defined in patch library */ if (fm->trnsps) note += (fm->trnsps - 64); snd_opl3_calc_pitch(&fnum, &blocknum, note, chan); /* Set OPL3 FNUM_LOW register of requested voice */ opl3_reg = reg_side | (OPL3_REG_FNUM_LOW + voice_offset); opl3->command(opl3, opl3_reg, fnum); opl3->voices[voice].keyon_reg = blocknum; /* Set output sound flag */ blocknum |= OPL3_KEYON_BIT; #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " --> trigger voice %i\n", voice); #endif /* Set OPL3 KEYON_BLOCK register of requested voice */ opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset); opl3->command(opl3, opl3_reg, blocknum); /* kill note after fixed duration (in centiseconds) */ if (fm->fix_dur) { opl3->voices[voice].note_off = jiffies + (fm->fix_dur * HZ) / 100; snd_opl3_start_timer(opl3); opl3->voices[voice].note_off_check = 1; } else opl3->voices[voice].note_off_check = 0; /* get extra pgm, but avoid possible loops */ extra_prg = (extra_prg) ? 
0 : fm->modes; /* do the bookkeeping */ vp->time = opl3->use_time++; vp->note = key; vp->chan = chan; if (instr_4op) { vp->state = SNDRV_OPL3_ST_ON_4OP; vp2 = &opl3->voices[voice + 3]; vp2->time = opl3->use_time++; vp2->note = key; vp2->chan = chan; vp2->state = SNDRV_OPL3_ST_NOT_AVAIL; } else { if (vp->state == SNDRV_OPL3_ST_ON_4OP) { /* 4op killed by 2op, release bounded voice */ vp2 = &opl3->voices[voice + 3]; vp2->time = opl3->use_time++; vp2->state = SNDRV_OPL3_ST_OFF; } vp->state = SNDRV_OPL3_ST_ON_2OP; } #ifdef DEBUG_ALLOC debug_alloc(opl3, "note on ", voice); #endif /* allocate extra program if specified in patch library */ if (extra_prg) { if (extra_prg > 128) { bank = 128; /* percussions start at 35 */ prg = extra_prg - 128 + 35 - 1; } else { bank = 0; prg = extra_prg - 1; } #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " *** allocating extra program\n"); #endif goto __extra_prg; } spin_unlock_irqrestore(&opl3->voice_lock, flags); } static void snd_opl3_kill_voice(struct snd_opl3 *opl3, int voice) { unsigned short reg_side; unsigned char voice_offset; unsigned short opl3_reg; struct snd_opl3_voice *vp, *vp2; if (snd_BUG_ON(voice >= MAX_OPL3_VOICES)) return; vp = &opl3->voices[voice]; if (voice < MAX_OPL2_VOICES) { /* Left register block for voices 0 .. 8 */ reg_side = OPL3_LEFT; voice_offset = voice; } else { /* Right register block for voices 9 .. 
17 */ reg_side = OPL3_RIGHT; voice_offset = voice - MAX_OPL2_VOICES; } /* kill voice */ #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " --> kill voice %i\n", voice); #endif opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset); /* clear Key ON bit */ opl3->command(opl3, opl3_reg, vp->keyon_reg); /* do the bookkeeping */ vp->time = opl3->use_time++; if (vp->state == SNDRV_OPL3_ST_ON_4OP) { vp2 = &opl3->voices[voice + 3]; vp2->time = opl3->use_time++; vp2->state = SNDRV_OPL3_ST_OFF; } vp->state = SNDRV_OPL3_ST_OFF; #ifdef DEBUG_ALLOC debug_alloc(opl3, "note off", voice); #endif } /* * Release a note in response to a midi note off. */ static void snd_opl3_note_off_unsafe(void *p, int note, int vel, struct snd_midi_channel *chan) { struct snd_opl3 *opl3; int voice; struct snd_opl3_voice *vp; opl3 = p; #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG "Note off, ch %i, inst %i, note %i\n", chan->number, chan->midi_program, note); #endif if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) { if (chan->drum_channel && use_internal_drums) { snd_opl3_drum_switch(opl3, note, vel, 0, chan); return; } /* this loop will hopefully kill all extra voices, because they are grouped by the same channel and note values */ for (voice = 0; voice < opl3->max_voices; voice++) { vp = &opl3->voices[voice]; if (vp->state > 0 && vp->chan == chan && vp->note == note) { snd_opl3_kill_voice(opl3, voice); } } } else { /* remap OSS voices */ if (chan->number < MAX_OPL3_VOICES) { voice = snd_opl3_oss_map[chan->number]; snd_opl3_kill_voice(opl3, voice); } } } void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan) { struct snd_opl3 *opl3 = p; unsigned long flags; spin_lock_irqsave(&opl3->voice_lock, flags); snd_opl3_note_off_unsafe(p, note, vel, chan); spin_unlock_irqrestore(&opl3->voice_lock, flags); } /* * key pressure change */ void snd_opl3_key_press(void *p, int note, int vel, struct snd_midi_channel *chan) { struct snd_opl3 *opl3; opl3 = p; #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG "Key 
pressure, ch#: %i, inst#: %i\n", chan->number, chan->midi_program); #endif } /* * terminate note */ void snd_opl3_terminate_note(void *p, int note, struct snd_midi_channel *chan) { struct snd_opl3 *opl3; opl3 = p; #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG "Terminate note, ch#: %i, inst#: %i\n", chan->number, chan->midi_program); #endif } static void snd_opl3_update_pitch(struct snd_opl3 *opl3, int voice) { unsigned short reg_side; unsigned char voice_offset; unsigned short opl3_reg; unsigned char fnum, blocknum; struct snd_opl3_voice *vp; if (snd_BUG_ON(voice >= MAX_OPL3_VOICES)) return; vp = &opl3->voices[voice]; if (vp->chan == NULL) return; /* not allocated? */ if (voice < MAX_OPL2_VOICES) { /* Left register block for voices 0 .. 8 */ reg_side = OPL3_LEFT; voice_offset = voice; } else { /* Right register block for voices 9 .. 17 */ reg_side = OPL3_RIGHT; voice_offset = voice - MAX_OPL2_VOICES; } snd_opl3_calc_pitch(&fnum, &blocknum, vp->note, vp->chan); /* Set OPL3 FNUM_LOW register of requested voice */ opl3_reg = reg_side | (OPL3_REG_FNUM_LOW + voice_offset); opl3->command(opl3, opl3_reg, fnum); vp->keyon_reg = blocknum; /* Set output sound flag */ blocknum |= OPL3_KEYON_BIT; /* Set OPL3 KEYON_BLOCK register of requested voice */ opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset); opl3->command(opl3, opl3_reg, blocknum); vp->time = opl3->use_time++; } /* * Update voice pitch controller */ static void snd_opl3_pitch_ctrl(struct snd_opl3 *opl3, struct snd_midi_channel *chan) { int voice; struct snd_opl3_voice *vp; unsigned long flags; spin_lock_irqsave(&opl3->voice_lock, flags); if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) { for (voice = 0; voice < opl3->max_voices; voice++) { vp = &opl3->voices[voice]; if (vp->state > 0 && vp->chan == chan) { snd_opl3_update_pitch(opl3, voice); } } } else { /* remap OSS voices */ if (chan->number < MAX_OPL3_VOICES) { voice = snd_opl3_oss_map[chan->number]; snd_opl3_update_pitch(opl3, voice); } } 
spin_unlock_irqrestore(&opl3->voice_lock, flags); } /* * Deal with a controller type event. This includes all types of * control events, not just the midi controllers */ void snd_opl3_control(void *p, int type, struct snd_midi_channel *chan) { struct snd_opl3 *opl3; opl3 = p; #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG "Controller, TYPE = %i, ch#: %i, inst#: %i\n", type, chan->number, chan->midi_program); #endif switch (type) { case MIDI_CTL_MSB_MODWHEEL: if (chan->control[MIDI_CTL_MSB_MODWHEEL] > 63) opl3->drum_reg |= OPL3_VIBRATO_DEPTH; else opl3->drum_reg &= ~OPL3_VIBRATO_DEPTH; opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION, opl3->drum_reg); break; case MIDI_CTL_E2_TREMOLO_DEPTH: if (chan->control[MIDI_CTL_E2_TREMOLO_DEPTH] > 63) opl3->drum_reg |= OPL3_TREMOLO_DEPTH; else opl3->drum_reg &= ~OPL3_TREMOLO_DEPTH; opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION, opl3->drum_reg); break; case MIDI_CTL_PITCHBEND: snd_opl3_pitch_ctrl(opl3, chan); break; } } /* * NRPN events */ void snd_opl3_nrpn(void *p, struct snd_midi_channel *chan, struct snd_midi_channel_set *chset) { struct snd_opl3 *opl3; opl3 = p; #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG "NRPN, ch#: %i, inst#: %i\n", chan->number, chan->midi_program); #endif } /* * receive sysex */ void snd_opl3_sysex(void *p, unsigned char *buf, int len, int parsed, struct snd_midi_channel_set *chset) { struct snd_opl3 *opl3; opl3 = p; #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG "SYSEX\n"); #endif }
gpl-2.0
faux123/Galaxy_Note_2
sound/drivers/opl3/opl3_midi.c
4714
22751
/* * Copyright (c) by Uros Bizjak <uros@kss-loka.si> * * Midi synth routines for OPL2/OPL3/OPL4 FM * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #undef DEBUG_ALLOC #undef DEBUG_MIDI #include "opl3_voice.h" #include <sound/asoundef.h> extern char snd_opl3_regmap[MAX_OPL2_VOICES][4]; extern int use_internal_drums; static void snd_opl3_note_off_unsafe(void *p, int note, int vel, struct snd_midi_channel *chan); /* * The next table looks magical, but it certainly is not. Its values have * been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception * for i=0. This log-table converts a linear volume-scaling (0..127) to a * logarithmic scaling as present in the FM-synthesizer chips. so : Volume * 64 = 0 db = relative volume 0 and: Volume 32 = -6 db = relative * volume -8 it was implemented as a table because it is only 128 bytes and * it saves a lot of log() calculations. 
(Rob Hooft <hooft@chem.ruu.nl>) */ static char opl3_volume_table[128] = { -63, -48, -40, -35, -32, -29, -27, -26, -24, -23, -21, -20, -19, -18, -18, -17, -16, -15, -15, -14, -13, -13, -12, -12, -11, -11, -10, -10, -10, -9, -9, -8, -8, -8, -7, -7, -7, -6, -6, -6, -5, -5, -5, -5, -4, -4, -4, -4, -3, -3, -3, -3, -2, -2, -2, -2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8 }; void snd_opl3_calc_volume(unsigned char *volbyte, int vel, struct snd_midi_channel *chan) { int oldvol, newvol, n; int volume; volume = (vel * chan->gm_volume * chan->gm_expression) / (127*127); if (volume > 127) volume = 127; oldvol = OPL3_TOTAL_LEVEL_MASK - (*volbyte & OPL3_TOTAL_LEVEL_MASK); newvol = opl3_volume_table[volume] + oldvol; if (newvol > OPL3_TOTAL_LEVEL_MASK) newvol = OPL3_TOTAL_LEVEL_MASK; else if (newvol < 0) newvol = 0; n = OPL3_TOTAL_LEVEL_MASK - (newvol & OPL3_TOTAL_LEVEL_MASK); *volbyte = (*volbyte & OPL3_KSL_MASK) | (n & OPL3_TOTAL_LEVEL_MASK); } /* * Converts the note frequency to block and fnum values for the FM chip */ static short opl3_note_table[16] = { 305, 323, /* for pitch bending, -2 semitones */ 343, 363, 385, 408, 432, 458, 485, 514, 544, 577, 611, 647, 686, 726 /* for pitch bending, +2 semitones */ }; static void snd_opl3_calc_pitch(unsigned char *fnum, unsigned char *blocknum, int note, struct snd_midi_channel *chan) { int block = ((note / 12) & 0x07) - 1; int idx = (note % 12) + 2; int freq; if (chan->midi_pitchbend) { int pitchbend = chan->midi_pitchbend; int segment; if (pitchbend > 0x1FFF) pitchbend = 0x1FFF; segment = pitchbend / 0x1000; freq = opl3_note_table[idx+segment]; freq += ((opl3_note_table[idx+segment+1] - freq) * (pitchbend % 0x1000)) / 0x1000; } else { freq = opl3_note_table[idx]; } *fnum = (unsigned char) freq; *blocknum = ((freq >> 8) & OPL3_FNUM_HIGH_MASK) | ((block << 2) 
& OPL3_BLOCKNUM_MASK); } #ifdef DEBUG_ALLOC static void debug_alloc(struct snd_opl3 *opl3, char *s, int voice) { int i; char *str = "x.24"; printk(KERN_DEBUG "time %.5i: %s [%.2i]: ", opl3->use_time, s, voice); for (i = 0; i < opl3->max_voices; i++) printk("%c", *(str + opl3->voices[i].state + 1)); printk("\n"); } #endif /* * Get a FM voice (channel) to play a note on. */ static int opl3_get_voice(struct snd_opl3 *opl3, int instr_4op, struct snd_midi_channel *chan) { int chan_4op_1; /* first voice for 4op instrument */ int chan_4op_2; /* second voice for 4op instrument */ struct snd_opl3_voice *vp, *vp2; unsigned int voice_time; int i; #ifdef DEBUG_ALLOC char *alloc_type[3] = { "FREE ", "CHEAP ", "EXPENSIVE" }; #endif /* This is our "allocation cost" table */ enum { FREE = 0, CHEAP, EXPENSIVE, END }; /* Keeps track of what we are finding */ struct best { unsigned int time; int voice; } best[END]; struct best *bp; for (i = 0; i < END; i++) { best[i].time = (unsigned int)(-1); /* XXX MAX_?INT really */; best[i].voice = -1; } /* Look through all the channels for the most suitable. */ for (i = 0; i < opl3->max_voices; i++) { vp = &opl3->voices[i]; if (vp->state == SNDRV_OPL3_ST_NOT_AVAIL) /* skip unavailable channels, allocated by drum voices or by bounded 4op voices) */ continue; voice_time = vp->time; bp = best; chan_4op_1 = ((i < 3) || (i > 8 && i < 12)); chan_4op_2 = ((i > 2 && i < 6) || (i > 11 && i < 15)); if (instr_4op) { /* allocate 4op voice */ /* skip channels unavailable to 4op instrument */ if (!chan_4op_1) continue; if (vp->state) /* kill one voice, CHEAP */ bp++; /* get state of bounded 2op channel to be allocated for 4op instrument */ vp2 = &opl3->voices[i + 3]; if (vp2->state == SNDRV_OPL3_ST_ON_2OP) { /* kill two voices, EXPENSIVE */ bp++; voice_time = (voice_time > vp->time) ? 
voice_time : vp->time; } } else { /* allocate 2op voice */ if ((chan_4op_1) || (chan_4op_2)) /* use bounded channels for 2op, CHEAP */ bp++; else if (vp->state) /* kill one voice on 2op channel, CHEAP */ bp++; /* raise kill cost to EXPENSIVE for all channels */ if (vp->state) bp++; } if (voice_time < bp->time) { bp->time = voice_time; bp->voice = i; } } for (i = 0; i < END; i++) { if (best[i].voice >= 0) { #ifdef DEBUG_ALLOC printk(KERN_DEBUG "%s %iop allocation on voice %i\n", alloc_type[i], instr_4op ? 4 : 2, best[i].voice); #endif return best[i].voice; } } /* not found */ return -1; } /* ------------------------------ */ /* * System timer interrupt function */ void snd_opl3_timer_func(unsigned long data) { struct snd_opl3 *opl3 = (struct snd_opl3 *)data; unsigned long flags; int again = 0; int i; spin_lock_irqsave(&opl3->voice_lock, flags); for (i = 0; i < opl3->max_voices; i++) { struct snd_opl3_voice *vp = &opl3->voices[i]; if (vp->state > 0 && vp->note_off_check) { if (vp->note_off == jiffies) snd_opl3_note_off_unsafe(opl3, vp->note, 0, vp->chan); else again++; } } spin_unlock_irqrestore(&opl3->voice_lock, flags); spin_lock_irqsave(&opl3->sys_timer_lock, flags); if (again) { opl3->tlist.expires = jiffies + 1; /* invoke again */ add_timer(&opl3->tlist); } else { opl3->sys_timer_status = 0; } spin_unlock_irqrestore(&opl3->sys_timer_lock, flags); } /* * Start system timer */ static void snd_opl3_start_timer(struct snd_opl3 *opl3) { unsigned long flags; spin_lock_irqsave(&opl3->sys_timer_lock, flags); if (! opl3->sys_timer_status) { opl3->tlist.expires = jiffies + 1; add_timer(&opl3->tlist); opl3->sys_timer_status = 1; } spin_unlock_irqrestore(&opl3->sys_timer_lock, flags); } /* ------------------------------ */ static int snd_opl3_oss_map[MAX_OPL3_VOICES] = { 0, 1, 2, 9, 10, 11, 6, 7, 8, 15, 16, 17, 3, 4 ,5, 12, 13, 14 }; /* * Start a note. 
*/ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan) { struct snd_opl3 *opl3; int instr_4op; int voice; struct snd_opl3_voice *vp, *vp2; unsigned short connect_mask; unsigned char connection; unsigned char vol_op[4]; int extra_prg = 0; unsigned short reg_side; unsigned char op_offset; unsigned char voice_offset; unsigned short opl3_reg; unsigned char reg_val; unsigned char prg, bank; int key = note; unsigned char fnum, blocknum; int i; struct fm_patch *patch; struct fm_instrument *fm; unsigned long flags; opl3 = p; #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG "Note on, ch %i, inst %i, note %i, vel %i\n", chan->number, chan->midi_program, note, vel); #endif /* in SYNTH mode, application takes care of voices */ /* in SEQ mode, drum voice numbers are notes on drum channel */ if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) { if (chan->drum_channel) { /* percussion instruments are located in bank 128 */ bank = 128; prg = note; } else { bank = chan->gm_bank_select; prg = chan->midi_program; } } else { /* Prepare for OSS mode */ if (chan->number >= MAX_OPL3_VOICES) return; /* OSS instruments are located in bank 127 */ bank = 127; prg = chan->midi_program; } spin_lock_irqsave(&opl3->voice_lock, flags); if (use_internal_drums) { snd_opl3_drum_switch(opl3, note, vel, 1, chan); spin_unlock_irqrestore(&opl3->voice_lock, flags); return; } __extra_prg: patch = snd_opl3_find_patch(opl3, prg, bank, 0); if (!patch) { spin_unlock_irqrestore(&opl3->voice_lock, flags); return; } fm = &patch->inst; switch (patch->type) { case FM_PATCH_OPL2: instr_4op = 0; break; case FM_PATCH_OPL3: if (opl3->hardware >= OPL3_HW_OPL3) { instr_4op = 1; break; } default: spin_unlock_irqrestore(&opl3->voice_lock, flags); return; } #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " --> OPL%i instrument: %s\n", instr_4op ? 
3 : 2, patch->name); #endif /* in SYNTH mode, application takes care of voices */ /* in SEQ mode, allocate voice on free OPL3 channel */ if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) { voice = opl3_get_voice(opl3, instr_4op, chan); } else { /* remap OSS voice */ voice = snd_opl3_oss_map[chan->number]; } if (voice < MAX_OPL2_VOICES) { /* Left register block for voices 0 .. 8 */ reg_side = OPL3_LEFT; voice_offset = voice; connect_mask = (OPL3_LEFT_4OP_0 << voice_offset) & 0x07; } else { /* Right register block for voices 9 .. 17 */ reg_side = OPL3_RIGHT; voice_offset = voice - MAX_OPL2_VOICES; connect_mask = (OPL3_RIGHT_4OP_0 << voice_offset) & 0x38; } /* kill voice on channel */ vp = &opl3->voices[voice]; if (vp->state > 0) { opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset); reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT; opl3->command(opl3, opl3_reg, reg_val); } if (instr_4op) { vp2 = &opl3->voices[voice + 3]; if (vp->state > 0) { opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset + 3); reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT; opl3->command(opl3, opl3_reg, reg_val); } } /* set connection register */ if (instr_4op) { if ((opl3->connection_reg ^ connect_mask) & connect_mask) { opl3->connection_reg |= connect_mask; /* set connection bit */ opl3_reg = OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT; opl3->command(opl3, opl3_reg, opl3->connection_reg); } } else { if ((opl3->connection_reg ^ ~connect_mask) & connect_mask) { opl3->connection_reg &= ~connect_mask; /* clear connection bit */ opl3_reg = OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT; opl3->command(opl3, opl3_reg, opl3->connection_reg); } } #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " --> setting OPL3 connection: 0x%x\n", opl3->connection_reg); #endif /* * calculate volume depending on connection * between FM operators (see include/opl3.h) */ for (i = 0; i < (instr_4op ? 
4 : 2); i++) vol_op[i] = fm->op[i].ksl_level; connection = fm->feedback_connection[0] & 0x01; if (instr_4op) { connection <<= 1; connection |= fm->feedback_connection[1] & 0x01; snd_opl3_calc_volume(&vol_op[3], vel, chan); switch (connection) { case 0x03: snd_opl3_calc_volume(&vol_op[2], vel, chan); /* fallthru */ case 0x02: snd_opl3_calc_volume(&vol_op[0], vel, chan); break; case 0x01: snd_opl3_calc_volume(&vol_op[1], vel, chan); } } else { snd_opl3_calc_volume(&vol_op[1], vel, chan); if (connection) snd_opl3_calc_volume(&vol_op[0], vel, chan); } /* Program the FM voice characteristics */ for (i = 0; i < (instr_4op ? 4 : 2); i++) { #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " --> programming operator %i\n", i); #endif op_offset = snd_opl3_regmap[voice_offset][i]; /* Set OPL3 AM_VIB register of requested voice/operator */ reg_val = fm->op[i].am_vib; opl3_reg = reg_side | (OPL3_REG_AM_VIB + op_offset); opl3->command(opl3, opl3_reg, reg_val); /* Set OPL3 KSL_LEVEL register of requested voice/operator */ reg_val = vol_op[i]; opl3_reg = reg_side | (OPL3_REG_KSL_LEVEL + op_offset); opl3->command(opl3, opl3_reg, reg_val); /* Set OPL3 ATTACK_DECAY register of requested voice/operator */ reg_val = fm->op[i].attack_decay; opl3_reg = reg_side | (OPL3_REG_ATTACK_DECAY + op_offset); opl3->command(opl3, opl3_reg, reg_val); /* Set OPL3 SUSTAIN_RELEASE register of requested voice/operator */ reg_val = fm->op[i].sustain_release; opl3_reg = reg_side | (OPL3_REG_SUSTAIN_RELEASE + op_offset); opl3->command(opl3, opl3_reg, reg_val); /* Select waveform */ reg_val = fm->op[i].wave_select; opl3_reg = reg_side | (OPL3_REG_WAVE_SELECT + op_offset); opl3->command(opl3, opl3_reg, reg_val); } /* Set operator feedback and 2op inter-operator connection */ reg_val = fm->feedback_connection[0]; /* Set output voice connection */ reg_val |= OPL3_STEREO_BITS; if (chan->gm_pan < 43) reg_val &= ~OPL3_VOICE_TO_RIGHT; if (chan->gm_pan > 85) reg_val &= ~OPL3_VOICE_TO_LEFT; opl3_reg = reg_side | 
(OPL3_REG_FEEDBACK_CONNECTION + voice_offset); opl3->command(opl3, opl3_reg, reg_val); if (instr_4op) { /* Set 4op inter-operator connection */ reg_val = fm->feedback_connection[1] & OPL3_CONNECTION_BIT; /* Set output voice connection */ reg_val |= OPL3_STEREO_BITS; if (chan->gm_pan < 43) reg_val &= ~OPL3_VOICE_TO_RIGHT; if (chan->gm_pan > 85) reg_val &= ~OPL3_VOICE_TO_LEFT; opl3_reg = reg_side | (OPL3_REG_FEEDBACK_CONNECTION + voice_offset + 3); opl3->command(opl3, opl3_reg, reg_val); } /* * Special treatment of percussion notes for fm: * Requested pitch is really program, and pitch for * device is whatever was specified in the patch library. */ if (fm->fix_key) note = fm->fix_key; /* * use transpose if defined in patch library */ if (fm->trnsps) note += (fm->trnsps - 64); snd_opl3_calc_pitch(&fnum, &blocknum, note, chan); /* Set OPL3 FNUM_LOW register of requested voice */ opl3_reg = reg_side | (OPL3_REG_FNUM_LOW + voice_offset); opl3->command(opl3, opl3_reg, fnum); opl3->voices[voice].keyon_reg = blocknum; /* Set output sound flag */ blocknum |= OPL3_KEYON_BIT; #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " --> trigger voice %i\n", voice); #endif /* Set OPL3 KEYON_BLOCK register of requested voice */ opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset); opl3->command(opl3, opl3_reg, blocknum); /* kill note after fixed duration (in centiseconds) */ if (fm->fix_dur) { opl3->voices[voice].note_off = jiffies + (fm->fix_dur * HZ) / 100; snd_opl3_start_timer(opl3); opl3->voices[voice].note_off_check = 1; } else opl3->voices[voice].note_off_check = 0; /* get extra pgm, but avoid possible loops */ extra_prg = (extra_prg) ? 
0 : fm->modes; /* do the bookkeeping */ vp->time = opl3->use_time++; vp->note = key; vp->chan = chan; if (instr_4op) { vp->state = SNDRV_OPL3_ST_ON_4OP; vp2 = &opl3->voices[voice + 3]; vp2->time = opl3->use_time++; vp2->note = key; vp2->chan = chan; vp2->state = SNDRV_OPL3_ST_NOT_AVAIL; } else { if (vp->state == SNDRV_OPL3_ST_ON_4OP) { /* 4op killed by 2op, release bounded voice */ vp2 = &opl3->voices[voice + 3]; vp2->time = opl3->use_time++; vp2->state = SNDRV_OPL3_ST_OFF; } vp->state = SNDRV_OPL3_ST_ON_2OP; } #ifdef DEBUG_ALLOC debug_alloc(opl3, "note on ", voice); #endif /* allocate extra program if specified in patch library */ if (extra_prg) { if (extra_prg > 128) { bank = 128; /* percussions start at 35 */ prg = extra_prg - 128 + 35 - 1; } else { bank = 0; prg = extra_prg - 1; } #ifdef DEBUG_MIDI snd_printk(KERN_DEBUG " *** allocating extra program\n"); #endif goto __extra_prg; } spin_unlock_irqrestore(&opl3->voice_lock, flags); } static void snd_opl3_kill_voice(struct snd_opl3 *opl3, int voice) { unsigned short reg_side; unsigned char voice_offset; unsigned short opl3_reg; struct snd_opl3_voice *vp, *vp2; if (snd_BUG_ON(voice >= MAX_OPL3_VOICES)) return; vp = &opl3->voices[voice]; if (voice < MAX_OPL2_VOICES) { /* Left register block for voices 0 .. 8 */ reg_side = OPL3_LEFT; voice_offset = voice; } else { /* Right register block for voices 9 .. 
 17 */
		reg_side = OPL3_RIGHT;
		voice_offset = voice - MAX_OPL2_VOICES;
	}

	/* kill voice */
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG " --> kill voice %i\n", voice);
#endif
	opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset);
	/* clear Key ON bit */
	opl3->command(opl3, opl3_reg, vp->keyon_reg);

	/* do the bookkeeping */
	vp->time = opl3->use_time++;

	/* a 4op voice also frees its bounded partner voice */
	if (vp->state == SNDRV_OPL3_ST_ON_4OP) {
		vp2 = &opl3->voices[voice + 3];
		vp2->time = opl3->use_time++;
		vp2->state = SNDRV_OPL3_ST_OFF;
	}
	vp->state = SNDRV_OPL3_ST_OFF;
#ifdef DEBUG_ALLOC
	debug_alloc(opl3, "note off", voice);
#endif

}

/*
 * Release a note in response to a midi note off.
 * Caller must hold opl3->voice_lock (see snd_opl3_note_off wrapper).
 */
static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
				     struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3;
	int voice;
	struct snd_opl3_voice *vp;

	opl3 = p;

#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Note off, ch %i, inst %i, note %i\n",
		   chan->number, chan->midi_program, note);
#endif

	if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) {
		if (chan->drum_channel && use_internal_drums) {
			snd_opl3_drum_switch(opl3, note, vel, 0, chan);
			return;
		}
		/* this loop will hopefully kill all extra voices, because
		   they are grouped by the same channel and note values */
		for (voice = 0; voice < opl3->max_voices; voice++) {
			vp = &opl3->voices[voice];
			if (vp->state > 0 && vp->chan == chan &&
			    vp->note == note) {
				snd_opl3_kill_voice(opl3, voice);
			}
		}
	} else {
		/* remap OSS voices */
		if (chan->number < MAX_OPL3_VOICES) {
			voice = snd_opl3_oss_map[chan->number];
			snd_opl3_kill_voice(opl3, voice);
		}
	}
}

/* Locked wrapper around snd_opl3_note_off_unsafe() */
void snd_opl3_note_off(void *p, int note, int vel,
		       struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3 = p;
	unsigned long flags;

	spin_lock_irqsave(&opl3->voice_lock, flags);
	snd_opl3_note_off_unsafe(p, note, vel, chan);
	spin_unlock_irqrestore(&opl3->voice_lock, flags);
}

/*
 * key pressure change
 * (debug stub: OPL3 has no per-key aftertouch, nothing is programmed)
 */
void snd_opl3_key_press(void *p, int note, int vel, struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Key pressure, ch#: %i, inst#: %i\n",
		   chan->number, chan->midi_program);
#endif
}

/*
 * terminate note
 * (debug stub: no hardware action performed)
 */
void snd_opl3_terminate_note(void *p, int note, struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Terminate note, ch#: %i, inst#: %i\n",
		   chan->number, chan->midi_program);
#endif
}

/*
 * Recompute and program the pitch (FNUM/BLOCK) of one sounding voice,
 * then re-trigger it with the KEYON bit set.
 */
static void snd_opl3_update_pitch(struct snd_opl3 *opl3, int voice)
{
	unsigned short reg_side;
	unsigned char voice_offset;
	unsigned short opl3_reg;

	unsigned char fnum, blocknum;

	struct snd_opl3_voice *vp;

	if (snd_BUG_ON(voice >= MAX_OPL3_VOICES))
		return;
	vp = &opl3->voices[voice];
	if (vp->chan == NULL)
		return; /* not allocated? */

	if (voice < MAX_OPL2_VOICES) {
		/* Left register block for voices 0 .. 8 */
		reg_side = OPL3_LEFT;
		voice_offset = voice;
	} else {
		/* Right register block for voices 9 .. 17 */
		reg_side = OPL3_RIGHT;
		voice_offset = voice - MAX_OPL2_VOICES;
	}

	snd_opl3_calc_pitch(&fnum, &blocknum, vp->note, vp->chan);

	/* Set OPL3 FNUM_LOW register of requested voice */
	opl3_reg = reg_side | (OPL3_REG_FNUM_LOW + voice_offset);
	opl3->command(opl3, opl3_reg, fnum);

	vp->keyon_reg = blocknum;

	/* Set output sound flag */
	blocknum |= OPL3_KEYON_BIT;

	/* Set OPL3 KEYON_BLOCK register of requested voice */
	opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset);
	opl3->command(opl3, opl3_reg, blocknum);

	vp->time = opl3->use_time++;
}

/*
 * Update voice pitch controller: re-pitches every voice sounding on the
 * given channel (SEQ mode) or the remapped OSS voice (SYNTH mode).
 */
static void snd_opl3_pitch_ctrl(struct snd_opl3 *opl3, struct snd_midi_channel *chan)
{
	int voice;
	struct snd_opl3_voice *vp;

	unsigned long flags;

	spin_lock_irqsave(&opl3->voice_lock, flags);

	if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) {
		for (voice = 0; voice < opl3->max_voices; voice++) {
			vp = &opl3->voices[voice];
			if (vp->state > 0 && vp->chan == chan) {
				snd_opl3_update_pitch(opl3, voice);
			}
		}
	} else {
		/* remap OSS voices */
		if (chan->number < MAX_OPL3_VOICES) {
			voice = snd_opl3_oss_map[chan->number];
			snd_opl3_update_pitch(opl3, voice);
		}
	}
	spin_unlock_irqrestore(&opl3->voice_lock, flags);
}

/*
 * Deal with a controller type event.  This includes all types of
 * control events, not just the midi controllers
 */
void snd_opl3_control(void *p, int type, struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Controller, TYPE = %i, ch#: %i, inst#: %i\n",
		   type, chan->number, chan->midi_program);
#endif

	switch (type) {
	case MIDI_CTL_MSB_MODWHEEL:
		/* mod wheel toggles the chip-global vibrato depth bit */
		if (chan->control[MIDI_CTL_MSB_MODWHEEL] > 63)
			opl3->drum_reg |= OPL3_VIBRATO_DEPTH;
		else
			opl3->drum_reg &= ~OPL3_VIBRATO_DEPTH;
		opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION,
				 opl3->drum_reg);
		break;
	case MIDI_CTL_E2_TREMOLO_DEPTH:
		/* likewise for the chip-global tremolo depth bit */
		if (chan->control[MIDI_CTL_E2_TREMOLO_DEPTH] > 63)
			opl3->drum_reg |= OPL3_TREMOLO_DEPTH;
		else
			opl3->drum_reg &= ~OPL3_TREMOLO_DEPTH;
		opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION,
				 opl3->drum_reg);
		break;
	case MIDI_CTL_PITCHBEND:
		snd_opl3_pitch_ctrl(opl3, chan);
		break;
	}
}

/*
 * NRPN events
 * (debug stub: not supported by this driver)
 */
void snd_opl3_nrpn(void *p, struct snd_midi_channel *chan,
		   struct snd_midi_channel_set *chset)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "NRPN, ch#: %i, inst#: %i\n",
		   chan->number, chan->midi_program);
#endif
}

/*
 * receive sysex
 * (debug stub: sysex data is ignored)
 */
void snd_opl3_sysex(void *p, unsigned char *buf, int len,
		    int parsed, struct snd_midi_channel_set *chset)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "SYSEX\n");
#endif
}
gpl-2.0
schqiushui/kernel_kk443_sense_mec
drivers/staging/usbip/userspace/src/usbipd.c
5738
11873
/*
 * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
 *               2005-2007 Takahiro Hirofuchi
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifdef HAVE_CONFIG_H
#include "../config.h"
#endif

#include <errno.h>
#include <unistd.h>
#include <netdb.h>
#include <string.h>
#include <stdlib.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifdef HAVE_LIBWRAP
#include <tcpd.h>
#endif

#define _GNU_SOURCE
#include <getopt.h>
#include <glib.h>
#include <signal.h>

#include "usbip_host_driver.h"
#include "usbip_common.h"
#include "usbip_network.h"

#undef PROGNAME
#define PROGNAME "usbipd"
/* upper bound on simultaneously opened listening sockets */
#define MAXSOCKFD 20

/* glib main loop; quit from signal_handler() on SIGINT/SIGTERM */
GMainLoop *main_loop;

static const char usbip_version_string[] = PACKAGE_STRING;

static const char usbipd_help_string[] =
	"usage: usbipd [options] \n"
	" -D, --daemon \n"
	" Run as a daemon process. \n"
	" \n"
	" -d, --debug \n"
	" Print debugging information. \n"
	" \n"
	" -h, --help \n"
	" Print this help. \n"
	" \n"
	" -v, --version \n"
	" Show version. \n";

/* Print the usage text above to stdout. */
static void usbipd_help(void)
{
	printf("%s\n", usbipd_help_string);
}

/*
 * Handle an OP_REQ_IMPORT PDU on sockfd: look up the requested bus id
 * among the exported devices, hand the socket to the host driver on
 * success, and reply with OP_REP_IMPORT (status + packed device info).
 * Returns 0 on success, -1 on any failure.
 * (The function continues past this chunk boundary; the tail fragment
 * below belongs to it.)
 */
static int recv_request_import(int sockfd)
{
	struct op_import_request req;
	struct op_common reply;
	struct usbip_exported_device *edev;
	struct usbip_usb_device pdu_udev;
	int found = 0;
	int error = 0;
	int rc;

	memset(&req, 0, sizeof(req));
	memset(&reply, 0, sizeof(reply));

	rc = usbip_net_recv(sockfd, &req, sizeof(req));
	if (rc < 0) {
		dbg("usbip_net_recv failed: import request");
		return -1;
	}
	/* byte-swap the request from network order (0 = unpack) */
	PACK_OP_IMPORT_REQUEST(0, &req);

	dlist_for_each_data(host_driver->edev_list, edev,
			    struct usbip_exported_device) {
		if (!strncmp(req.busid, edev->udev.busid, SYSFS_BUS_ID_SIZE)) {
			info("found requested device: %s", req.busid);
			found = 1;
			break;
		}
	}

	if (found) {
		/* should set TCP_NODELAY for usbip */
		usbip_net_set_nodelay(sockfd);

		/* export device needs a TCP/IP socket descriptor */
		rc = usbip_host_export_device(edev, sockfd);
		if (rc < 0)
			error = 1;
	} else {
		info("requested device not found: %s", req.busid);
		error = 1;
	}

	rc = usbip_net_send_op_common(sockfd, OP_REP_IMPORT,
				      (!error ?
 ST_OK : ST_NA));
	if (rc < 0) {
		dbg("usbip_net_send_op_common failed: %#0x", OP_REP_IMPORT);
		return -1;
	}

	if (error) {
		dbg("import request busid %s: failed", req.busid);
		return -1;
	}

	/* send the packed device descriptor only on success */
	memcpy(&pdu_udev, &edev->udev, sizeof(pdu_udev));
	usbip_net_pack_usb_device(1, &pdu_udev);

	rc = usbip_net_send(sockfd, &pdu_udev, sizeof(pdu_udev));
	if (rc < 0) {
		dbg("usbip_net_send failed: devinfo");
		return -1;
	}

	dbg("import request busid %s: complete", req.busid);

	return 0;
}

/*
 * Send an OP_REP_DEVLIST reply on connfd: a common header, a device
 * count, then a packed descriptor for each exported device followed by
 * its interface descriptors.  Returns 0 on success, -1 on send failure.
 */
static int send_reply_devlist(int connfd)
{
	struct usbip_exported_device *edev;
	struct usbip_usb_device pdu_udev;
	struct usbip_usb_interface pdu_uinf;
	struct op_devlist_reply reply;
	int i;
	int rc;

	reply.ndev = 0;
	/* number of exported devices */
	dlist_for_each_data(host_driver->edev_list, edev,
			    struct usbip_exported_device) {
		reply.ndev += 1;
	}
	info("exportable devices: %d", reply.ndev);

	rc = usbip_net_send_op_common(connfd, OP_REP_DEVLIST, ST_OK);
	if (rc < 0) {
		dbg("usbip_net_send_op_common failed: %#0x", OP_REP_DEVLIST);
		return -1;
	}
	/* byte-swap the reply to network order (1 = pack) */
	PACK_OP_DEVLIST_REPLY(1, &reply);

	rc = usbip_net_send(connfd, &reply, sizeof(reply));
	if (rc < 0) {
		dbg("usbip_net_send failed: %#0x", OP_REP_DEVLIST);
		return -1;
	}

	dlist_for_each_data(host_driver->edev_list, edev,
			    struct usbip_exported_device) {
		dump_usb_device(&edev->udev);
		memcpy(&pdu_udev, &edev->udev, sizeof(pdu_udev));
		usbip_net_pack_usb_device(1, &pdu_udev);

		rc = usbip_net_send(connfd, &pdu_udev, sizeof(pdu_udev));
		if (rc < 0) {
			dbg("usbip_net_send failed: pdu_udev");
			return -1;
		}

		for (i = 0; i < edev->udev.bNumInterfaces; i++) {
			dump_usb_interface(&edev->uinf[i]);
			memcpy(&pdu_uinf, &edev->uinf[i], sizeof(pdu_uinf));
			usbip_net_pack_usb_interface(1, &pdu_uinf);

			rc = usbip_net_send(connfd, &pdu_uinf,
					    sizeof(pdu_uinf));
			if (rc < 0) {
				dbg("usbip_net_send failed: pdu_uinf");
				return -1;
			}
		}
	}

	return 0;
}

/*
 * Handle an OP_REQ_DEVLIST PDU: consume the (empty) request body, then
 * answer with the full exported-device list.
 */
static int recv_request_devlist(int connfd)
{
	struct op_devlist_request req;
	int rc;

	memset(&req, 0, sizeof(req));

	rc = usbip_net_recv(connfd, &req, sizeof(req));
	if (rc < 0) {
		dbg("usbip_net_recv failed: devlist request");
		return -1;
	}

	rc = send_reply_devlist(connfd);
	if (rc < 0) {
		dbg("send_reply_devlist failed");
		return -1;
	}

	return 0;
}

/*
 * Read one request PDU from connfd, refresh the exported-device list
 * and dispatch on the opcode.  Unknown opcodes are rejected with -1.
 */
static int recv_pdu(int connfd)
{
	uint16_t code = OP_UNSPEC;
	int ret;

	ret = usbip_net_recv_op_common(connfd, &code);
	if (ret < 0) {
		dbg("could not receive opcode: %#0x", code);
		return -1;
	}

	/* re-scan sysfs so the reply reflects current device state */
	ret = usbip_host_refresh_device_list();
	if (ret < 0) {
		dbg("could not refresh device list: %d", ret);
		return -1;
	}

	info("received request: %#0x(%d)", code, connfd);
	switch (code) {
	case OP_REQ_DEVLIST:
		ret = recv_request_devlist(connfd);
		break;
	case OP_REQ_IMPORT:
		ret = recv_request_import(connfd);
		break;
	case OP_REQ_DEVINFO:
	case OP_REQ_CRYPKEY:
	default:
		err("received an unknown opcode: %#0x", code);
		ret = -1;
	}

	if (ret == 0)
		info("request %#0x(%d): complete", code, connfd);
	else
		info("request %#0x(%d): failed", code, connfd);

	return ret;
}

#ifdef HAVE_LIBWRAP
/*
 * tcp_wrappers access check for the connected socket.
 * Returns 0 when access is granted, -1 when denied.
 */
static int tcpd_auth(int connfd)
{
	struct request_info request;
	int rc;

	request_init(&request, RQ_DAEMON, PROGNAME, RQ_FILE, connfd, 0);
	fromhost(&request);
	rc = hosts_access(&request);
	if (rc == 0)
		return -1;

	return 0;
}
#endif

/*
 * Accept one connection on listenfd, log the peer address and apply the
 * optional tcp_wrappers check.  Returns the connected fd, or -1.
 */
static int do_accept(int listenfd)
{
	int connfd;
	struct sockaddr_storage ss;
	socklen_t len = sizeof(ss);
	char host[NI_MAXHOST], port[NI_MAXSERV];
	int rc;

	memset(&ss, 0, sizeof(ss));

	connfd = accept(listenfd, (struct sockaddr *) &ss, &len);
	if (connfd < 0) {
		err("failed to accept connection");
		return -1;
	}

	rc = getnameinfo((struct sockaddr *) &ss, len, host, sizeof(host),
			 port, sizeof(port), NI_NUMERICHOST | NI_NUMERICSERV);
	if (rc)
		err("getnameinfo: %s", gai_strerror(rc));

#ifdef HAVE_LIBWRAP
	rc = tcpd_auth(connfd);
	if (rc < 0) {
		info("denied access from %s", host);
		close(connfd);
		return -1;
	}
#endif
	info("connection from %s:%s", host, port);

	return connfd;
}

/*
 * glib watch callback on a listening socket: accept a connection,
 * serve exactly one request PDU, then close.  Always returns TRUE so
 * the watch stays installed.
 */
gboolean process_request(GIOChannel *gio, GIOCondition condition,
			 gpointer unused_data)
{
	int listenfd;
	int connfd;

	(void) unused_data;

	if (condition & (G_IO_ERR | G_IO_HUP | G_IO_NVAL)) {
		err("unknown condition");
		BUG();
	}

	if (condition & G_IO_IN) {
		listenfd = g_io_channel_unix_get_fd(gio);
		connfd = do_accept(listenfd);
		if (connfd < 0)
			return TRUE;

		recv_pdu(connfd);
		close(connfd);
	}

	return TRUE;
}

/* Log the numeric host/port a socket is bound to. */
static void log_addrinfo(struct addrinfo *ai)
{
	char hbuf[NI_MAXHOST];
	char sbuf[NI_MAXSERV];
	int rc;

	rc = getnameinfo(ai->ai_addr, ai->ai_addrlen, hbuf, sizeof(hbuf),
			 sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV);
	if (rc)
		err("getnameinfo: %s", gai_strerror(rc));

	info("listening on %s:%s", hbuf, sbuf);
}

/*
 * Create, bind and listen on a socket for every usable address in
 * ai_head (up to MAXSOCKFD), storing the fds into sockfdlist.
 * Returns the number of listening sockets, or -1 if none could be
 * opened.  (Continues past this chunk boundary.)
 */
static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[])
{
	struct addrinfo *ai;
	int ret, nsockfd = 0;

	for (ai = ai_head; ai && nsockfd < MAXSOCKFD; ai = ai->ai_next) {
		sockfdlist[nsockfd] = socket(ai->ai_family, ai->ai_socktype,
					     ai->ai_protocol);
		if (sockfdlist[nsockfd] < 0)
			continue;

		usbip_net_set_reuseaddr(sockfdlist[nsockfd]);
		usbip_net_set_nodelay(sockfdlist[nsockfd]);

		/* fds >= FD_SETSIZE cannot be select()ed on; drop them */
		if (sockfdlist[nsockfd] >= FD_SETSIZE) {
			close(sockfdlist[nsockfd]);
			sockfdlist[nsockfd] = -1;
			continue;
		}

		ret = bind(sockfdlist[nsockfd], ai->ai_addr, ai->ai_addrlen);
		if (ret < 0) {
			close(sockfdlist[nsockfd]);
			sockfdlist[nsockfd] = -1;
			continue;
		}

		ret = listen(sockfdlist[nsockfd], SOMAXCONN);
		if (ret < 0) {
			close(sockfdlist[nsockfd]);
			sockfdlist[nsockfd] = -1;
			continue;
		}

		log_addrinfo(ai);
		nsockfd++;
	}

	if (nsockfd == 0)
		return -1;

	dbg("listening on %d address%s", nsockfd,
	    (nsockfd == 1) ?
"" : "es"); return nsockfd; } static struct addrinfo *do_getaddrinfo(char *host, int ai_family) { struct addrinfo hints, *ai_head; int rc; memset(&hints, 0, sizeof(hints)); hints.ai_family = ai_family; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_PASSIVE; rc = getaddrinfo(host, USBIP_PORT_STRING, &hints, &ai_head); if (rc) { err("failed to get a network address %s: %s", USBIP_PORT_STRING, gai_strerror(rc)); return NULL; } return ai_head; } static void signal_handler(int i) { dbg("received signal: code %d", i); if (main_loop) g_main_loop_quit(main_loop); } static void set_signal(void) { struct sigaction act; memset(&act, 0, sizeof(act)); act.sa_handler = signal_handler; sigemptyset(&act.sa_mask); sigaction(SIGTERM, &act, NULL); sigaction(SIGINT, &act, NULL); } static int do_standalone_mode(gboolean daemonize) { struct addrinfo *ai_head; int sockfdlist[MAXSOCKFD]; int nsockfd; int i; if (usbip_names_init(USBIDS_FILE)) err("failed to open %s", USBIDS_FILE); if (usbip_host_driver_open()) { err("please load " USBIP_CORE_MOD_NAME ".ko and " USBIP_HOST_DRV_NAME ".ko!"); return -1; } if (daemonize) { if (daemon(0,0) < 0) { err("daemonizing failed: %s", strerror(errno)); return -1; } usbip_use_syslog = 1; } set_signal(); ai_head = do_getaddrinfo(NULL, PF_UNSPEC); if (!ai_head) return -1; info("starting " PROGNAME " (%s)", usbip_version_string); nsockfd = listen_all_addrinfo(ai_head, sockfdlist); if (nsockfd <= 0) { err("failed to open a listening socket"); return -1; } for (i = 0; i < nsockfd; i++) { GIOChannel *gio; gio = g_io_channel_unix_new(sockfdlist[i]); g_io_add_watch(gio, (G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL), process_request, NULL); } main_loop = g_main_loop_new(FALSE, FALSE); g_main_loop_run(main_loop); info("shutting down " PROGNAME); freeaddrinfo(ai_head); usbip_host_driver_close(); usbip_names_free(); return 0; } int main(int argc, char *argv[]) { static const struct option longopts[] = { { "daemon", no_argument, NULL, 'D' }, { "debug", 
no_argument, NULL, 'd' }, { "help", no_argument, NULL, 'h' }, { "version", no_argument, NULL, 'v' }, { NULL, 0, NULL, 0 } }; enum { cmd_standalone_mode = 1, cmd_help, cmd_version } cmd; gboolean daemonize = FALSE; int opt, rc = -1; usbip_use_stderr = 1; usbip_use_syslog = 0; if (geteuid() != 0) err("not running as root?"); cmd = cmd_standalone_mode; for (;;) { opt = getopt_long(argc, argv, "Ddhv", longopts, NULL); if (opt == -1) break; switch (opt) { case 'D': daemonize = TRUE; break; case 'd': usbip_use_debug = 1; break; case 'h': cmd = cmd_help; break; case 'v': cmd = cmd_version; break; case '?': usbipd_help(); default: goto err_out; } } switch (cmd) { case cmd_standalone_mode: rc = do_standalone_mode(daemonize); break; case cmd_version: printf(PROGNAME " (%s)\n", usbip_version_string); rc = 0; break; case cmd_help: usbipd_help(); rc = 0; break; default: usbipd_help(); goto err_out; } err_out: return (rc > -1 ? EXIT_SUCCESS : EXIT_FAILURE); }
gpl-2.0
jld/b2g-hamachi-kernel
arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c
14186
2688
/* * Bestcomm FEC RX task microcode * * Copyright (c) 2004 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex * on Tue Mar 22 11:19:38 2005 GMT */ #include <asm/types.h> /* * The header consists of the following fields: * u32 magic; * u8 desc_size; * u8 var_size; * u8 inc_size; * u8 first_var; * u8 reserved[8]; * * The size fields contain the number of 32-bit words. */ u32 bcom_fec_rx_task[] = { /* header */ 0x4243544b, 0x18060709, 0x00000000, 0x00000000, /* Task descriptors */ 0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */ 0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */ 0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ 0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */ 0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */ 0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */ 0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */ 0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ 0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */ 0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */ 0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */ 0x007ecf80, /* DRD1A: 
*idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */ 0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */ 0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */ 0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */ 0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */ 0x000001f8, /* NOP */ /* VAR[9]-VAR[14] */ 0x40000000, 0x7fff7fff, 0x00000000, 0x00000003, 0x40000008, 0x43ffffff, /* INC[0]-INC[6] */ 0x40000000, 0xe0000000, 0xe0000000, 0xa0000008, 0x20000000, 0x00000000, 0x4000ffff, };
gpl-2.0
cphelps76/DEMENTEDElite_kernel_jf
drivers/sensorhub/factory/light_cm3320.c
107
3091
/*
 * Copyright (C) 2012, Samsung Electronics Co. Ltd. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include "../ssp.h"

#define VENDOR		"CAPELLA"
#define CHIP_ID_3320	"CM3320"
#define CHIP_ID		"CM3323"

/* board revision from which the CM3323 (instead of CM3320) is fitted */
#if defined(CONFIG_MACH_JF_ATT) || defined(CONFIG_MACH_JF_TMO) || \
	defined(CONFIG_MACH_JF_EUR) || defined(CONFIG_MACH_JACTIVE_EUR)
#define CHIP_CM3323_REV 8
#elif defined(CONFIG_MACH_JF_SPR) || defined(CONFIG_MACH_JF_USC) || \
	defined(CONFIG_MACH_JF_VZW) || defined(CONFIG_MACH_JF_LGT) || \
	defined(CONFIG_MACH_JF_SKT) || defined(CONFIG_MACH_JF_KTT) || \
	defined(CONFIG_MACH_JF_DCM) || defined(CONFIG_MACH_JF_CRI)
#define CHIP_CM3323_REV 9
#endif

/*************************************************************************/
/* factory Sysfs                                                         */
/*************************************************************************/

/* sysfs "vendor" attribute: sensor vendor name. */
static ssize_t light_vendor_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VENDOR);
}

/*
 * sysfs "name" attribute: chip name, selected by AP board revision when
 * CHIP_CM3323_REV is defined for this machine.
 */
static ssize_t light_name_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
#ifdef CHIP_CM3323_REV
	struct ssp_data *data = dev_get_drvdata(dev);

	if (data->ap_rev >= CHIP_CM3323_REV)
		return sprintf(buf, "%s\n", CHIP_ID);
	else
		return sprintf(buf, "%s\n", CHIP_ID_3320);
#else
	return sprintf(buf, "%s\n", CHIP_ID_3320);
#endif
}

/*
 * Format the latest R/G/B/W light readings as "r,g,b,w\n".
 * Shared by the "lux" and "raw_data" attributes, whose bodies were
 * previously identical copy-pastes of each other.
 */
static ssize_t light_print_rgbw(struct ssp_data *data, char *buf)
{
	return sprintf(buf, "%u,%u,%u,%u\n",
		data->buf[LIGHT_SENSOR].r, data->buf[LIGHT_SENSOR].g,
		data->buf[LIGHT_SENSOR].b, data->buf[LIGHT_SENSOR].w);
}

/* sysfs "lux" attribute: current R/G/B/W channel values. */
static ssize_t light_lux_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ssp_data *data = dev_get_drvdata(dev);

	return light_print_rgbw(data, buf);
}

/* sysfs "raw_data" attribute: same R/G/B/W channel values. */
static ssize_t light_data_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ssp_data *data = dev_get_drvdata(dev);

	return light_print_rgbw(data, buf);
}

static DEVICE_ATTR(vendor, S_IRUGO, light_vendor_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, light_name_show, NULL);
static DEVICE_ATTR(lux, S_IRUGO, light_lux_show, NULL);
static DEVICE_ATTR(raw_data, S_IRUGO, light_data_show, NULL);

static struct device_attribute *light_attrs[] = {
	&dev_attr_vendor,
	&dev_attr_name,
	&dev_attr_lux,
	&dev_attr_raw_data,
	NULL,
};

/* Register the factory-test sysfs node "light_sensor". */
void initialize_light_factorytest(struct ssp_data *data)
{
	sensors_register(data->light_device, data, light_attrs,
		"light_sensor");
}

/* Unregister the factory-test sysfs node. */
void remove_light_factorytest(struct ssp_data *data)
{
	sensors_unregister(data->light_device, light_attrs);
}
gpl-2.0
skywave/caf-zte-blade
fs/nfsd/nfs4idmap.c
107
13589
/* * Mapping of UID/GIDs to name and vice versa. * * Copyright (c) 2002, 2003 The Regents of the University of * Michigan. All rights reserved. * * Marius Aamodt Eriksen <marius@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/sched.h> #include <linux/slab.h> #include "idmap.h" #include "nfsd.h" /* * Cache entry */ /* * XXX we know that IDMAP_NAMESZ < PAGE_SIZE, but it's ugly to rely on * that. 
*/ #define IDMAP_TYPE_USER 0 #define IDMAP_TYPE_GROUP 1 struct ent { struct cache_head h; int type; /* User / Group */ uid_t id; char name[IDMAP_NAMESZ]; char authname[IDMAP_NAMESZ]; }; /* Common entry handling */ #define ENT_HASHBITS 8 #define ENT_HASHMAX (1 << ENT_HASHBITS) #define ENT_HASHMASK (ENT_HASHMAX - 1) static void ent_init(struct cache_head *cnew, struct cache_head *citm) { struct ent *new = container_of(cnew, struct ent, h); struct ent *itm = container_of(citm, struct ent, h); new->id = itm->id; new->type = itm->type; strlcpy(new->name, itm->name, sizeof(new->name)); strlcpy(new->authname, itm->authname, sizeof(new->name)); } static void ent_put(struct kref *ref) { struct ent *map = container_of(ref, struct ent, h.ref); kfree(map); } static struct cache_head * ent_alloc(void) { struct ent *e = kmalloc(sizeof(*e), GFP_KERNEL); if (e) return &e->h; else return NULL; } /* * ID -> Name cache */ static struct cache_head *idtoname_table[ENT_HASHMAX]; static uint32_t idtoname_hash(struct ent *ent) { uint32_t hash; hash = hash_str(ent->authname, ENT_HASHBITS); hash = hash_long(hash ^ ent->id, ENT_HASHBITS); /* Flip LSB for user/group */ if (ent->type == IDMAP_TYPE_GROUP) hash ^= 1; return hash; } static void idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) { struct ent *ent = container_of(ch, struct ent, h); char idstr[11]; qword_add(bpp, blen, ent->authname); snprintf(idstr, sizeof(idstr), "%u", ent->id); qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? 
"group" : "user"); qword_add(bpp, blen, idstr); (*bpp)[-1] = '\n'; } static int idtoname_upcall(struct cache_detail *cd, struct cache_head *ch) { return sunrpc_cache_pipe_upcall(cd, ch, idtoname_request); } static int idtoname_match(struct cache_head *ca, struct cache_head *cb) { struct ent *a = container_of(ca, struct ent, h); struct ent *b = container_of(cb, struct ent, h); return (a->id == b->id && a->type == b->type && strcmp(a->authname, b->authname) == 0); } static int idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct ent *ent; if (h == NULL) { seq_puts(m, "#domain type id [name]\n"); return 0; } ent = container_of(h, struct ent, h); seq_printf(m, "%s %s %u", ent->authname, ent->type == IDMAP_TYPE_GROUP ? "group" : "user", ent->id); if (test_bit(CACHE_VALID, &h->flags)) seq_printf(m, " %s", ent->name); seq_printf(m, "\n"); return 0; } static void warn_no_idmapd(struct cache_detail *detail, int has_died) { printk("nfsd: nfsv4 idmapping failing: has idmapd %s?\n", has_died ? 
"died" : "not been started"); } static int idtoname_parse(struct cache_detail *, char *, int); static struct ent *idtoname_lookup(struct ent *); static struct ent *idtoname_update(struct ent *, struct ent *); static struct cache_detail idtoname_cache = { .owner = THIS_MODULE, .hash_size = ENT_HASHMAX, .hash_table = idtoname_table, .name = "nfs4.idtoname", .cache_put = ent_put, .cache_upcall = idtoname_upcall, .cache_parse = idtoname_parse, .cache_show = idtoname_show, .warn_no_listener = warn_no_idmapd, .match = idtoname_match, .init = ent_init, .update = ent_init, .alloc = ent_alloc, }; static int idtoname_parse(struct cache_detail *cd, char *buf, int buflen) { struct ent ent, *res; char *buf1, *bp; int len; int error = -EINVAL; if (buf[buflen - 1] != '\n') return (-EINVAL); buf[buflen - 1]= '\0'; buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL); if (buf1 == NULL) return (-ENOMEM); memset(&ent, 0, sizeof(ent)); /* Authentication name */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; memcpy(ent.authname, buf1, sizeof(ent.authname)); /* Type */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; ent.type = strcmp(buf1, "user") == 0 ? 
IDMAP_TYPE_USER : IDMAP_TYPE_GROUP; /* ID */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; ent.id = simple_strtoul(buf1, &bp, 10); if (bp == buf1) goto out; /* expiry */ ent.h.expiry_time = get_expiry(&buf); if (ent.h.expiry_time == 0) goto out; error = -ENOMEM; res = idtoname_lookup(&ent); if (!res) goto out; /* Name */ error = -EINVAL; len = qword_get(&buf, buf1, PAGE_SIZE); if (len < 0) goto out; if (len == 0) set_bit(CACHE_NEGATIVE, &ent.h.flags); else if (len >= IDMAP_NAMESZ) goto out; else memcpy(ent.name, buf1, sizeof(ent.name)); error = -ENOMEM; res = idtoname_update(&ent, res); if (res == NULL) goto out; cache_put(&res->h, &idtoname_cache); error = 0; out: kfree(buf1); return error; } static struct ent * idtoname_lookup(struct ent *item) { struct cache_head *ch = sunrpc_cache_lookup(&idtoname_cache, &item->h, idtoname_hash(item)); if (ch) return container_of(ch, struct ent, h); else return NULL; } static struct ent * idtoname_update(struct ent *new, struct ent *old) { struct cache_head *ch = sunrpc_cache_update(&idtoname_cache, &new->h, &old->h, idtoname_hash(new)); if (ch) return container_of(ch, struct ent, h); else return NULL; } /* * Name -> ID cache */ static struct cache_head *nametoid_table[ENT_HASHMAX]; static inline int nametoid_hash(struct ent *ent) { return hash_str(ent->name, ENT_HASHBITS); } static void nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) { struct ent *ent = container_of(ch, struct ent, h); qword_add(bpp, blen, ent->authname); qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? 
"group" : "user"); qword_add(bpp, blen, ent->name); (*bpp)[-1] = '\n'; } static int nametoid_upcall(struct cache_detail *cd, struct cache_head *ch) { return sunrpc_cache_pipe_upcall(cd, ch, nametoid_request); } static int nametoid_match(struct cache_head *ca, struct cache_head *cb) { struct ent *a = container_of(ca, struct ent, h); struct ent *b = container_of(cb, struct ent, h); return (a->type == b->type && strcmp(a->name, b->name) == 0 && strcmp(a->authname, b->authname) == 0); } static int nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct ent *ent; if (h == NULL) { seq_puts(m, "#domain type name [id]\n"); return 0; } ent = container_of(h, struct ent, h); seq_printf(m, "%s %s %s", ent->authname, ent->type == IDMAP_TYPE_GROUP ? "group" : "user", ent->name); if (test_bit(CACHE_VALID, &h->flags)) seq_printf(m, " %u", ent->id); seq_printf(m, "\n"); return 0; } static struct ent *nametoid_lookup(struct ent *); static struct ent *nametoid_update(struct ent *, struct ent *); static int nametoid_parse(struct cache_detail *, char *, int); static struct cache_detail nametoid_cache = { .owner = THIS_MODULE, .hash_size = ENT_HASHMAX, .hash_table = nametoid_table, .name = "nfs4.nametoid", .cache_put = ent_put, .cache_upcall = nametoid_upcall, .cache_parse = nametoid_parse, .cache_show = nametoid_show, .warn_no_listener = warn_no_idmapd, .match = nametoid_match, .init = ent_init, .update = ent_init, .alloc = ent_alloc, }; static int nametoid_parse(struct cache_detail *cd, char *buf, int buflen) { struct ent ent, *res; char *buf1; int error = -EINVAL; if (buf[buflen - 1] != '\n') return (-EINVAL); buf[buflen - 1]= '\0'; buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL); if (buf1 == NULL) return (-ENOMEM); memset(&ent, 0, sizeof(ent)); /* Authentication name */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; memcpy(ent.authname, buf1, sizeof(ent.authname)); /* Type */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; ent.type = strcmp(buf1, 
"user") == 0 ? IDMAP_TYPE_USER : IDMAP_TYPE_GROUP; /* Name */ error = qword_get(&buf, buf1, PAGE_SIZE); if (error <= 0 || error >= IDMAP_NAMESZ) goto out; memcpy(ent.name, buf1, sizeof(ent.name)); /* expiry */ ent.h.expiry_time = get_expiry(&buf); if (ent.h.expiry_time == 0) goto out; /* ID */ error = get_int(&buf, &ent.id); if (error == -EINVAL) goto out; if (error == -ENOENT) set_bit(CACHE_NEGATIVE, &ent.h.flags); error = -ENOMEM; res = nametoid_lookup(&ent); if (res == NULL) goto out; res = nametoid_update(&ent, res); if (res == NULL) goto out; cache_put(&res->h, &nametoid_cache); error = 0; out: kfree(buf1); return (error); } static struct ent * nametoid_lookup(struct ent *item) { struct cache_head *ch = sunrpc_cache_lookup(&nametoid_cache, &item->h, nametoid_hash(item)); if (ch) return container_of(ch, struct ent, h); else return NULL; } static struct ent * nametoid_update(struct ent *new, struct ent *old) { struct cache_head *ch = sunrpc_cache_update(&nametoid_cache, &new->h, &old->h, nametoid_hash(new)); if (ch) return container_of(ch, struct ent, h); else return NULL; } /* * Exported API */ int nfsd_idmap_init(void) { int rv; rv = cache_register(&idtoname_cache); if (rv) return rv; rv = cache_register(&nametoid_cache); if (rv) cache_unregister(&idtoname_cache); return rv; } void nfsd_idmap_shutdown(void) { cache_unregister(&idtoname_cache); cache_unregister(&nametoid_cache); } static int idmap_lookup(struct svc_rqst *rqstp, struct ent *(*lookup_fn)(struct ent *), struct ent *key, struct cache_detail *detail, struct ent **item) { int ret; *item = lookup_fn(key); if (!*item) return -ENOMEM; retry: ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle); if (ret == -ETIMEDOUT) { struct ent *prev_item = *item; *item = lookup_fn(key); if (*item != prev_item) goto retry; cache_put(&(*item)->h, detail); } return ret; } static char * rqst_authname(struct svc_rqst *rqstp) { struct auth_domain *clp; clp = rqstp->rq_gssclient ? 
rqstp->rq_gssclient : rqstp->rq_client; return clp->name; } static __be32 idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id) { struct ent *item, key = { .type = type, }; int ret; if (namelen + 1 > sizeof(key.name)) return nfserr_badowner; memcpy(key.name, name, namelen); key.name[namelen] = '\0'; strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item); if (ret == -ENOENT) return nfserr_badowner; if (ret) return nfserrno(ret); *id = item->id; cache_put(&item->h, &nametoid_cache); return 0; } static int idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name) { struct ent *item, key = { .id = id, .type = type, }; int ret; strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item); if (ret == -ENOENT) return sprintf(name, "%u", id); if (ret) return ret; ret = strlen(item->name); BUG_ON(ret > IDMAP_NAMESZ); memcpy(name, item->name, ret); cache_put(&item->h, &idtoname_cache); return ret; } __be32 nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen, __u32 *id) { return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id); } __be32 nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen, __u32 *id) { return idmap_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, id); } int nfsd_map_uid_to_name(struct svc_rqst *rqstp, __u32 id, char *name) { return idmap_id_to_name(rqstp, IDMAP_TYPE_USER, id, name); } int nfsd_map_gid_to_name(struct svc_rqst *rqstp, __u32 id, char *name) { return idmap_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name); }
gpl-2.0
zanezam/boeffla-kernel-oos-oneplus2
drivers/char/adsprpc.c
107
49446
/* * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/slab.h> #include <linux/completion.h> #include <linux/pagemap.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/cdev.h> #include <linux/list.h> #include <linux/hash.h> #include <linux/msm_ion.h> #include <soc/qcom/smd.h> #include <soc/qcom/subsystem_notif.h> #include <linux/msm_iommu_domains.h> #include <linux/scatterlist.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/device.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/dma-contiguous.h> #include <linux/dma-buf.h> #include <linux/iommu.h> #include <linux/kref.h> #include <linux/sort.h> #include "adsprpc_compat.h" #include "adsprpc_shared.h" #ifndef ION_ADSPRPC_HEAP_ID #define ION_ADSPRPC_HEAP_ID ION_AUDIO_HEAP_ID #endif /*ION_ADSPRPC_HEAP_ID*/ #define RPC_TIMEOUT (5 * HZ) #define RPC_HASH_BITS 5 #define RPC_HASH_SZ (1 << RPC_HASH_BITS) #define BALIGN 32 #define NUM_CHANNELS 2 #define LOCK_MMAP(kernel)\ do {\ if (!kernel)\ down_read(&current->mm->mmap_sem);\ } while (0) #define UNLOCK_MMAP(kernel)\ do {\ if (!kernel)\ up_read(&current->mm->mmap_sem);\ } while (0) #define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0) static inline uintptr_t buf_page_start(void *buf) { uintptr_t start = (uintptr_t) buf & PAGE_MASK; return start; } static inline uintptr_t buf_page_offset(void *buf) { uintptr_t offset = (uintptr_t) buf & (PAGE_SIZE - 1); 
return offset; } static inline int buf_num_pages(void *buf, ssize_t len) { uintptr_t start = buf_page_start(buf) >> PAGE_SHIFT; uintptr_t end = (((uintptr_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT; int nPages = end - start + 1; return nPages; } static inline uint32_t buf_page_size(uint32_t size) { uint32_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK; return sz > PAGE_SIZE ? sz : PAGE_SIZE; } static inline int buf_get_pages(void *addr, ssize_t sz, int nr_pages, int access, struct smq_phy_page *pages, int nr_elems, struct smq_phy_page *range) { struct vm_area_struct *vma, *vmaend; uintptr_t start = buf_page_start(addr); uintptr_t end = buf_page_start((void *)((uintptr_t)addr + sz - 1)); uint32_t len = nr_pages << PAGE_SHIFT; unsigned long pfn, pfnend, paddr; int n = -1, err = 0; VERIFY(err, 0 != access_ok(access ? VERIFY_WRITE : VERIFY_READ, (void __user *)start, len)); if (err) goto bail; VERIFY(err, 0 != (vma = find_vma(current->mm, start))); if (err) goto bail; VERIFY(err, 0 != (vmaend = find_vma(current->mm, end))); if (err) goto bail; n = 0; if (follow_pfn(vma, start, &pfn)) goto bail; if (follow_pfn(vmaend, end, &pfnend)) goto bail; VERIFY(err, (pfn + nr_pages - 1) == pfnend); if (err) goto bail; VERIFY(err, nr_elems > 0); if (err) goto bail; VERIFY(err, __pfn_to_phys(pfnend) <= UINT_MAX); if (err) goto bail; paddr = __pfn_to_phys(pfn); if (range->size && (paddr < range->addr)) goto bail; if (range->size && ((paddr - range->addr + len) > range->size)) goto bail; pages->addr = paddr; pages->size = len; n++; bail: return n; } struct fastrpc_buf { struct ion_handle *handle; void *virt; ion_phys_addr_t phys; ssize_t size; int used; }; struct smq_context_list; struct overlap { uintptr_t start; uintptr_t end; int raix; uintptr_t mstart; uintptr_t mend; uintptr_t offset; }; struct smq_invoke_ctx { struct hlist_node hn; struct completion work; int retval; int pid; int tgid; remote_arg_t *pra; remote_arg_t *rpra; struct fastrpc_buf obuf; struct fastrpc_buf *abufs; 
struct fastrpc_device *dev; struct fastrpc_apps *apps; struct file_data *fdata; int *fds; struct ion_handle **handles; int nbufs; bool smmu; uint32_t sc; struct overlap *overs; struct overlap **overps; }; struct smq_context_list { struct hlist_head pending; struct hlist_head interrupted; spinlock_t hlock; }; struct fastrpc_smmu { struct iommu_group *group; struct iommu_domain *domain; int domain_id; bool enabled; }; struct fastrpc_channel_context { smd_channel_t *chan; struct device *dev; struct completion work; struct fastrpc_smmu smmu; struct kref kref; struct notifier_block nb; int ssrcount; }; struct fastrpc_apps { struct fastrpc_channel_context channel[NUM_CHANNELS]; struct smq_context_list clst; struct ion_client *iclient; struct cdev cdev; struct class *class; struct mutex smd_mutex; struct smq_phy_page range; dev_t dev_no; int compat; spinlock_t wrlock; spinlock_t hlock; struct hlist_head htbl[RPC_HASH_SZ]; }; struct fastrpc_mmap { struct hlist_node hn; struct ion_handle *handle; void *virt; ion_phys_addr_t phys; uintptr_t *vaddrin; uintptr_t vaddrout; ssize_t size; int refs; }; struct file_data { spinlock_t hlock; struct hlist_head hlst; uint32_t mode; int cid; int tgid; int ssrcount; }; struct fastrpc_device { uint32_t tgid; struct hlist_node hn; struct fastrpc_buf buf; }; struct fastrpc_channel_info { char *name; char *node; char *group; char *subsys; int channel; }; static struct fastrpc_apps gfa; static const struct fastrpc_channel_info gcinfo[NUM_CHANNELS] = { { .name = "adsprpc-smd", .node = "qcom,msm-audio-ion", .group = "lpass_audio", .subsys = "adsp", .channel = SMD_APPS_QDSP, }, { .name = "mdsprpc-smd", .subsys = "modem", .channel = SMD_APPS_MODEM, }, }; static int map_iommu_mem(struct ion_handle *handle, struct file_data *fdata, ion_phys_addr_t *iova, unsigned long size) { struct fastrpc_apps *me = &gfa; struct fastrpc_mmap *map = 0, *mapmatch = 0; struct hlist_node *n; unsigned long len = size; int cid = fdata->cid; int err = 0; 
spin_lock(&fdata->hlock); hlist_for_each_entry_safe(map, n, &fdata->hlst, hn) { if (handle == map->handle) { mapmatch = map; break; } } spin_unlock(&fdata->hlock); if (mapmatch) { *iova = mapmatch->phys; return 0; } mutex_lock(&me->smd_mutex); VERIFY(err, fdata->ssrcount == me->channel[cid].ssrcount); if (!err) VERIFY(err, 0 == ion_map_iommu(me->iclient, handle, me->channel[cid].smmu.domain_id, 0, SZ_4K, 0, iova, &len, 0, 0)); mutex_unlock(&me->smd_mutex); return err; } static void unmap_iommu_mem(struct ion_handle *handle, struct file_data *fdata, int cached) { struct fastrpc_apps *me = &gfa; struct fastrpc_mmap *map = 0, *mapmatch = 0; struct hlist_node *n; int cid = fdata->cid; if (cached) { spin_lock(&fdata->hlock); hlist_for_each_entry_safe(map, n, &fdata->hlst, hn) { if (handle == map->handle) { mapmatch = map; break; } } spin_unlock(&fdata->hlock); } if (!mapmatch) { mutex_lock(&me->smd_mutex); if (fdata->ssrcount == me->channel[cid].ssrcount) ion_unmap_iommu(me->iclient, handle, me->channel[cid].smmu.domain_id, 0); mutex_unlock(&me->smd_mutex); } } static void free_mem(struct fastrpc_buf *buf, struct file_data *fd) { struct fastrpc_apps *me = &gfa; if (!IS_ERR_OR_NULL(buf->handle)) { if (me->channel[fd->cid].smmu.enabled && buf->phys) { unmap_iommu_mem(buf->handle, fd, 0); buf->phys = 0; } if (!IS_ERR_OR_NULL(buf->virt)) { ion_unmap_kernel(me->iclient, buf->handle); buf->virt = 0; } ion_free(me->iclient, buf->handle); buf->handle = 0; } } static void free_map(struct fastrpc_mmap *map, struct file_data *fdata) { struct fastrpc_apps *me = &gfa; int cid = fdata->cid; if (!IS_ERR_OR_NULL(map->handle)) { if (me->channel[cid].smmu.enabled && map->phys) { unmap_iommu_mem(map->handle, fdata, 0); map->phys = 0; } if (!IS_ERR_OR_NULL(map->virt)) { ion_unmap_kernel(me->iclient, map->handle); map->virt = 0; } ion_free(me->iclient, map->handle); } map->handle = 0; } static int alloc_mem(struct fastrpc_buf *buf, struct file_data *fdata) { struct fastrpc_apps *me = &gfa; 
struct ion_client *clnt = gfa.iclient; struct sg_table *sg; int err = 0; int cid = fdata->cid; unsigned int heap; buf->handle = 0; buf->virt = 0; buf->phys = 0; heap = me->channel[cid].smmu.enabled ? ION_HEAP(ION_IOMMU_HEAP_ID) : ION_HEAP(ION_ADSP_HEAP_ID); buf->handle = ion_alloc(clnt, buf->size, SZ_4K, heap, ION_FLAG_CACHED); VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle)); if (err) goto bail; buf->virt = ion_map_kernel(clnt, buf->handle); VERIFY(err, 0 == IS_ERR_OR_NULL(buf->virt)); if (err) goto bail; if (me->channel[cid].smmu.enabled) { VERIFY(err, 0 == map_iommu_mem(buf->handle, fdata, &buf->phys, buf->size)); if (err) goto bail; } else { VERIFY(err, 0 != (sg = ion_sg_table(clnt, buf->handle))); if (err) goto bail; buf->phys = sg_dma_address(sg->sgl); } bail: if (err && !IS_ERR_OR_NULL(buf->handle)) free_mem(buf, fdata); return err; } static int context_restore_interrupted(struct fastrpc_apps *me, struct fastrpc_ioctl_invoke_fd *invokefd, struct file_data *fdata, struct smq_invoke_ctx **po) { int err = 0; struct smq_invoke_ctx *ctx = 0, *ictx = 0; struct hlist_node *n; struct fastrpc_ioctl_invoke *invoke = &invokefd->inv; spin_lock(&me->clst.hlock); hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) { if (ictx->pid == current->pid) { if (invoke->sc != ictx->sc || ictx->fdata != fdata) err = -1; else { ctx = ictx; hlist_del(&ctx->hn); hlist_add_head(&ctx->hn, &me->clst.pending); } break; } } spin_unlock(&me->clst.hlock); if (ctx) *po = ctx; return err; } #define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1) static int overlap_ptr_cmp(const void *a, const void *b) { struct overlap *pa = *((struct overlap **)a); struct overlap *pb = *((struct overlap **)b); /* sort with lowest starting buffer first */ int st = CMP(pa->start, pb->start); /* sort with highest ending buffer first */ int ed = CMP(pb->end, pa->end); return st == 0 ? 
ed : st; } static int context_build_overlap(struct smq_invoke_ctx *ctx) { int err = 0, i; remote_arg_t *pra = ctx->pra; int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc); int nbufs = inbufs + outbufs; struct overlap max; ctx->overs = kzalloc(sizeof(*ctx->overs) * (nbufs), GFP_KERNEL); VERIFY(err, !IS_ERR_OR_NULL(ctx->overs)); if (err) goto bail; ctx->overps = kzalloc(sizeof(*ctx->overps) * (nbufs), GFP_KERNEL); VERIFY(err, !IS_ERR_OR_NULL(ctx->overps)); if (err) goto bail; for (i = 0; i < nbufs; ++i) { ctx->overs[i].start = (uintptr_t)pra[i].buf.pv; ctx->overs[i].end = ctx->overs[i].start + pra[i].buf.len; ctx->overs[i].raix = i; ctx->overps[i] = &ctx->overs[i]; } sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0); max.start = 0; max.end = 0; for (i = 0; i < nbufs; ++i) { if (ctx->overps[i]->start < max.end) { ctx->overps[i]->mstart = max.end; ctx->overps[i]->mend = ctx->overps[i]->end; ctx->overps[i]->offset = max.end - ctx->overps[i]->start; if (ctx->overps[i]->end > max.end) { max.end = ctx->overps[i]->end; } else { ctx->overps[i]->mend = 0; ctx->overps[i]->mstart = 0; } } else { ctx->overps[i]->mend = ctx->overps[i]->end; ctx->overps[i]->mstart = ctx->overps[i]->start; ctx->overps[i]->offset = 0; max = *ctx->overps[i]; } } bail: return err; } static void context_free(struct smq_invoke_ctx *ctx, int remove); static int context_alloc(struct fastrpc_apps *me, uint32_t kernel, struct fastrpc_ioctl_invoke_fd *invokefd, struct file_data *fdata, struct smq_invoke_ctx **po) { int err = 0, bufs, size = 0; struct smq_invoke_ctx *ctx = 0; struct smq_context_list *clst = &me->clst; struct fastrpc_ioctl_invoke *invoke = &invokefd->inv; bufs = REMOTE_SCALARS_INBUFS(invoke->sc) + REMOTE_SCALARS_OUTBUFS(invoke->sc); if (bufs) { size = bufs * sizeof(*ctx->pra); if (invokefd->fds) size = size + bufs * sizeof(*ctx->fds) + bufs * sizeof(*ctx->handles); } VERIFY(err, 0 != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL))); if 
(err) goto bail; INIT_HLIST_NODE(&ctx->hn); hlist_add_fake(&ctx->hn); ctx->apps = me; ctx->fdata = fdata; ctx->pra = (remote_arg_t *)(&ctx[1]); ctx->fds = invokefd->fds == 0 ? 0 : (int *)(&ctx->pra[bufs]); ctx->handles = invokefd->fds == 0 ? 0 : (struct ion_handle **)(&ctx->fds[bufs]); if (!kernel) { VERIFY(err, 0 == copy_from_user(ctx->pra, invoke->pra, bufs * sizeof(*ctx->pra))); if (err) goto bail; } else { memmove(ctx->pra, invoke->pra, bufs * sizeof(*ctx->pra)); } if (invokefd->fds) { if (!kernel) { VERIFY(err, 0 == copy_from_user(ctx->fds, invokefd->fds, bufs * sizeof(*ctx->fds))); if (err) goto bail; } else { memmove(ctx->fds, invokefd->fds, bufs * sizeof(*ctx->fds)); } } ctx->sc = invoke->sc; if (REMOTE_SCALARS_INBUFS(ctx->sc) + REMOTE_SCALARS_OUTBUFS(ctx->sc)) { VERIFY(err, 0 == context_build_overlap(ctx)); if (err) goto bail; } ctx->retval = -1; ctx->pid = current->pid; ctx->tgid = current->tgid; init_completion(&ctx->work); spin_lock(&clst->hlock); hlist_add_head(&ctx->hn, &clst->pending); spin_unlock(&clst->hlock); *po = ctx; bail: if (ctx && err) context_free(ctx, 1); return err; } static void context_save_interrupted(struct smq_invoke_ctx *ctx) { struct smq_context_list *clst = &ctx->apps->clst; spin_lock(&clst->hlock); hlist_del(&ctx->hn); hlist_add_head(&ctx->hn, &clst->interrupted); spin_unlock(&clst->hlock); } static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev); static void context_free(struct smq_invoke_ctx *ctx, int remove) { struct smq_context_list *clst = &ctx->apps->clst; struct fastrpc_apps *apps = ctx->apps; struct ion_client *clnt = apps->iclient; int cid = ctx->fdata->cid; int ssrcount = ctx->fdata->ssrcount; struct fastrpc_smmu *smmu = &apps->channel[cid].smmu; struct fastrpc_buf *b; int i, bufs; if (ctx->smmu) { bufs = REMOTE_SCALARS_INBUFS(ctx->sc) + REMOTE_SCALARS_OUTBUFS(ctx->sc); if (ctx->fds) { for (i = 0; i < bufs; i++) { if (IS_ERR_OR_NULL(ctx->handles[i])) continue; unmap_iommu_mem(ctx->handles[i], 
ctx->fdata, 1); ion_free(clnt, ctx->handles[i]); } } mutex_lock(&apps->smd_mutex); if (ssrcount == apps->channel[cid].ssrcount) iommu_detach_group(smmu->domain, smmu->group); mutex_unlock(&apps->smd_mutex); } for (i = 0, b = ctx->abufs; i < ctx->nbufs; ++i, ++b) free_mem(b, ctx->fdata); kfree(ctx->abufs); if (ctx->dev) { add_dev(apps, ctx->dev); if (ctx->obuf.handle != ctx->dev->buf.handle) free_mem(&ctx->obuf, ctx->fdata); } if (remove) { spin_lock(&clst->hlock); hlist_del(&ctx->hn); spin_unlock(&clst->hlock); } kfree(ctx->overps); kfree(ctx->overs); kfree(ctx); } static void context_notify_user(struct smq_invoke_ctx *ctx, int retval) { ctx->retval = retval; complete(&ctx->work); } static void context_notify_all_users(struct smq_context_list *me, int cid) { struct smq_invoke_ctx *ictx = 0; struct hlist_node *n; spin_lock(&me->hlock); hlist_for_each_entry_safe(ictx, n, &me->pending, hn) { if (ictx->fdata->cid == cid) complete(&ictx->work); } hlist_for_each_entry_safe(ictx, n, &me->interrupted, hn) { if (ictx->fdata->cid == cid) complete(&ictx->work); } spin_unlock(&me->hlock); } static void context_list_ctor(struct smq_context_list *me) { INIT_HLIST_HEAD(&me->interrupted); INIT_HLIST_HEAD(&me->pending); spin_lock_init(&me->hlock); } static void context_list_dtor(struct fastrpc_apps *me, struct smq_context_list *clst) { struct smq_invoke_ctx *ictx = 0, *ctxfree; struct hlist_node *n; do { ctxfree = 0; spin_lock(&clst->hlock); hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) { hlist_del(&ictx->hn); ctxfree = ictx; break; } spin_unlock(&clst->hlock); if (ctxfree) context_free(ctxfree, 0); } while (ctxfree); do { ctxfree = 0; spin_lock(&clst->hlock); hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) { hlist_del(&ictx->hn); ctxfree = ictx; break; } spin_unlock(&clst->hlock); if (ctxfree) context_free(ctxfree, 0); } while (ctxfree); } static int get_page_list(uint32_t kernel, struct smq_invoke_ctx *ctx) { struct fastrpc_apps *me = &gfa; struct smq_phy_page 
*pgstart, *pages; struct smq_invoke_buf *list; struct fastrpc_buf *ibuf = &ctx->dev->buf; struct fastrpc_buf *obuf = &ctx->obuf; remote_arg_t *pra = ctx->pra; ssize_t rlen; uint32_t sc = ctx->sc; int cid = ctx->fdata->cid; int i, err = 0; int inbufs = REMOTE_SCALARS_INBUFS(sc); int outbufs = REMOTE_SCALARS_OUTBUFS(sc); LOCK_MMAP(kernel); *obuf = *ibuf; retry: list = smq_invoke_buf_start((remote_arg_t *)obuf->virt, sc); pgstart = smq_phy_page_start(sc, list); pages = pgstart + 1; rlen = obuf->size - ((uintptr_t)pages - (uintptr_t)obuf->virt); if (rlen < 0) { rlen = ((uintptr_t)pages - (uintptr_t)obuf->virt) - obuf->size; obuf->size += buf_page_size(rlen); VERIFY(err, 0 == alloc_mem(obuf, ctx->fdata)); if (err) goto bail; goto retry; } pgstart->addr = obuf->phys; pgstart->size = obuf->size; for (i = 0; i < inbufs + outbufs; ++i) { void *buf; int len, num; list[i].num = 0; list[i].pgidx = 0; len = pra[i].buf.len; VERIFY(err, len >= 0); if (err) goto bail; if (!len) continue; buf = pra[i].buf.pv; num = buf_num_pages(buf, len); if (!kernel) { if (me->channel[cid].smmu.enabled) { VERIFY(err, 0 != access_ok(i >= inbufs ? 
VERIFY_WRITE : VERIFY_READ, (void __user *)buf, len)); if (err) goto bail; if (ctx->fds && (ctx->fds[i] >= 0)) list[i].num = 1; } else { list[i].num = buf_get_pages(buf, len, num, i >= inbufs, pages, rlen / sizeof(*pages), &me->range); } } VERIFY(err, list[i].num >= 0); if (err) goto bail; if (list[i].num) { list[i].pgidx = pages - pgstart; pages = pages + list[i].num; } else if (rlen > sizeof(*pages)) { list[i].pgidx = pages - pgstart; pages = pages + 1; } else { if (obuf->handle != ibuf->handle) free_mem(obuf, ctx->fdata); obuf->size += buf_page_size(sizeof(*pages)); VERIFY(err, 0 == alloc_mem(obuf, ctx->fdata)); if (err) goto bail; goto retry; } rlen = obuf->size - ((uintptr_t)pages - (uintptr_t)obuf->virt); } obuf->used = obuf->size - rlen; bail: if (err && (obuf->handle != ibuf->handle)) free_mem(obuf, ctx->fdata); UNLOCK_MMAP(kernel); return err; } static inline int is_overlapped_outbuf(struct smq_invoke_ctx *ctx, int oix) { int inbufs, outbufs; inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc); if (!ctx->overps[oix]->mstart) return 1; oix = oix + 1; if ((oix < inbufs + outbufs) && !ctx->overps[oix]->mstart && ctx->overps[oix]->raix < inbufs) return 1; return 0; } static int clear_user_outbufs(struct smq_invoke_ctx *ctx) { remote_arg_t *pra = ctx->pra; remote_arg_t *rpra = ctx->rpra; uintptr_t ptr, end; int oix, err = 0; int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc); for (oix = 0; oix < inbufs + outbufs; ++oix) { int i = ctx->overps[oix]->raix; if ((i < inbufs) || (pra[i].buf.pv != rpra[i].buf.pv) || is_overlapped_outbuf(ctx, oix)) continue; VERIFY(err, 0 == clear_user(rpra[i].buf.pv, (rpra[i].buf.len < 8) ? rpra[i].buf.len : 8)); if (err) goto bail; ptr = buf_page_start(rpra[i].buf.pv) + PAGE_SIZE; end = (uintptr_t)rpra[i].buf.pv + rpra[i].buf.len; for (; ptr < end; ptr += PAGE_SIZE) { VERIFY(err, 0 == clear_user((void *)ptr, ((end - ptr) < 8) ? 
end - ptr : 8)); if (err) goto bail; } } bail: return err; } static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx, remote_arg_t *upra) { struct fastrpc_apps *me = &gfa; struct smq_invoke_buf *list; struct fastrpc_buf *pbuf = &ctx->obuf, *obufs = 0; struct smq_phy_page *pages; struct vm_area_struct *vma; struct ion_handle **handles = ctx->handles; void *args; remote_arg_t *pra = ctx->pra; remote_arg_t *rpra = ctx->rpra; ssize_t rlen, used, size; uint32_t sc = ctx->sc, start; int i, inh, bufs = 0, err = 0, oix, copylen = 0; int inbufs = REMOTE_SCALARS_INBUFS(sc); int outbufs = REMOTE_SCALARS_OUTBUFS(sc); int cid = ctx->fdata->cid; int *fds = ctx->fds, idx, num; ion_phys_addr_t iova; list = smq_invoke_buf_start(rpra, sc); pages = smq_phy_page_start(sc, list); used = ALIGN(pbuf->used, BALIGN); args = (void *)((char *)pbuf->virt + used); rlen = pbuf->size - used; /* map ion buffers */ for (i = 0; i < inbufs + outbufs; ++i) { rpra[i].buf.len = pra[i].buf.len; if (!pra[i].buf.len) continue; if (me->channel[cid].smmu.enabled && fds && (fds[i] >= 0)) { unsigned long len; start = buf_page_start(pra[i].buf.pv); len = buf_page_size(pra[i].buf.len); num = buf_num_pages(pra[i].buf.pv, pra[i].buf.len); idx = list[i].pgidx; handles[i] = ion_import_dma_buf(me->iclient, fds[i]); VERIFY(err, 0 == IS_ERR_OR_NULL(handles[i])); if (err) goto bail; VERIFY(err, 0 == map_iommu_mem(handles[i], ctx->fdata, &iova, len)); if (err) goto bail; VERIFY(err, 0 != (vma = find_vma(current->mm, start))); if (err) goto bail; rpra[i].buf.pv = pra[i].buf.pv; pages[idx].addr = iova + (start - vma->vm_start); pages[idx].size = num << PAGE_SHIFT; continue; } else if (list[i].num) { rpra[i].buf.pv = pra[i].buf.pv; continue; } } /* calculate len requreed for copying */ for (oix = 0; oix < inbufs + outbufs; ++oix) { int i = ctx->overps[oix]->raix; if (!pra[i].buf.len) continue; if (list[i].num) continue; if (ctx->overps[oix]->offset == 0) copylen = ALIGN(copylen, BALIGN); copylen += 
ctx->overps[oix]->mend - ctx->overps[oix]->mstart; } /* alocate new buffer */ if (copylen > rlen) { struct fastrpc_buf *b; pbuf->used = pbuf->size - rlen; VERIFY(err, 0 != (b = krealloc(obufs, (bufs + 1) * sizeof(*obufs), GFP_KERNEL))); if (err) goto bail; obufs = b; pbuf = obufs + bufs; pbuf->size = buf_num_pages(0, copylen) * PAGE_SIZE; VERIFY(err, 0 == alloc_mem(pbuf, ctx->fdata)); if (err) goto bail; bufs++; args = pbuf->virt; rlen = pbuf->size; } /* copy non ion buffers */ for (oix = 0; oix < inbufs + outbufs; ++oix) { int i = ctx->overps[oix]->raix; int mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart; if (!pra[i].buf.len) continue; if (list[i].num) continue; if (ctx->overps[oix]->offset == 0) { rlen -= ALIGN((uintptr_t)args, BALIGN) - (uintptr_t)args; args = (void *)ALIGN((uintptr_t)args, BALIGN); } VERIFY(err, rlen >= mlen); if (err) goto bail; list[i].num = 1; rpra[i].buf.pv = args - ctx->overps[oix]->offset; pages[list[i].pgidx].addr = buf_page_start((void *)((uintptr_t)pbuf->phys - ctx->overps[oix]->offset + (pbuf->size - rlen))); pages[list[i].pgidx].size = buf_num_pages(rpra[i].buf.pv, rpra[i].buf.len) * PAGE_SIZE; if (i < inbufs) { if (!kernel) { VERIFY(err, 0 == copy_from_user(rpra[i].buf.pv, pra[i].buf.pv, pra[i].buf.len)); if (err) goto bail; } else { memmove(rpra[i].buf.pv, pra[i].buf.pv, pra[i].buf.len); } } args = (void *)((uintptr_t)args + mlen); rlen -= mlen; } if (!kernel) { VERIFY(err, 0 == clear_user_outbufs(ctx)); if (err) goto bail; } for (oix = 0; oix < inbufs + outbufs; ++oix) { int i = ctx->overps[oix]->raix; if (rpra[i].buf.len && ctx->overps[oix]->mstart) dmac_flush_range(rpra[i].buf.pv, (char *)rpra[i].buf.pv + rpra[i].buf.len); } pbuf->used = pbuf->size - rlen; size = sizeof(*rpra) * REMOTE_SCALARS_INHANDLES(sc); if (size) { inh = inbufs + outbufs; if (!kernel) { VERIFY(err, 0 == copy_from_user(&rpra[inh], &upra[inh], size)); if (err) goto bail; } else { memmove(&rpra[inh], &upra[inh], size); } } dmac_flush_range(rpra, 
(char *)rpra + used); bail: ctx->abufs = obufs; ctx->nbufs = bufs; return err; } static int put_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra, remote_arg_t *rpra, remote_arg_t *upra) { int i, inbufs, outbufs, outh, size; int err = 0; inbufs = REMOTE_SCALARS_INBUFS(sc); outbufs = REMOTE_SCALARS_OUTBUFS(sc); for (i = inbufs; i < inbufs + outbufs; ++i) { if (rpra[i].buf.pv != pra[i].buf.pv) { if (!kernel) { VERIFY(err, 0 == copy_to_user(pra[i].buf.pv, rpra[i].buf.pv, rpra[i].buf.len)); if (err) goto bail; } else { memmove(pra[i].buf.pv, rpra[i].buf.pv, rpra[i].buf.len); } } } size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc); if (size) { outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc); if (!kernel) { VERIFY(err, 0 == copy_to_user(&upra[outh], &rpra[outh], size)); if (err) goto bail; } else { memmove(&upra[outh], &rpra[outh], size); } } bail: return err; } static void inv_args_pre(uint32_t sc, remote_arg_t *rpra) { int i, inbufs, outbufs; uintptr_t end; inbufs = REMOTE_SCALARS_INBUFS(sc); outbufs = REMOTE_SCALARS_OUTBUFS(sc); for (i = inbufs; i < inbufs + outbufs; ++i) { if (!rpra[i].buf.len) continue; if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv)) continue; if (!IS_CACHE_ALIGNED((uintptr_t)rpra[i].buf.pv)) dmac_flush_range(rpra[i].buf.pv, (char *)rpra[i].buf.pv + 1); end = (uintptr_t)rpra[i].buf.pv + rpra[i].buf.len; if (!IS_CACHE_ALIGNED(end)) dmac_flush_range((char *)end, (char *)end + 1); } } static void inv_args(struct smq_invoke_ctx *ctx) { uint32_t sc = ctx->sc; remote_arg_t *rpra = ctx->rpra; int used = ctx->obuf.used; int i, inbufs, outbufs; int inv = 0; inbufs = REMOTE_SCALARS_INBUFS(sc); outbufs = REMOTE_SCALARS_OUTBUFS(sc); for (i = inbufs; i < inbufs + outbufs; ++i) { if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv)) inv = 1; else if (rpra[i].buf.len) dmac_inv_range(rpra[i].buf.pv, (char *)rpra[i].buf.pv + rpra[i].buf.len); } if (inv || REMOTE_SCALARS_OUTHANDLES(sc)) dmac_inv_range(rpra, (char *)rpra + used); } 
static int fastrpc_invoke_send(struct fastrpc_apps *me, uint32_t kernel, uint32_t handle, uint32_t sc, struct smq_invoke_ctx *ctx, struct fastrpc_buf *buf) { struct smq_msg msg; int err = 0, len; msg.pid = current->tgid; msg.tid = current->pid; if (kernel) msg.pid = 0; msg.invoke.header.ctx = ctx; msg.invoke.header.handle = handle; msg.invoke.header.sc = sc; msg.invoke.page.addr = buf->phys; msg.invoke.page.size = buf_page_size(buf->used); spin_lock(&me->wrlock); len = smd_write(me->channel[ctx->fdata->cid].chan, &msg, sizeof(msg)); spin_unlock(&me->wrlock); VERIFY(err, len == sizeof(msg)); return err; } static void fastrpc_deinit(void) { struct fastrpc_apps *me = &gfa; int i; for (i = 0; i < NUM_CHANNELS; i++) { if (me->channel[i].chan) { (void)smd_close(me->channel[i].chan); me->channel[i].chan = 0; } } ion_client_destroy(me->iclient); me->iclient = 0; } static void fastrpc_read_handler(int cid) { struct fastrpc_apps *me = &gfa; struct smq_invoke_rsp rsp; int ret = 0; do { ret = smd_read_from_cb(me->channel[cid].chan, &rsp, sizeof(rsp)); if (ret != sizeof(rsp)) break; context_notify_user(rsp.ctx, rsp.retval); } while (ret == sizeof(rsp)); } static void smd_event_handler(void *priv, unsigned event) { struct fastrpc_apps *me = &gfa; int cid = (int)(uintptr_t)priv; switch (event) { case SMD_EVENT_OPEN: complete(&me->channel[cid].work); break; case SMD_EVENT_CLOSE: context_notify_all_users(&me->clst, cid); break; case SMD_EVENT_DATA: fastrpc_read_handler(cid); break; } } static int fastrpc_init(void) { int i, err = 0; struct fastrpc_apps *me = &gfa; struct device_node *node; struct fastrpc_smmu *smmu; bool enabled = 0; spin_lock_init(&me->hlock); spin_lock_init(&me->wrlock); mutex_init(&me->smd_mutex); context_list_ctor(&me->clst); for (i = 0; i < RPC_HASH_SZ; ++i) INIT_HLIST_HEAD(&me->htbl[i]); me->iclient = msm_ion_client_create(DEVICE_NAME); VERIFY(err, 0 == IS_ERR_OR_NULL(me->iclient)); if (err) goto bail; for (i = 0; i < NUM_CHANNELS; i++) { 
init_completion(&me->channel[i].work); if (!gcinfo[i].node) continue; smmu = &me->channel[i].smmu; node = of_find_compatible_node(NULL, NULL, gcinfo[i].node); if (node) enabled = of_property_read_bool(node, "qcom,smmu-enabled"); if (enabled) smmu->group = iommu_group_find(gcinfo[i].group); if (smmu->group) smmu->domain = iommu_group_get_iommudata(smmu->group); if (!IS_ERR_OR_NULL(smmu->domain)) { smmu->domain_id = msm_find_domain_no(smmu->domain); if (smmu->domain_id >= 0) smmu->enabled = enabled; } } return 0; bail: return err; } static void free_dev(struct fastrpc_device *dev, struct file_data *fdata) { if (dev) { free_mem(&dev->buf, fdata); kfree(dev); module_put(THIS_MODULE); } } static int alloc_dev(struct fastrpc_device **dev, struct file_data *fdata) { int err = 0; struct fastrpc_device *fd = 0; VERIFY(err, 0 != try_module_get(THIS_MODULE)); if (err) goto bail; VERIFY(err, 0 != (fd = kzalloc(sizeof(*fd), GFP_KERNEL))); if (err) goto bail; INIT_HLIST_NODE(&fd->hn); fd->buf.size = PAGE_SIZE; VERIFY(err, 0 == alloc_mem(&fd->buf, fdata)); if (err) goto bail; fd->tgid = current->tgid; *dev = fd; bail: if (err) free_dev(fd, fdata); return err; } static int get_dev(struct fastrpc_apps *me, struct file_data *fdata, struct fastrpc_device **rdev) { struct hlist_head *head; struct fastrpc_device *dev = 0, *devfree = 0; struct hlist_node *n; uint32_t h = hash_32(current->tgid, RPC_HASH_BITS); int err = 0; spin_lock(&me->hlock); head = &me->htbl[h]; hlist_for_each_entry_safe(dev, n, head, hn) { if (dev->tgid == current->tgid) { hlist_del(&dev->hn); devfree = dev; break; } } spin_unlock(&me->hlock); VERIFY(err, devfree != 0); if (err) goto bail; *rdev = devfree; bail: if (err) { free_dev(devfree, fdata); err = alloc_dev(rdev, fdata); } return err; } static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev) { struct hlist_head *head; uint32_t h = hash_32(current->tgid, RPC_HASH_BITS); spin_lock(&me->hlock); head = &me->htbl[h]; hlist_add_head(&dev->hn, 
head); spin_unlock(&me->hlock); return; } static int fastrpc_release_current_dsp_process(struct file_data *fdata); static int fastrpc_internal_invoke(struct fastrpc_apps *me, uint32_t mode, uint32_t kernel, struct fastrpc_ioctl_invoke_fd *invokefd, struct file_data *fdata) { struct smq_invoke_ctx *ctx = 0; struct fastrpc_ioctl_invoke *invoke = &invokefd->inv; int cid = fdata->cid; int interrupted = 0; int err = 0; if (!kernel) { VERIFY(err, 0 == context_restore_interrupted(me, invokefd, fdata, &ctx)); if (err) goto bail; if (ctx) goto wait; } VERIFY(err, 0 == context_alloc(me, kernel, invokefd, fdata, &ctx)); if (err) goto bail; if (me->channel[cid].smmu.enabled) { mutex_lock(&me->smd_mutex); VERIFY(err, fdata->ssrcount == me->channel[cid].ssrcount); if (!err) VERIFY(err, 0 == iommu_attach_group( me->channel[cid].smmu.domain, me->channel[cid].smmu.group)); mutex_unlock(&me->smd_mutex); if (err) goto bail; ctx->smmu = 1; } if (REMOTE_SCALARS_LENGTH(ctx->sc)) { VERIFY(err, 0 == get_dev(me, fdata, &ctx->dev)); if (err) goto bail; VERIFY(err, 0 == get_page_list(kernel, ctx)); if (err) goto bail; ctx->rpra = (remote_arg_t *)ctx->obuf.virt; VERIFY(err, 0 == get_args(kernel, ctx, invoke->pra)); if (err) goto bail; } inv_args_pre(ctx->sc, ctx->rpra); if (FASTRPC_MODE_SERIAL == mode) inv_args(ctx); VERIFY(err, 0 == fastrpc_invoke_send(me, kernel, invoke->handle, ctx->sc, ctx, &ctx->obuf)); if (err) goto bail; if (FASTRPC_MODE_PARALLEL == mode) inv_args(ctx); wait: if (kernel) wait_for_completion(&ctx->work); else { interrupted = wait_for_completion_interruptible(&ctx->work); VERIFY(err, 0 == (err = interrupted)); if (err) goto bail; } VERIFY(err, 0 == (err = ctx->retval)); if (err) goto bail; VERIFY(err, 0 == put_args(kernel, ctx->sc, ctx->pra, ctx->rpra, invoke->pra)); if (err) goto bail; bail: if (ctx && interrupted == -ERESTARTSYS) context_save_interrupted(ctx); else if (ctx) context_free(ctx, 1); if (fdata->ssrcount != me->channel[cid].ssrcount) err = ECONNRESET; return 
err; } static int map_buffer(struct fastrpc_apps *me, struct file_data *fdata, int fd, char *buf, unsigned long len, struct fastrpc_mmap **ppmap, struct smq_phy_page **ppages, int *pnpages); static int fastrpc_init_process(struct file_data *fdata, struct fastrpc_ioctl_init *init) { int err = 0; struct fastrpc_ioctl_invoke_fd ioctl; struct smq_phy_page *pages = 0; struct fastrpc_mmap *map = 0; int npages = 0; struct fastrpc_apps *me = &gfa; if (init->flags == FASTRPC_INIT_ATTACH) { remote_arg_t ra[1]; int tgid = current->tgid; ra[0].buf.pv = &tgid; ra[0].buf.len = sizeof(tgid); ioctl.inv.handle = 1; ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0); ioctl.inv.pra = ra; ioctl.fds = 0; VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, FASTRPC_MODE_PARALLEL, 1, &ioctl, fdata))); if (err) goto bail; } else if (init->flags == FASTRPC_INIT_CREATE) { remote_arg_t ra[4]; int fds[4]; struct { int pgid; int namelen; int filelen; int pageslen; } inbuf; inbuf.pgid = current->tgid; inbuf.namelen = strlen(current->comm); inbuf.filelen = init->filelen; VERIFY(err, 0 == map_buffer(me, fdata, init->memfd, (char *)init->mem, init->memlen, &map, &pages, &npages)); if (err) goto bail; inbuf.pageslen = npages; ra[0].buf.pv = &inbuf; ra[0].buf.len = sizeof(inbuf); fds[0] = 0; ra[1].buf.pv = current->comm; ra[1].buf.len = inbuf.namelen; fds[1] = 0; ra[2].buf.pv = (void *)init->file; ra[2].buf.len = inbuf.filelen; fds[2] = init->filefd; ra[3].buf.pv = pages; ra[3].buf.len = npages * sizeof(*pages); fds[3] = 0; ioctl.inv.handle = 1; ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0); ioctl.inv.pra = ra; ioctl.fds = fds; VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, FASTRPC_MODE_PARALLEL, 1, &ioctl, fdata))); if (err) goto bail; spin_lock(&fdata->hlock); map->vaddrout = 0; hlist_add_head(&map->hn, &fdata->hlst); spin_unlock(&fdata->hlock); } else { err = -ENOTTY; } bail: kfree(pages); if (err && map) free_map(map, fdata); return err; } static int fastrpc_release_current_dsp_process(struct 
file_data *fdata) { int err = 0; struct fastrpc_apps *me = &gfa; struct fastrpc_ioctl_invoke_fd ioctl; remote_arg_t ra[1]; int tgid = 0; tgid = fdata->tgid; ra[0].buf.pv = &tgid; ra[0].buf.len = sizeof(tgid); ioctl.inv.handle = 1; ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0); ioctl.inv.pra = ra; ioctl.fds = 0; VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, FASTRPC_MODE_PARALLEL, 1, &ioctl, fdata))); return err; } static int fastrpc_mmap_on_dsp(struct fastrpc_apps *me, struct fastrpc_ioctl_mmap *mmap, struct smq_phy_page *pages, struct file_data *fdata, int num) { struct fastrpc_ioctl_invoke_fd ioctl; remote_arg_t ra[3]; int err = 0; struct { int pid; uint32_t flags; uintptr_t vaddrin; int num; } inargs; struct { uintptr_t vaddrout; } routargs; inargs.pid = current->tgid; inargs.vaddrin = (uintptr_t)mmap->vaddrin; inargs.flags = mmap->flags; inargs.num = me->compat ? num * sizeof(*pages) : num; ra[0].buf.pv = &inargs; ra[0].buf.len = sizeof(inargs); ra[1].buf.pv = pages; ra[1].buf.len = num * sizeof(*pages); ra[2].buf.pv = &routargs; ra[2].buf.len = sizeof(routargs); ioctl.inv.handle = 1; if (me->compat) ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1); else ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1); ioctl.inv.pra = ra; ioctl.fds = 0; VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, FASTRPC_MODE_PARALLEL, 1, &ioctl, fdata))); mmap->vaddrout = (uintptr_t)routargs.vaddrout; if (err) goto bail; bail: return err; } static int fastrpc_munmap_on_dsp(struct fastrpc_apps *me, struct fastrpc_ioctl_munmap *munmap, struct file_data *fdata) { struct fastrpc_ioctl_invoke_fd ioctl; remote_arg_t ra[1]; int err = 0; struct { int pid; uintptr_t vaddrout; ssize_t size; } inargs; inargs.pid = current->tgid; inargs.size = munmap->size; inargs.vaddrout = munmap->vaddrout; ra[0].buf.pv = &inargs; ra[0].buf.len = sizeof(inargs); ioctl.inv.handle = 1; if (me->compat) ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0); else ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0); ioctl.inv.pra = ra; 
ioctl.fds = 0; VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, FASTRPC_MODE_PARALLEL, 1, &ioctl, fdata))); return err; } static int fastrpc_internal_munmap(struct fastrpc_apps *me, struct file_data *fdata, struct fastrpc_ioctl_munmap *munmap) { int err = 0; struct fastrpc_mmap *map = 0, *mapfree = 0; struct hlist_node *n; spin_lock(&fdata->hlock); hlist_for_each_entry_safe(map, n, &fdata->hlst, hn) { if (map->vaddrout == munmap->vaddrout && map->size == munmap->size && --map->refs == 0) { hlist_del(&map->hn); mapfree = map; break; } } spin_unlock(&fdata->hlock); if (mapfree) { VERIFY(err, 0 == (err = fastrpc_munmap_on_dsp(me, munmap, fdata))); free_map(mapfree, fdata); kfree(mapfree); } return err; } static int map_buffer(struct fastrpc_apps *me, struct file_data *fdata, int fd, char *buf, unsigned long len, struct fastrpc_mmap **ppmap, struct smq_phy_page **ppages, int *pnpages) { struct ion_client *clnt = gfa.iclient; struct ion_handle *handle = 0; struct fastrpc_mmap *map = 0, *mapmatch = 0; struct smq_phy_page *pages = 0; struct hlist_node *n; uintptr_t vaddrout = 0; int num; int err = 0; handle = ion_import_dma_buf(clnt, fd); VERIFY(err, 0 == IS_ERR_OR_NULL(handle)); if (err) goto bail; spin_lock(&fdata->hlock); hlist_for_each_entry_safe(map, n, &fdata->hlst, hn) { if (map->handle == handle) { map->refs++; mapmatch = map; break; } } spin_unlock(&fdata->hlock); if (mapmatch) { vaddrout = mapmatch->vaddrout; return 0; } VERIFY(err, 0 != (map = kzalloc(sizeof(*map), GFP_KERNEL))); if (err) goto bail; map->handle = handle; handle = 0; map->virt = ion_map_kernel(clnt, map->handle); VERIFY(err, 0 == IS_ERR_OR_NULL(map->virt)); if (err) goto bail; num = buf_num_pages(buf, len); VERIFY(err, 0 != (pages = kzalloc(num * sizeof(*pages), GFP_KERNEL))); if (err) goto bail; if (me->channel[fdata->cid].smmu.enabled) { VERIFY(err, 0 == map_iommu_mem(map->handle, fdata, &map->phys, len)); if (err) goto bail; pages->addr = map->phys; pages->size = len; num = 1; } else { 
VERIFY(err, 0 < (num = buf_get_pages(buf, len, num, 1, pages, num, &me->range))); if (err) goto bail; } map->refs = 1; INIT_HLIST_NODE(&map->hn); map->vaddrin = (uintptr_t *)buf; map->vaddrout = vaddrout; map->size = len; if (ppages) *ppages = pages; pages = 0; if (pnpages) *pnpages = num; if (ppmap) *ppmap = map; map = 0; bail: if (map) free_map(map, fdata); kfree(pages); return err; } static int fastrpc_internal_mmap(struct fastrpc_apps *me, struct file_data *fdata, struct fastrpc_ioctl_mmap *mmap) { struct fastrpc_mmap *map = 0; struct smq_phy_page *pages = 0; int num = 0; int err = 0; VERIFY(err, 0 == map_buffer(me, fdata, mmap->fd, (char *)mmap->vaddrin, mmap->size, &map, &pages, &num)); VERIFY(err, 0 == fastrpc_mmap_on_dsp(me, mmap, pages, fdata, num)); if (err) goto bail; map->vaddrout = mmap->vaddrout; spin_lock(&fdata->hlock); hlist_add_head(&map->hn, &fdata->hlst); spin_unlock(&fdata->hlock); bail: if (err && map) { free_map(map, fdata); kfree(map); } kfree(pages); return err; } static void cleanup_current_dev(struct file_data *fdata) { struct fastrpc_apps *me = &gfa; uint32_t h = hash_32(current->tgid, RPC_HASH_BITS); struct hlist_head *head; struct hlist_node *n; struct fastrpc_device *dev, *devfree; rnext: devfree = dev = 0; spin_lock(&me->hlock); head = &me->htbl[h]; hlist_for_each_entry_safe(dev, n, head, hn) { if (dev->tgid == current->tgid) { hlist_del(&dev->hn); devfree = dev; break; } } spin_unlock(&me->hlock); if (devfree) { free_dev(devfree, fdata); goto rnext; } return; } static void fastrpc_channel_close(struct kref *kref) { struct fastrpc_apps *me = &gfa; struct fastrpc_channel_context *ctx; int cid; ctx = container_of(kref, struct fastrpc_channel_context, kref); smd_close(ctx->chan); ctx->chan = 0; mutex_unlock(&me->smd_mutex); cid = ctx - &me->channel[0]; pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name, MAJOR(me->dev_no), cid); } static int fastrpc_device_release(struct inode *inode, struct file *file) { struct file_data *fdata = 
(struct file_data *)file->private_data; struct fastrpc_apps *me = &gfa; struct smq_context_list *clst = &me->clst; struct smq_invoke_ctx *ictx = 0, *ctxfree; struct hlist_node *n; struct fastrpc_mmap *map = 0; int cid = MINOR(inode->i_rdev); if (!fdata) return 0; (void)fastrpc_release_current_dsp_process(fdata); do { ctxfree = 0; spin_lock(&clst->hlock); hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) { if ((ictx->tgid == current->tgid) && (ictx->fdata->cid == cid)) { hlist_del(&ictx->hn); ctxfree = ictx; break; } } spin_unlock(&clst->hlock); if (ctxfree) context_free(ctxfree, 0); } while (ctxfree); cleanup_current_dev(fdata); file->private_data = 0; hlist_for_each_entry_safe(map, n, &fdata->hlst, hn) { hlist_del(&map->hn); free_map(map, fdata); kfree(map); } if (fdata->ssrcount == me->channel[cid].ssrcount) kref_put_mutex(&me->channel[cid].kref, fastrpc_channel_close, &me->smd_mutex); kfree(fdata); return 0; } static int fastrpc_device_open(struct inode *inode, struct file *filp) { int cid = MINOR(inode->i_rdev); int err = 0, ssrcount; struct fastrpc_apps *me = &gfa; mutex_lock(&me->smd_mutex); ssrcount = me->channel[cid].ssrcount; if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) || (me->channel[cid].chan == 0)) { VERIFY(err, 0 == smd_named_open_on_edge( FASTRPC_SMD_GUID, gcinfo[cid].channel, &me->channel[cid].chan, (void *)(uintptr_t)cid, smd_event_handler)); if (err) goto smd_bail; VERIFY(err, 0 != wait_for_completion_timeout( &me->channel[cid].work, RPC_TIMEOUT)); if (err) goto completion_bail; kref_init(&me->channel[cid].kref); pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name, MAJOR(me->dev_no), cid); } mutex_unlock(&me->smd_mutex); filp->private_data = 0; if (0 != try_module_get(THIS_MODULE)) { struct file_data *fdata = 0; /* This call will cause a dev to be created * which will addref this module */ VERIFY(err, 0 != (fdata = kzalloc(sizeof(*fdata), GFP_KERNEL))); if (err) goto bail; spin_lock_init(&fdata->hlock); 
INIT_HLIST_HEAD(&fdata->hlst); fdata->cid = cid; fdata->tgid = current->tgid; fdata->ssrcount = ssrcount; filp->private_data = fdata; bail: if (err) { if (fdata) { cleanup_current_dev(fdata); kfree(fdata); } kref_put_mutex(&me->channel[cid].kref, fastrpc_channel_close, &me->smd_mutex); } module_put(THIS_MODULE); } return err; completion_bail: smd_close(me->channel[cid].chan); me->channel[cid].chan = 0; smd_bail: mutex_unlock(&me->smd_mutex); return err; } static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, unsigned long ioctl_param) { struct fastrpc_apps *me = &gfa; struct fastrpc_ioctl_invoke_fd invokefd; struct fastrpc_ioctl_mmap mmap; struct fastrpc_ioctl_munmap munmap; struct fastrpc_ioctl_init init; void *param = (char *)ioctl_param; struct file_data *fdata = (struct file_data *)file->private_data; int size = 0, err = 0; switch (ioctl_num) { case FASTRPC_IOCTL_INVOKE_FD: case FASTRPC_IOCTL_INVOKE: invokefd.fds = 0; size = (ioctl_num == FASTRPC_IOCTL_INVOKE) ? 
sizeof(invokefd.inv) : sizeof(invokefd); VERIFY(err, 0 == copy_from_user(&invokefd, param, size)); if (err) goto bail; VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, fdata->mode, 0, &invokefd, fdata))); if (err) goto bail; break; case FASTRPC_IOCTL_MMAP: VERIFY(err, 0 == copy_from_user(&mmap, param, sizeof(mmap))); if (err) goto bail; VERIFY(err, 0 == (err = fastrpc_internal_mmap(me, fdata, &mmap))); if (err) goto bail; VERIFY(err, 0 == copy_to_user(param, &mmap, sizeof(mmap))); if (err) goto bail; break; case FASTRPC_IOCTL_MUNMAP: VERIFY(err, 0 == copy_from_user(&munmap, param, sizeof(munmap))); if (err) goto bail; VERIFY(err, 0 == (err = fastrpc_internal_munmap(me, fdata, &munmap))); if (err) goto bail; break; case FASTRPC_IOCTL_SETMODE: switch ((uint32_t)ioctl_param) { case FASTRPC_MODE_PARALLEL: case FASTRPC_MODE_SERIAL: fdata->mode = (uint32_t)ioctl_param; break; default: err = -ENOTTY; break; } break; case FASTRPC_IOCTL_INIT: VERIFY(err, 0 == copy_from_user(&init, param, sizeof(init))); if (err) goto bail; VERIFY(err, 0 == fastrpc_init_process(fdata, &init)); if (err) goto bail; break; default: err = -ENOTTY; break; } bail: return err; } static int fastrpc_restart_notifier_cb(struct notifier_block *nb, unsigned long code, void *data) { struct fastrpc_apps *me = &gfa; struct fastrpc_channel_context *ctx; int cid; ctx = container_of(nb, struct fastrpc_channel_context, nb); cid = ctx - &me->channel[0]; if (code == SUBSYS_BEFORE_SHUTDOWN) { mutex_lock(&me->smd_mutex); ctx->ssrcount++; if (ctx->chan) { smd_close(ctx->chan); ctx->chan = 0; pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name, MAJOR(me->dev_no), cid); } mutex_unlock(&me->smd_mutex); context_notify_all_users(&me->clst, cid); } return NOTIFY_DONE; } static const struct file_operations fops = { .open = fastrpc_device_open, .release = fastrpc_device_release, .unlocked_ioctl = fastrpc_device_ioctl, .compat_ioctl = compat_fastrpc_device_ioctl, }; static int __init fastrpc_device_init(void) { struct 
fastrpc_apps *me = &gfa; struct device_node *ion_node, *node, *pnode; struct platform_device *pdev; const u32 *addr; uint64_t size; uint32_t val; int i, err = 0; memset(me, 0, sizeof(*me)); VERIFY(err, 0 == fastrpc_init()); if (err) goto fastrpc_bail; VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS, DEVICE_NAME)); if (err) goto alloc_chrdev_bail; cdev_init(&me->cdev, &fops); me->cdev.owner = THIS_MODULE; VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), NUM_CHANNELS)); if (err) goto cdev_init_bail; me->class = class_create(THIS_MODULE, "fastrpc"); VERIFY(err, !IS_ERR(me->class)); if (err) goto class_create_bail; me->compat = (NULL == fops.compat_ioctl) ? 0 : 1; ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion"); if (ion_node) { for_each_available_child_of_node(ion_node, node) { if (of_property_read_u32(node, "reg", &val)) continue; if (val != ION_ADSP_HEAP_ID) continue; pdev = of_find_device_by_node(node); if (!pdev) break; pnode = of_parse_phandle(node, "linux,contiguous-region", 0); if (!pnode) break; addr = of_get_address(pnode, 0, &size, NULL); of_node_put(pnode); if (!addr) break; me->range.addr = cma_get_base(&pdev->dev); me->range.size = (size_t)size; break; } } for (i = 0; i < NUM_CHANNELS; i++) { me->channel[i].dev = device_create(me->class, NULL, MKDEV(MAJOR(me->dev_no), i), NULL, gcinfo[i].name); VERIFY(err, !IS_ERR(me->channel[i].dev)); if (err) goto device_create_bail; me->channel[i].ssrcount = 0; me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb, (void)subsys_notif_register_notifier(gcinfo[i].subsys, &me->channel[i].nb); } return 0; device_create_bail: class_destroy(me->class); class_create_bail: cdev_del(&me->cdev); cdev_init_bail: unregister_chrdev_region(me->dev_no, NUM_CHANNELS); alloc_chrdev_bail: fastrpc_deinit(); fastrpc_bail: return err; } static void __exit fastrpc_device_exit(void) { struct fastrpc_apps *me = &gfa; int i; context_list_dtor(me, &me->clst); fastrpc_deinit(); for (i = 0; 
i < NUM_CHANNELS; i++) { device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i)); subsys_notif_unregister_notifier(gcinfo[i].subsys, &me->channel[i].nb); } class_destroy(me->class); cdev_del(&me->cdev); unregister_chrdev_region(me->dev_no, NUM_CHANNELS); } late_initcall(fastrpc_device_init); module_exit(fastrpc_device_exit); MODULE_LICENSE("GPL v2");
gpl-2.0
sac23/Sacs_Stock_Kernel
arch/arm/mach-msm/qdsp5v2/audio_amrnb_in.c
619
23940
/* * amrnb audio input device * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <asm/atomic.h> #include <asm/ioctls.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/msm_audio_amrnb.h> #include <linux/android_pmem.h> #include <linux/memory_alloc.h> #include <mach/iommu.h> #include <mach/iommu_domains.h> #include <mach/msm_adsp.h> #include <mach/socinfo.h> #include <mach/qdsp5v2/qdsp5audreccmdi.h> #include <mach/qdsp5v2/qdsp5audrecmsg.h> #include <mach/qdsp5v2/audpreproc.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/debug_mm.h> #include <mach/msm_memtypes.h> /* FRAME_NUM must be a power of two */ #define FRAME_NUM (8) #define FRAME_SIZE (22 * 2) /* 36 bytes data */ #define DMASZ (FRAME_SIZE * FRAME_NUM) struct buffer { void *data; uint32_t size; uint32_t read; uint32_t addr; }; struct audio_in { struct buffer in[FRAME_NUM]; spinlock_t dsp_lock; atomic_t in_bytes; atomic_t in_samples; struct mutex lock; struct mutex read_lock; wait_queue_head_t wait; wait_queue_head_t wait_enable; struct msm_adsp_module *audrec; struct audrec_session_info session_info; /*audrec session info*/ /* configuration to use on next enable */ uint32_t buffer_size; /* Frame size (36 bytes) */ uint32_t enc_type; int dtx_mode; uint32_t frame_format; uint32_t used_mode; uint32_t rec_mode; 
uint32_t dsp_cnt; uint32_t in_head; /* next buffer dsp will write */ uint32_t in_tail; /* next buffer read() will read */ uint32_t in_count; /* number of buffers available to read() */ uint32_t mode; const char *module_name; unsigned queue_ids; uint16_t enc_id; uint16_t source; /* Encoding source bit mask */ uint32_t device_events; uint32_t in_call; uint32_t dev_cnt; int voice_state; spinlock_t dev_lock; /* data allocated for various buffers */ char *data; dma_addr_t phys; void *map_v_read; int opened; int enabled; int running; int stopped; /* set when stopped, cleared on flush */ char *build_id; }; struct audio_frame { uint16_t frame_count_lsw; uint16_t frame_count_msw; uint16_t frame_length; uint16_t erased_pcm; unsigned char raw_bitstream[]; /* samples */ } __attribute__((packed)); /* Audrec Queue command sent macro's */ #define audrec_send_bitstreamqueue(audio, cmd, len) \ msm_adsp_write(audio->audrec, ((audio->queue_ids & 0xFFFF0000) >> 16),\ cmd, len) #define audrec_send_audrecqueue(audio, cmd, len) \ msm_adsp_write(audio->audrec, (audio->queue_ids & 0x0000FFFF),\ cmd, len) struct audio_in the_audio_amrnb_in; /* DSP command send functions */ static int audamrnb_in_enc_config(struct audio_in *audio, int enable); static int audamrnb_in_param_config(struct audio_in *audio); static int audamrnb_in_mem_config(struct audio_in *audio); static int audamrnb_in_record_config(struct audio_in *audio, int enable); static int audamrnb_dsp_read_buffer(struct audio_in *audio, uint32_t read_cnt); static void audamrnb_in_get_dsp_frames(struct audio_in *audio); static void audamrnb_in_flush(struct audio_in *audio); static void amrnb_in_listener(u32 evt_id, union auddev_evt_data *evt_payload, void *private_data) { struct audio_in *audio = (struct audio_in *) private_data; unsigned long flags; MM_DBG("evt_id = 0x%8x\n", evt_id); switch (evt_id) { case AUDDEV_EVT_DEV_RDY: { MM_DBG("AUDDEV_EVT_DEV_RDY\n"); spin_lock_irqsave(&audio->dev_lock, flags); audio->dev_cnt++; if 
(!audio->in_call) audio->source |= (0x1 << evt_payload->routing_id); spin_unlock_irqrestore(&audio->dev_lock, flags); if ((audio->running == 1) && (audio->enabled == 1)) audamrnb_in_record_config(audio, 1); break; } case AUDDEV_EVT_DEV_RLS: { MM_DBG("AUDDEV_EVT_DEV_RLS\n"); spin_lock_irqsave(&audio->dev_lock, flags); audio->dev_cnt--; if (!audio->in_call) audio->source &= ~(0x1 << evt_payload->routing_id); spin_unlock_irqrestore(&audio->dev_lock, flags); if ((!audio->running) || (!audio->enabled)) break; /* Turn of as per source */ if (audio->source) audamrnb_in_record_config(audio, 1); else /* Turn off all */ audamrnb_in_record_config(audio, 0); break; } case AUDDEV_EVT_VOICE_STATE_CHG: { MM_DBG("AUDDEV_EVT_VOICE_STATE_CHG, state = %d\n", evt_payload->voice_state); audio->voice_state = evt_payload->voice_state; if (audio->in_call && audio->running) { if (audio->voice_state == VOICE_STATE_INCALL) audamrnb_in_record_config(audio, 1); else if (audio->voice_state == VOICE_STATE_OFFCALL) { audamrnb_in_record_config(audio, 0); wake_up(&audio->wait); } } break; } default: MM_ERR("wrong event %d\n", evt_id); break; } } /* ------------------- dsp preproc event handler--------------------- */ static void audpreproc_dsp_event(void *data, unsigned id, void *msg) { struct audio_in *audio = data; switch (id) { case AUDPREPROC_ERROR_MSG: { struct audpreproc_err_msg *err_msg = msg; MM_ERR("ERROR_MSG: stream id %d err idx %d\n", err_msg->stream_id, err_msg->aud_preproc_err_idx); /* Error case */ wake_up(&audio->wait_enable); break; } case AUDPREPROC_CMD_CFG_DONE_MSG: { MM_DBG("CMD_CFG_DONE_MSG \n"); break; } case AUDPREPROC_CMD_ENC_CFG_DONE_MSG: { struct audpreproc_cmd_enc_cfg_done_msg *enc_cfg_msg = msg; MM_DBG("CMD_ENC_CFG_DONE_MSG: stream id %d enc type \ 0x%8x\n", enc_cfg_msg->stream_id, enc_cfg_msg->rec_enc_type); /* Encoder enable success */ if (enc_cfg_msg->rec_enc_type & ENCODE_ENABLE) audamrnb_in_param_config(audio); else { /* Encoder disable success */ audio->running = 
0; audamrnb_in_record_config(audio, 0); } break; } case AUDPREPROC_CMD_ENC_PARAM_CFG_DONE_MSG: { MM_DBG("CMD_ENC_PARAM_CFG_DONE_MSG \n"); audamrnb_in_mem_config(audio); break; } case AUDPREPROC_AFE_CMD_AUDIO_RECORD_CFG_DONE_MSG: { MM_DBG("AFE_CMD_AUDIO_RECORD_CFG_DONE_MSG \n"); wake_up(&audio->wait_enable); break; } default: MM_ERR("Unknown Event id %d\n", id); } } /* ------------------- dsp audrec event handler--------------------- */ static void audrec_dsp_event(void *data, unsigned id, size_t len, void (*getevent)(void *ptr, size_t len)) { struct audio_in *audio = data; switch (id) { case AUDREC_CMD_MEM_CFG_DONE_MSG: { MM_DBG("CMD_MEM_CFG_DONE MSG DONE\n"); audio->running = 1; if ((!audio->in_call && (audio->dev_cnt > 0)) || (audio->in_call && (audio->voice_state == VOICE_STATE_INCALL))) audamrnb_in_record_config(audio, 1); break; } case AUDREC_FATAL_ERR_MSG: { struct audrec_fatal_err_msg fatal_err_msg; getevent(&fatal_err_msg, AUDREC_FATAL_ERR_MSG_LEN); MM_ERR("FATAL_ERR_MSG: err id %d\n", fatal_err_msg.audrec_err_id); /* Error stop the encoder */ audio->stopped = 1; wake_up(&audio->wait); break; } case AUDREC_UP_PACKET_READY_MSG: { struct audrec_up_pkt_ready_msg pkt_ready_msg; getevent(&pkt_ready_msg, AUDREC_UP_PACKET_READY_MSG_LEN); MM_DBG("UP_PACKET_READY_MSG: write cnt lsw %d \ write cnt msw %d read cnt lsw %d read cnt msw %d \n",\ pkt_ready_msg.audrec_packet_write_cnt_lsw, \ pkt_ready_msg.audrec_packet_write_cnt_msw, \ pkt_ready_msg.audrec_up_prev_read_cnt_lsw, \ pkt_ready_msg.audrec_up_prev_read_cnt_msw); audamrnb_in_get_dsp_frames(audio); break; } case ADSP_MESSAGE_ID: { MM_DBG("Received ADSP event:module audrectask\n"); break; } default: MM_ERR("Unknown Event id %d\n", id); } } static void audamrnb_in_get_dsp_frames(struct audio_in *audio) { struct audio_frame *frame; uint32_t index; unsigned long flags; index = audio->in_head; frame = (void *) (((char *)audio->in[index].data) - \ sizeof(*frame)); spin_lock_irqsave(&audio->dsp_lock, flags); 
audio->in[index].size = frame->frame_length; /* statistics of read */ atomic_add(audio->in[index].size, &audio->in_bytes); atomic_add(1, &audio->in_samples); audio->in_head = (audio->in_head + 1) & (FRAME_NUM - 1); /* If overflow, move the tail index foward. */ if (audio->in_head == audio->in_tail) audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1); else audio->in_count++; audamrnb_dsp_read_buffer(audio, audio->dsp_cnt++); spin_unlock_irqrestore(&audio->dsp_lock, flags); wake_up(&audio->wait); } struct msm_adsp_ops audrec_amrnb_adsp_ops = { .event = audrec_dsp_event, }; static int audamrnb_in_enc_config(struct audio_in *audio, int enable) { struct audpreproc_audrec_cmd_enc_cfg cmd; memset(&cmd, 0, sizeof(cmd)); if (audio->build_id[17] == '1') { cmd.cmd_id = AUDPREPROC_AUDREC_CMD_ENC_CFG_2; MM_ERR("sending AUDPREPROC_AUDREC_CMD_ENC_CFG_2 command"); } else { cmd.cmd_id = AUDPREPROC_AUDREC_CMD_ENC_CFG; MM_ERR("sending AUDPREPROC_AUDREC_CMD_ENC_CFG command"); } cmd.stream_id = audio->enc_id; if (enable) cmd.audrec_enc_type = audio->enc_type | ENCODE_ENABLE; else cmd.audrec_enc_type &= ~(ENCODE_ENABLE); return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd)); } static int audamrnb_in_param_config(struct audio_in *audio) { struct audpreproc_audrec_cmd_parm_cfg_amrnb cmd; memset(&cmd, 0, sizeof(cmd)); cmd.common.cmd_id = AUDPREPROC_AUDREC_CMD_PARAM_CFG; cmd.common.stream_id = audio->enc_id; cmd.dtx_mode = audio->dtx_mode; cmd.test_mode = -1; /* Default set to -1 */ cmd.used_mode = audio->used_mode; return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd)); } /* To Do: msm_snddev_route_enc(audio->enc_id); */ static int audamrnb_in_record_config(struct audio_in *audio, int enable) { struct audpreproc_afe_cmd_audio_record_cfg cmd; memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDPREPROC_AFE_CMD_AUDIO_RECORD_CFG; cmd.stream_id = audio->enc_id; if (enable) cmd.destination_activity = AUDIO_RECORDING_TURN_ON; else cmd.destination_activity = AUDIO_RECORDING_TURN_OFF; 
cmd.source_mix_mask = audio->source; if (audio->enc_id == 2) { if ((cmd.source_mix_mask & INTERNAL_CODEC_TX_SOURCE_MIX_MASK) || (cmd.source_mix_mask & AUX_CODEC_TX_SOURCE_MIX_MASK) || (cmd.source_mix_mask & VOICE_UL_SOURCE_MIX_MASK) || (cmd.source_mix_mask & VOICE_DL_SOURCE_MIX_MASK)) { cmd.pipe_id = SOURCE_PIPE_1; } if (cmd.source_mix_mask & AUDPP_A2DP_PIPE_SOURCE_MIX_MASK) cmd.pipe_id |= SOURCE_PIPE_0; } return audpreproc_send_audreccmdqueue(&cmd, sizeof(cmd)); } static int audamrnb_in_mem_config(struct audio_in *audio) { struct audrec_cmd_arecmem_cfg cmd; uint16_t *data = (void *) audio->data; int n; memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDREC_CMD_MEM_CFG_CMD; cmd.audrec_up_pkt_intm_count = 1; cmd.audrec_ext_pkt_start_addr_msw = audio->phys >> 16; cmd.audrec_ext_pkt_start_addr_lsw = audio->phys; cmd.audrec_ext_pkt_buf_number = FRAME_NUM; /* prepare buffer pointers: * 36 bytes amrnb packet + 4 halfword header */ for (n = 0; n < FRAME_NUM; n++) { audio->in[n].data = data + 4; data += (FRAME_SIZE/2); /* word increment */ MM_DBG("0x%8x\n", (int)(audio->in[n].data - 8)); } return audrec_send_audrecqueue(audio, &cmd, sizeof(cmd)); } static int audamrnb_dsp_read_buffer(struct audio_in *audio, uint32_t read_cnt) { struct up_audrec_packet_ext_ptr cmd; memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = UP_AUDREC_PACKET_EXT_PTR; cmd.audrec_up_curr_read_count_msw = read_cnt >> 16; cmd.audrec_up_curr_read_count_lsw = read_cnt; return audrec_send_bitstreamqueue(audio, &cmd, sizeof(cmd)); } /* must be called with audio->lock held */ static int audamrnb_in_enable(struct audio_in *audio) { if (audio->enabled) return 0; if (audpreproc_enable(audio->enc_id, &audpreproc_dsp_event, audio)) { MM_ERR("msm_adsp_enable(audpreproc) failed\n"); return -ENODEV; } if (msm_adsp_enable(audio->audrec)) { MM_ERR("msm_adsp_enable(audrec) failed\n"); audpreproc_disable(audio->enc_id, audio); return -ENODEV; } audio->enabled = 1; audamrnb_in_enc_config(audio, 1); return 0; } /* must be called with 
audio->lock held */ static int audamrnb_in_disable(struct audio_in *audio) { if (audio->enabled) { audio->enabled = 0; audamrnb_in_enc_config(audio, 0); wake_up(&audio->wait); wait_event_interruptible_timeout(audio->wait_enable, audio->running == 0, 1*HZ); msm_adsp_disable(audio->audrec); audpreproc_disable(audio->enc_id, audio); } return 0; } static void audamrnb_in_flush(struct audio_in *audio) { int i; audio->dsp_cnt = 0; audio->in_head = 0; audio->in_tail = 0; audio->in_count = 0; for (i = 0; i < FRAME_NUM; i++) { audio->in[i].size = 0; audio->in[i].read = 0; } MM_DBG("in_bytes %d\n", atomic_read(&audio->in_bytes)); MM_DBG("in_samples %d\n", atomic_read(&audio->in_samples)); atomic_set(&audio->in_bytes, 0); atomic_set(&audio->in_samples, 0); } /* ------------------- device --------------------- */ static long audamrnb_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct audio_in *audio = file->private_data; int rc = 0; if (cmd == AUDIO_GET_STATS) { struct msm_audio_stats stats; stats.byte_count = atomic_read(&audio->in_bytes); stats.sample_count = atomic_read(&audio->in_samples); if (copy_to_user((void *) arg, &stats, sizeof(stats))) return -EFAULT; return rc; } mutex_lock(&audio->lock); switch (cmd) { case AUDIO_START: { uint32_t freq; freq = 48000; MM_DBG("AUDIO_START\n"); if (audio->in_call && (audio->voice_state != VOICE_STATE_INCALL)) { rc = -EPERM; break; } rc = msm_snddev_request_freq(&freq, audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("sample rate configured %d\n", freq); if (rc < 0) { MM_DBG(" Sample rate can not be set, return code %d\n", rc); msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("msm_snddev_withdraw_freq\n"); break; } /*update aurec session info in audpreproc layer*/ audio->session_info.session_id = audio->enc_id; /*amrnb works only on 8KHz*/ audio->session_info.sampling_freq = 8000; audpreproc_update_audrec_info(&audio->session_info); rc = audamrnb_in_enable(audio); if (!rc) { 
rc = wait_event_interruptible_timeout(audio->wait_enable, audio->running != 0, 1*HZ); MM_DBG("state %d rc = %d\n", audio->running, rc); if (audio->running == 0) rc = -ENODEV; else rc = 0; } audio->stopped = 0; break; } case AUDIO_STOP: { /*reset the sampling frequency information at audpreproc layer*/ audio->session_info.sampling_freq = 0; audpreproc_update_audrec_info(&audio->session_info); rc = audamrnb_in_disable(audio); rc = msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); MM_DBG("msm_snddev_withdraw_freq\n"); audio->stopped = 1; break; } case AUDIO_FLUSH: { if (audio->stopped) { /* Make sure we're stopped and we wake any threads * that might be blocked holding the read_lock. * While audio->stopped read threads will always * exit immediately. */ wake_up(&audio->wait); mutex_lock(&audio->read_lock); audamrnb_in_flush(audio); mutex_unlock(&audio->read_lock); } break; } case AUDIO_SET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } /* Allow only single frame */ if (cfg.buffer_size != (FRAME_SIZE - 8)) rc = -EINVAL; else audio->buffer_size = cfg.buffer_size; break; } case AUDIO_GET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = audio->buffer_size; cfg.buffer_count = FRAME_NUM; if (copy_to_user((void *) arg, &cfg, sizeof(cfg))) rc = -EFAULT; break; } case AUDIO_GET_AMRNB_ENC_CONFIG_V2: { struct msm_audio_amrnb_enc_config_v2 cfg; memset(&cfg, 0, sizeof(cfg)); cfg.dtx_enable = ((audio->dtx_mode == -1) ? 
1 : 0); cfg.band_mode = audio->used_mode; cfg.frame_format = audio->frame_format; if (copy_to_user((void *) arg, &cfg, sizeof(cfg))) rc = -EFAULT; break; } case AUDIO_SET_AMRNB_ENC_CONFIG_V2: { struct msm_audio_amrnb_enc_config_v2 cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } /* DSP does not support any other than default format */ if (audio->frame_format != cfg.frame_format) { rc = -EINVAL; break; } if (cfg.dtx_enable == 0) audio->dtx_mode = 0; else if (cfg.dtx_enable == 1) audio->dtx_mode = -1; else { rc = -EINVAL; break; } audio->used_mode = cfg.band_mode; break; } case AUDIO_SET_INCALL: { struct msm_voicerec_mode cfg; unsigned long flags; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } if (cfg.rec_mode != VOC_REC_BOTH && cfg.rec_mode != VOC_REC_UPLINK && cfg.rec_mode != VOC_REC_DOWNLINK) { MM_ERR("invalid rec_mode\n"); rc = -EINVAL; break; } else { spin_lock_irqsave(&audio->dev_lock, flags); if (cfg.rec_mode == VOC_REC_UPLINK) audio->source = VOICE_UL_SOURCE_MIX_MASK; else if (cfg.rec_mode == VOC_REC_DOWNLINK) audio->source = VOICE_DL_SOURCE_MIX_MASK; else audio->source = VOICE_DL_SOURCE_MIX_MASK | VOICE_UL_SOURCE_MIX_MASK ; audio->in_call = 1; spin_unlock_irqrestore(&audio->dev_lock, flags); } break; } case AUDIO_GET_SESSION_ID: { if (copy_to_user((void *) arg, &audio->enc_id, sizeof(unsigned short))) { rc = -EFAULT; } break; } default: rc = -EINVAL; } mutex_unlock(&audio->lock); return rc; } static ssize_t audamrnb_in_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct audio_in *audio = file->private_data; unsigned long flags; const char __user *start = buf; void *data; uint32_t index; uint32_t size; int rc = 0; mutex_lock(&audio->read_lock); while (count > 0) { rc = wait_event_interruptible( audio->wait, (audio->in_count > 0) || audio->stopped || (audio->in_call && audio->running && (audio->voice_state == VOICE_STATE_OFFCALL))); if (rc < 0) break; if 
(!audio->in_count) { if (audio->stopped) { rc = 0;/* End of File */ break; } else if (audio->in_call && audio->running && (audio->voice_state == VOICE_STATE_OFFCALL)) { MM_DBG("Not Permitted Voice Terminated\n"); rc = -EPERM; /* Voice Call stopped */ break; } } index = audio->in_tail; data = (uint8_t *) audio->in[index].data; size = audio->in[index].size; if (count >= size) { if (copy_to_user(buf, data, size)) { rc = -EFAULT; break; } spin_lock_irqsave(&audio->dsp_lock, flags); if (index != audio->in_tail) { /* overrun -- data is * invalid and we need to retry */ spin_unlock_irqrestore(&audio->dsp_lock, flags); continue; } audio->in[index].size = 0; audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1); audio->in_count--; spin_unlock_irqrestore(&audio->dsp_lock, flags); count -= size; buf += size; } else { MM_ERR("short read\n"); break; } } mutex_unlock(&audio->read_lock); if (buf > start) return buf - start; return rc; } static ssize_t audamrnb_in_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { return -EINVAL; } static int audamrnb_in_release(struct inode *inode, struct file *file) { struct audio_in *audio = file->private_data; MM_DBG("\n"); mutex_lock(&audio->lock); audio->in_call = 0; /* with draw frequency for session incase not stopped the driver */ msm_snddev_withdraw_freq(audio->enc_id, SNDDEV_CAP_TX, AUDDEV_CLNT_ENC); auddev_unregister_evt_listner(AUDDEV_CLNT_ENC, audio->enc_id); /*reset the sampling frequency information at audpreproc layer*/ audio->session_info.sampling_freq = 0; audpreproc_update_audrec_info(&audio->session_info); audamrnb_in_disable(audio); audamrnb_in_flush(audio); msm_adsp_put(audio->audrec); audpreproc_aenc_free(audio->enc_id); audio->audrec = NULL; audio->opened = 0; if (audio->data) { iounmap(audio->map_v_read); free_contiguous_memory_by_paddr(audio->phys); audio->data = NULL; } mutex_unlock(&audio->lock); return 0; } static int audamrnb_in_open(struct inode *inode, struct file *file) { struct 
audio_in *audio = &the_audio_amrnb_in; int rc; int encid; mutex_lock(&audio->lock); if (audio->opened) { rc = -EBUSY; goto done; } audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K); if (audio->phys) { audio->map_v_read = ioremap(audio->phys, DMASZ); if (IS_ERR(audio->map_v_read)) { MM_ERR("could not map DMA buffers\n"); rc = -ENOMEM; free_contiguous_memory_by_paddr(audio->phys); goto done; } audio->data = audio->map_v_read; } else { MM_ERR("could not allocate DMA buffers\n"); rc = -ENOMEM; goto done; } MM_DBG("Memory addr = 0x%8x phy addr = 0x%8x\n",\ (int) audio->data, (int) audio->phys); if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { rc = -EACCES; MM_ERR("Non tunnel encoding is not supported\n"); goto done; } else if (!(file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { audio->mode = MSM_AUD_ENC_MODE_TUNNEL; MM_DBG("Opened for tunnel mode encoding\n"); } else { rc = -EACCES; goto done; } /* Settings will be re-config at AUDIO_SET_CONFIG, * but at least we need to have initial config */ audio->buffer_size = (FRAME_SIZE - 8); audio->enc_type = ENC_TYPE_AMRNB | audio->mode; audio->dtx_mode = -1; audio->frame_format = 0; audio->used_mode = 7; /* Bit Rate 12.2 kbps MR122 */ encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name, &audio->queue_ids); if (encid < 0) { MM_ERR("No free encoder available\n"); rc = -ENODEV; goto done; } audio->enc_id = encid; rc = msm_adsp_get(audio->module_name, &audio->audrec, &audrec_amrnb_adsp_ops, audio); if (rc) { audpreproc_aenc_free(audio->enc_id); goto done; } audio->stopped = 0; audio->source = 0; audamrnb_in_flush(audio); audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS | AUDDEV_EVT_VOICE_STATE_CHG; audio->voice_state = msm_get_voice_state(); rc = auddev_register_evt_listner(audio->device_events, AUDDEV_CLNT_ENC, audio->enc_id, amrnb_in_listener, (void *) audio); if (rc) { MM_ERR("failed to register device event listener\n"); goto evt_error; } audio->build_id = 
socinfo_get_build_id(); MM_DBG("Modem build id = %s\n", audio->build_id); file->private_data = audio; audio->opened = 1; done: mutex_unlock(&audio->lock); return rc; evt_error: msm_adsp_put(audio->audrec); audpreproc_aenc_free(audio->enc_id); mutex_unlock(&audio->lock); return rc; } static const struct file_operations audio_in_fops = { .owner = THIS_MODULE, .open = audamrnb_in_open, .release = audamrnb_in_release, .read = audamrnb_in_read, .write = audamrnb_in_write, .unlocked_ioctl = audamrnb_in_ioctl, }; struct miscdevice audio_amrnb_in_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_amrnb_in", .fops = &audio_in_fops, }; static int __init audamrnb_in_init(void) { mutex_init(&the_audio_amrnb_in.lock); mutex_init(&the_audio_amrnb_in.read_lock); spin_lock_init(&the_audio_amrnb_in.dsp_lock); spin_lock_init(&the_audio_amrnb_in.dev_lock); init_waitqueue_head(&the_audio_amrnb_in.wait); init_waitqueue_head(&the_audio_amrnb_in.wait_enable); return misc_register(&audio_amrnb_in_misc); } device_initcall(audamrnb_in_init);
gpl-2.0
kissthink/aufs4-linux
fs/hfsplus/xattr_trusted.c
619
1161
/*
 * linux/fs/hfsplus/xattr_trusted.c
 *
 * Vyacheslav Dubeyko <slava@dubeyko.com>
 *
 * Handler for trusted extended attributes.
 *
 * These are thin shims that prepend/strip the "trusted." namespace
 * prefix and delegate all real work to the generic hfsplus xattr
 * helpers in xattr.c.
 */

#include <linux/nls.h>

#include "hfsplus_fs.h"
#include "xattr.h"

/*
 * Get a "trusted." xattr.  Delegates to hfsplus_getxattr(), which
 * rebuilds the full on-disk name from the prefix + suffix.
 */
static int hfsplus_trusted_getxattr(struct dentry *dentry, const char *name,
					void *buffer, size_t size, int type)
{
	return hfsplus_getxattr(dentry, name, buffer, size,
				XATTR_TRUSTED_PREFIX,
				XATTR_TRUSTED_PREFIX_LEN);
}

/*
 * Set (or create/replace, per @flags) a "trusted." xattr via
 * hfsplus_setxattr().
 */
static int hfsplus_trusted_setxattr(struct dentry *dentry, const char *name,
		const void *buffer, size_t size, int flags, int type)
{
	return hfsplus_setxattr(dentry, name, buffer, size, flags,
				XATTR_TRUSTED_PREFIX,
				XATTR_TRUSTED_PREFIX_LEN);
}

static size_t hfsplus_trusted_listxattr(struct dentry *dentry, char *list,
		size_t list_size, const char *name, size_t name_len, int type)
{
	/*
	 * This method is never called: hfsplus wires up its own
	 * hfsplus_listxattr() instead of generic_listxattr(), so the
	 * per-handler ->list hook is bypassed entirely.
	 */
	return -EOPNOTSUPP;
}

const struct xattr_handler hfsplus_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.list	= hfsplus_trusted_listxattr,
	.get	= hfsplus_trusted_getxattr,
	.set	= hfsplus_trusted_setxattr,
};
gpl-2.0
OPNay/android_kernel_samsung_palladio
drivers/char/ip2/i2lib.c
1643
66114
/******************************************************************************* * * (c) 1999 by Computone Corporation * ******************************************************************************** * * * PACKAGE: Linux tty Device Driver for IntelliPort family of multiport * serial I/O controllers. * * DESCRIPTION: High-level interface code for the device driver. Uses the * Extremely Low Level Interface Support (i2ellis.c). Provides an * interface to the standard loadware, to support drivers or * application code. (This is included source code, not a separate * compilation module.) * *******************************************************************************/ //------------------------------------------------------------------------------ // Note on Strategy: // Once the board has been initialized, it will interrupt us when: // 1) It has something in the fifo for us to read (incoming data, flow control // packets, or whatever). // 2) It has stripped whatever we have sent last time in the FIFO (and // consequently is ready for more). // // Note also that the buffer sizes declared in i2lib.h are VERY SMALL. This // worsens performance considerably, but is done so that a great many channels // might use only a little memory. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Revision History: // // 0.00 - 4/16/91 --- First Draft // 0.01 - 4/29/91 --- 1st beta release // 0.02 - 6/14/91 --- Changes to allow small model compilation // 0.03 - 6/17/91 MAG Break reporting protected from interrupts routines with // in-line asm added for moving data to/from ring buffers, // replacing a variety of methods used previously. // 0.04 - 6/21/91 MAG Initial flow-control packets not queued until // i2_enable_interrupts time. Former versions would enqueue // them at i2_init_channel time, before we knew how many // channels were supposed to exist! 
// 0.05 - 10/12/91 MAG Major changes: works through the ellis.c routines now; // supports new 16-bit protocol and expandable boards. // - 10/24/91 MAG Most changes in place and stable. // 0.06 - 2/20/92 MAG Format of CMD_HOTACK corrected: the command takes no // argument. // 0.07 -- 3/11/92 MAG Support added to store special packet types at interrupt // level (mostly responses to specific commands.) // 0.08 -- 3/30/92 MAG Support added for STAT_MODEM packet // 0.09 -- 6/24/93 MAG i2Link... needed to update number of boards BEFORE // turning on the interrupt. // 0.10 -- 6/25/93 MAG To avoid gruesome death from a bad board, we sanity check // some incoming. // // 1.1 - 12/25/96 AKM Linux version. // - 10/09/98 DMC Revised Linux version. //------------------------------------------------------------------------------ //************ //* Includes * //************ #include <linux/sched.h> #include "i2lib.h" //*********************** //* Function Prototypes * //*********************** static void i2QueueNeeds(i2eBordStrPtr, i2ChanStrPtr, int); static i2ChanStrPtr i2DeQueueNeeds(i2eBordStrPtr, int ); static void i2StripFifo(i2eBordStrPtr); static void i2StuffFifoBypass(i2eBordStrPtr); static void i2StuffFifoFlow(i2eBordStrPtr); static void i2StuffFifoInline(i2eBordStrPtr); static int i2RetryFlushOutput(i2ChanStrPtr); // Not a documented part of the library routines (careful...) but the Diagnostic // i2diag.c finds them useful to help the throughput in certain limited // single-threaded operations. 
static void iiSendPendingMail(i2eBordStrPtr); static void serviceOutgoingFifo(i2eBordStrPtr); // Functions defined in ip2.c as part of interrupt handling static void do_input(struct work_struct *); static void do_status(struct work_struct *); //*************** //* Debug Data * //*************** #ifdef DEBUG_FIFO unsigned char DBGBuf[0x4000]; unsigned short I = 0; static void WriteDBGBuf(char *s, unsigned char *src, unsigned short n ) { char *p = src; // XXX: We need a spin lock here if we ever use this again while (*s) { // copy label DBGBuf[I] = *s++; I = I++ & 0x3fff; } while (n--) { // copy data DBGBuf[I] = *p++; I = I++ & 0x3fff; } } static void fatality(i2eBordStrPtr pB ) { int i; for (i=0;i<sizeof(DBGBuf);i++) { if ((i%16) == 0) printk("\n%4x:",i); printk("%02x ",DBGBuf[i]); } printk("\n"); for (i=0;i<sizeof(DBGBuf);i++) { if ((i%16) == 0) printk("\n%4x:",i); if (DBGBuf[i] >= ' ' && DBGBuf[i] <= '~') { printk(" %c ",DBGBuf[i]); } else { printk(" . "); } } printk("\n"); printk("Last index %x\n",I); } #endif /* DEBUG_FIFO */ //******** //* Code * //******** static inline int i2Validate ( i2ChanStrPtr pCh ) { //ip2trace(pCh->port_index, ITRC_VERIFY,ITRC_ENTER,2,pCh->validity, // (CHANNEL_MAGIC | CHANNEL_SUPPORT)); return ((pCh->validity & (CHANNEL_MAGIC_BITS | CHANNEL_SUPPORT)) == (CHANNEL_MAGIC | CHANNEL_SUPPORT)); } static void iiSendPendingMail_t(unsigned long data) { i2eBordStrPtr pB = (i2eBordStrPtr)data; iiSendPendingMail(pB); } //****************************************************************************** // Function: iiSendPendingMail(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // If any outgoing mail bits are set and there is outgoing mailbox is empty, // send the mail and clear the bits. 
//****************************************************************************** static void iiSendPendingMail(i2eBordStrPtr pB) { if (pB->i2eOutMailWaiting && (!pB->i2eWaitingForEmptyFifo) ) { if (iiTrySendMail(pB, pB->i2eOutMailWaiting)) { /* If we were already waiting for fifo to empty, * or just sent MB_OUT_STUFFED, then we are * still waiting for it to empty, until we should * receive an MB_IN_STRIPPED from the board. */ pB->i2eWaitingForEmptyFifo |= (pB->i2eOutMailWaiting & MB_OUT_STUFFED); pB->i2eOutMailWaiting = 0; pB->SendPendingRetry = 0; } else { /* The only time we hit this area is when "iiTrySendMail" has failed. That only occurs when the outbound mailbox is still busy with the last message. We take a short breather to let the board catch up with itself and then try again. 16 Retries is the limit - then we got a borked board. /\/\|=mhw=|\/\/ */ if( ++pB->SendPendingRetry < 16 ) { setup_timer(&pB->SendPendingTimer, iiSendPendingMail_t, (unsigned long)pB); mod_timer(&pB->SendPendingTimer, jiffies + 1); } else { printk( KERN_ERR "IP2: iiSendPendingMail unable to queue outbound mail\n" ); } } } } //****************************************************************************** // Function: i2InitChannels(pB, nChannels, pCh) // Parameters: Pointer to Ellis Board structure // Number of channels to initialize // Pointer to first element in an array of channel structures // Returns: Success or failure // // Description: // // This function patches pointers, back-pointers, and initializes all the // elements in the channel structure array. // // This should be run after the board structure is initialized, through having // loaded the standard loadware (otherwise it complains). // // In any case, it must be done before any serious work begins initializing the // irq's or sending commands... 
//
//******************************************************************************
static int
i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
{
	int index, stuffIndex;
	i2ChanStrPtr *ppCh;

	// Refuse to run on an uninitialized board or one that has not had the
	// standard loadware loaded yet (I2_COMPLETE records the error and
	// returns from this function).
	if (pB->i2eValid != I2E_MAGIC) {
		I2_COMPLETE(pB, I2EE_BADMAGIC);
	}
	if (pB->i2eState != II_STATE_STDLOADED) {
		I2_COMPLETE(pB, I2EE_BADSTATE);
	}

	rwlock_init(&pB->read_fifo_spinlock);
	rwlock_init(&pB->write_fifo_spinlock);
	rwlock_init(&pB->Dbuf_spinlock);
	rwlock_init(&pB->Bbuf_spinlock);
	rwlock_init(&pB->Fbuf_spinlock);

	// NO LOCK needed yet - this is init

	pB->i2eChannelPtr = pCh;
	pB->i2eChannelCnt = nChannels;

	// Reset the three per-board service queues (flow / data / bypass)
	// to the empty state (strip index == stuff index).
	pB->i2Fbuf_strip = pB->i2Fbuf_stuff = 0;
	pB->i2Dbuf_strip = pB->i2Dbuf_stuff = 0;
	pB->i2Bbuf_strip = pB->i2Bbuf_stuff = 0;

	pB->SendPendingRetry = 0;

	memset ( pCh, 0, sizeof (i2ChanStr) * nChannels );

	// Walk the board's channel map; each set bit is an existing port.
	// ppCh walks the flow-control queue so every supported channel gets
	// an initial flow-control packet queued.
	for (index = stuffIndex = 0, ppCh = (i2ChanStrPtr *)(pB->i2Fbuf);
		  nChannels && index < ABS_MOST_PORTS;
		  index++)
	{
		if ( !(pB->i2eChannelMap[index >> 4] & (1 << (index & 0xf)) ) ) {
			continue;
		}
		rwlock_init(&pCh->Ibuf_spinlock);
		rwlock_init(&pCh->Obuf_spinlock);
		rwlock_init(&pCh->Cbuf_spinlock);
		rwlock_init(&pCh->Pbuf_spinlock);
		// NO LOCK needed yet - this is init
		// Set up validity flag according to support level
		if (pB->i2eGoodMap[index >> 4] & (1 << (index & 0xf)) ) {
			pCh->validity = CHANNEL_MAGIC | CHANNEL_SUPPORT;
		} else {
			pCh->validity = CHANNEL_MAGIC;
		}
		pCh->pMyBord = pB;	/* Back-pointer */

		// Prepare an outgoing flow-control packet to send as soon as the chance
		// occurs.
		if ( pCh->validity & CHANNEL_SUPPORT ) {
			pCh->infl.hd.i2sChannel = index;
			pCh->infl.hd.i2sCount = 5;
			pCh->infl.hd.i2sType = PTYPE_BYPASS;
			pCh->infl.fcmd = 37;
			pCh->infl.asof = 0;
			pCh->infl.room = IBUF_SIZE - 1;

			pCh->whenSendFlow = (IBUF_SIZE/5)*4; // when 80% full

			// The following is similar to calling i2QueueNeeds, except that this
			// is done in longhand, since we are setting up initial conditions on
			// many channels at once.
			pCh->channelNeeds = NEED_FLOW;	// Since starting from scratch
			pCh->sinceLastFlow = 0;		// No bytes received since last flow
						// control packet was queued
			stuffIndex++;
			*ppCh++ = pCh;	// List this channel as needing
					// initial flow control packet sent
		}

		// Don't allow anything to be sent until the status packets come in from
		// the board.

		pCh->outfl.asof = 0;
		pCh->outfl.room = 0;

		// Initialize all the ring buffers

		pCh->Ibuf_stuff = pCh->Ibuf_strip = 0;
		pCh->Obuf_stuff = pCh->Obuf_strip = 0;
		pCh->Cbuf_stuff = pCh->Cbuf_strip = 0;

		memset( &pCh->icount, 0, sizeof (struct async_icount) );
		pCh->hotKeyIn = HOT_CLEAR;
		pCh->channelOptions = 0;
		pCh->bookMarks = 0;
		init_waitqueue_head(&pCh->pBookmarkWait);

		init_waitqueue_head(&pCh->open_wait);
		init_waitqueue_head(&pCh->close_wait);
		init_waitqueue_head(&pCh->delta_msr_wait);

		// Set base and divisor so default custom rate is 9600
		pCh->BaudBase = 921600;	// MAX for ST654, changed after we get
		pCh->BaudDivisor = 96;	// the boxids (UART types) later

		pCh->dataSetIn = 0;
		pCh->dataSetOut = 0;

		pCh->wopen = 0;
		pCh->throttled = 0;

		pCh->speed = CBR_9600;

		pCh->flags = 0;

		pCh->ClosingDelay = 5*HZ/10;
		pCh->ClosingWaitTime = 30*HZ;

		// Initialize task queue objects
		INIT_WORK(&pCh->tqueue_input, do_input);
		INIT_WORK(&pCh->tqueue_status, do_status);

#ifdef IP2DEBUG_TRACE
		pCh->trace = ip2trace;
#endif

		++pCh;
		--nChannels;
	}

	// No need to check for wrap here; this is initialization.
	// Publish how many channels were queued for initial flow control.
	pB->i2Fbuf_stuff = stuffIndex;
	I2_COMPLETE(pB, I2EE_GOOD);
}

//******************************************************************************
// Function:   i2DeQueueNeeds(pB, type)
// Parameters: Pointer to a board structure
//             type bit map: may include NEED_INLINE, NEED_BYPASS, or NEED_FLOW
// Returns:
// Pointer to a channel structure
//
// Description: Returns pointer struct of next channel that needs service of
// the type specified. Otherwise returns a NULL reference.
// //****************************************************************************** static i2ChanStrPtr i2DeQueueNeeds(i2eBordStrPtr pB, int type) { unsigned short queueIndex; unsigned long flags; i2ChanStrPtr pCh = NULL; switch(type) { case NEED_INLINE: write_lock_irqsave(&pB->Dbuf_spinlock, flags); if ( pB->i2Dbuf_stuff != pB->i2Dbuf_strip) { queueIndex = pB->i2Dbuf_strip; pCh = pB->i2Dbuf[queueIndex]; queueIndex++; if (queueIndex >= CH_QUEUE_SIZE) { queueIndex = 0; } pB->i2Dbuf_strip = queueIndex; pCh->channelNeeds &= ~NEED_INLINE; } write_unlock_irqrestore(&pB->Dbuf_spinlock, flags); break; case NEED_BYPASS: write_lock_irqsave(&pB->Bbuf_spinlock, flags); if (pB->i2Bbuf_stuff != pB->i2Bbuf_strip) { queueIndex = pB->i2Bbuf_strip; pCh = pB->i2Bbuf[queueIndex]; queueIndex++; if (queueIndex >= CH_QUEUE_SIZE) { queueIndex = 0; } pB->i2Bbuf_strip = queueIndex; pCh->channelNeeds &= ~NEED_BYPASS; } write_unlock_irqrestore(&pB->Bbuf_spinlock, flags); break; case NEED_FLOW: write_lock_irqsave(&pB->Fbuf_spinlock, flags); if (pB->i2Fbuf_stuff != pB->i2Fbuf_strip) { queueIndex = pB->i2Fbuf_strip; pCh = pB->i2Fbuf[queueIndex]; queueIndex++; if (queueIndex >= CH_QUEUE_SIZE) { queueIndex = 0; } pB->i2Fbuf_strip = queueIndex; pCh->channelNeeds &= ~NEED_FLOW; } write_unlock_irqrestore(&pB->Fbuf_spinlock, flags); break; default: printk(KERN_ERR "i2DeQueueNeeds called with bad type:%x\n",type); break; } return pCh; } //****************************************************************************** // Function: i2QueueNeeds(pB, pCh, type) // Parameters: Pointer to a board structure // Pointer to a channel structure // type bit map: may include NEED_INLINE, NEED_BYPASS, or NEED_FLOW // Returns: Nothing // // Description: // For each type of need selected, if the given channel is not already in the // queue, adds it, and sets the flag indicating it is in the queue. 
//****************************************************************************** static void i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type) { unsigned short queueIndex; unsigned long flags; // We turn off all the interrupts during this brief process, since the // interrupt-level code might want to put things on the queue as well. switch (type) { case NEED_INLINE: write_lock_irqsave(&pB->Dbuf_spinlock, flags); if ( !(pCh->channelNeeds & NEED_INLINE) ) { pCh->channelNeeds |= NEED_INLINE; queueIndex = pB->i2Dbuf_stuff; pB->i2Dbuf[queueIndex++] = pCh; if (queueIndex >= CH_QUEUE_SIZE) queueIndex = 0; pB->i2Dbuf_stuff = queueIndex; } write_unlock_irqrestore(&pB->Dbuf_spinlock, flags); break; case NEED_BYPASS: write_lock_irqsave(&pB->Bbuf_spinlock, flags); if ((type & NEED_BYPASS) && !(pCh->channelNeeds & NEED_BYPASS)) { pCh->channelNeeds |= NEED_BYPASS; queueIndex = pB->i2Bbuf_stuff; pB->i2Bbuf[queueIndex++] = pCh; if (queueIndex >= CH_QUEUE_SIZE) queueIndex = 0; pB->i2Bbuf_stuff = queueIndex; } write_unlock_irqrestore(&pB->Bbuf_spinlock, flags); break; case NEED_FLOW: write_lock_irqsave(&pB->Fbuf_spinlock, flags); if ((type & NEED_FLOW) && !(pCh->channelNeeds & NEED_FLOW)) { pCh->channelNeeds |= NEED_FLOW; queueIndex = pB->i2Fbuf_stuff; pB->i2Fbuf[queueIndex++] = pCh; if (queueIndex >= CH_QUEUE_SIZE) queueIndex = 0; pB->i2Fbuf_stuff = queueIndex; } write_unlock_irqrestore(&pB->Fbuf_spinlock, flags); break; case NEED_CREDIT: pCh->channelNeeds |= NEED_CREDIT; break; default: printk(KERN_ERR "i2QueueNeeds called with bad type:%x\n",type); break; } return; } //****************************************************************************** // Function: i2QueueCommands(type, pCh, timeout, nCommands, pCs,...) 
// Parameters: type - PTYPE_BYPASS or PTYPE_INLINE // pointer to the channel structure // maximum period to wait // number of commands (n) // n commands // Returns: Number of commands sent, or -1 for error // // get board lock before calling // // Description: // Queues up some commands to be sent to a channel. To send possibly several // bypass or inline commands to the given channel. The timeout parameter // indicates how many HUNDREDTHS OF SECONDS to wait until there is room: // 0 = return immediately if no room, -ive = wait forever, +ive = number of // 1/100 seconds to wait. Return values: // -1 Some kind of nasty error: bad channel structure or invalid arguments. // 0 No room to send all the commands // (+) Number of commands sent //****************************************************************************** static int i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands, cmdSyntaxPtr pCs0,...) { int totalsize = 0; int blocksize; int lastended; cmdSyntaxPtr *ppCs; cmdSyntaxPtr pCs; int count; int flag; i2eBordStrPtr pB; unsigned short maxBlock; unsigned short maxBuff; short bufroom; unsigned short stuffIndex; unsigned char *pBuf; unsigned char *pInsert; unsigned char *pDest, *pSource; unsigned short channel; int cnt; unsigned long flags = 0; rwlock_t *lock_var_p = NULL; // Make sure the channel exists, otherwise do nothing if ( !i2Validate ( pCh ) ) { return -1; } ip2trace (CHANN, ITRC_QUEUE, ITRC_ENTER, 0 ); pB = pCh->pMyBord; // Board must also exist, and THE INTERRUPT COMMAND ALREADY SENT if (pB->i2eValid != I2E_MAGIC || pB->i2eUsingIrq == I2_IRQ_UNDEFINED) return -2; // If the board has gone fatal, return bad, and also hit the trap routine if // it exists. if (pB->i2eFatal) { if ( pB->i2eFatalTrap ) { (*(pB)->i2eFatalTrap)(pB); } return -3; } // Set up some variables, Which buffers are we using? How big are they? 
switch(type) { case PTYPE_INLINE: flag = INL; maxBlock = MAX_OBUF_BLOCK; maxBuff = OBUF_SIZE; pBuf = pCh->Obuf; break; case PTYPE_BYPASS: flag = BYP; maxBlock = MAX_CBUF_BLOCK; maxBuff = CBUF_SIZE; pBuf = pCh->Cbuf; break; default: return -4; } // Determine the total size required for all the commands totalsize = blocksize = sizeof(i2CmdHeader); lastended = 0; ppCs = &pCs0; for ( count = nCommands; count; count--, ppCs++) { pCs = *ppCs; cnt = pCs->length; // Will a new block be needed for this one? // Two possible reasons: too // big or previous command has to be at the end of a packet. if ((blocksize + cnt > maxBlock) || lastended) { blocksize = sizeof(i2CmdHeader); totalsize += sizeof(i2CmdHeader); } totalsize += cnt; blocksize += cnt; // If this command had to end a block, then we will make sure to // account for it should there be any more blocks. lastended = pCs->flags & END; } for (;;) { // Make sure any pending flush commands go out before we add more data. if ( !( pCh->flush_flags && i2RetryFlushOutput( pCh ) ) ) { // How much room (this time through) ? 
switch(type) { case PTYPE_INLINE: lock_var_p = &pCh->Obuf_spinlock; write_lock_irqsave(lock_var_p, flags); stuffIndex = pCh->Obuf_stuff; bufroom = pCh->Obuf_strip - stuffIndex; break; case PTYPE_BYPASS: lock_var_p = &pCh->Cbuf_spinlock; write_lock_irqsave(lock_var_p, flags); stuffIndex = pCh->Cbuf_stuff; bufroom = pCh->Cbuf_strip - stuffIndex; break; default: return -5; } if (--bufroom < 0) { bufroom += maxBuff; } ip2trace (CHANN, ITRC_QUEUE, 2, 1, bufroom ); // Check for overflow if (totalsize <= bufroom) { // Normal Expected path - We still hold LOCK break; /* from for()- Enough room: goto proceed */ } ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize); write_unlock_irqrestore(lock_var_p, flags); } else ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize); /* Prepare to wait for buffers to empty */ serviceOutgoingFifo(pB); // Dump what we got if (timeout == 0) { return 0; // Tired of waiting } if (timeout > 0) timeout--; // So negative values == forever if (!in_interrupt()) { schedule_timeout_interruptible(1); // short nap } else { // we cannot sched/sleep in interrupt silly return 0; } if (signal_pending(current)) { return 0; // Wake up! Time to die!!! } ip2trace (CHANN, ITRC_QUEUE, 4, 0 ); } // end of for(;;) // At this point we have room and the lock - stick them in. channel = pCh->infl.hd.i2sChannel; pInsert = &pBuf[stuffIndex]; // Pointer to start of packet pDest = CMD_OF(pInsert); // Pointer to start of command // When we start counting, the block is the size of the header for (blocksize = sizeof(i2CmdHeader), count = nCommands, lastended = 0, ppCs = &pCs0; count; count--, ppCs++) { pCs = *ppCs; // Points to command protocol structure // If this is a bookmark request command, post the fact that a bookmark // request is pending. NOTE THIS TRICK ONLY WORKS BECAUSE CMD_BMARK_REQ // has no parameters! The more general solution would be to reference // pCs->cmd[0]. 
if (pCs == CMD_BMARK_REQ) { pCh->bookMarks++; ip2trace (CHANN, ITRC_DRAIN, 30, 1, pCh->bookMarks ); } cnt = pCs->length; // If this command would put us over the maximum block size or // if the last command had to be at the end of a block, we end // the existing block here and start a new one. if ((blocksize + cnt > maxBlock) || lastended) { ip2trace (CHANN, ITRC_QUEUE, 5, 0 ); PTYPE_OF(pInsert) = type; CHANNEL_OF(pInsert) = channel; // count here does not include the header CMD_COUNT_OF(pInsert) = blocksize - sizeof(i2CmdHeader); stuffIndex += blocksize; if(stuffIndex >= maxBuff) { stuffIndex = 0; pInsert = pBuf; } pInsert = &pBuf[stuffIndex]; // Pointer to start of next pkt pDest = CMD_OF(pInsert); blocksize = sizeof(i2CmdHeader); } // Now we know there is room for this one in the current block blocksize += cnt; // Total bytes in this command pSource = pCs->cmd; // Copy the command into the buffer while (cnt--) { *pDest++ = *pSource++; } // If this command had to end a block, then we will make sure to account // for it should there be any more blocks. lastended = pCs->flags & END; } // end for // Clean up the final block by writing header, etc PTYPE_OF(pInsert) = type; CHANNEL_OF(pInsert) = channel; // count here does not include the header CMD_COUNT_OF(pInsert) = blocksize - sizeof(i2CmdHeader); stuffIndex += blocksize; if(stuffIndex >= maxBuff) { stuffIndex = 0; pInsert = pBuf; } // Updates the index, and post the need for service. When adding these to // the queue of channels, we turn off the interrupt while doing so, // because at interrupt level we might want to push a channel back to the // end of the queue. switch(type) { case PTYPE_INLINE: pCh->Obuf_stuff = stuffIndex; // Store buffer pointer write_unlock_irqrestore(&pCh->Obuf_spinlock, flags); pB->debugInlineQueued++; // Add the channel pointer to list of channels needing service (first // come...), if it's not already there. 
i2QueueNeeds(pB, pCh, NEED_INLINE); break; case PTYPE_BYPASS: pCh->Cbuf_stuff = stuffIndex; // Store buffer pointer write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags); pB->debugBypassQueued++; // Add the channel pointer to list of channels needing service (first // come...), if it's not already there. i2QueueNeeds(pB, pCh, NEED_BYPASS); break; } ip2trace (CHANN, ITRC_QUEUE, ITRC_RETURN, 1, nCommands ); return nCommands; // Good status: number of commands sent } //****************************************************************************** // Function: i2GetStatus(pCh,resetBits) // Parameters: Pointer to a channel structure // Bit map of status bits to clear // Returns: Bit map of current status bits // // Description: // Returns the state of data set signals, and whether a break has been received, // (see i2lib.h for bit-mapped result). resetBits is a bit-map of any status // bits to be cleared: I2_BRK, I2_PAR, I2_FRA, I2_OVR,... These are cleared // AFTER the condition is passed. If pCh does not point to a valid channel, // returns -1 (which would be impossible otherwise. //****************************************************************************** static int i2GetStatus(i2ChanStrPtr pCh, int resetBits) { unsigned short status; i2eBordStrPtr pB; ip2trace (CHANN, ITRC_STATUS, ITRC_ENTER, 2, pCh->dataSetIn, resetBits ); // Make sure the channel exists, otherwise do nothing */ if ( !i2Validate ( pCh ) ) return -1; pB = pCh->pMyBord; status = pCh->dataSetIn; // Clear any specified error bits: but note that only actual error bits can // be cleared, regardless of the value passed. 
if (resetBits) { pCh->dataSetIn &= ~(resetBits & (I2_BRK | I2_PAR | I2_FRA | I2_OVR)); pCh->dataSetIn &= ~(I2_DDCD | I2_DCTS | I2_DDSR | I2_DRI); } ip2trace (CHANN, ITRC_STATUS, ITRC_RETURN, 1, pCh->dataSetIn ); return status; } //****************************************************************************** // Function: i2Input(pChpDest,count) // Parameters: Pointer to a channel structure // Pointer to data buffer // Number of bytes to read // Returns: Number of bytes read, or -1 for error // // Description: // Strips data from the input buffer and writes it to pDest. If there is a // collosal blunder, (invalid structure pointers or the like), returns -1. // Otherwise, returns the number of bytes read. //****************************************************************************** static int i2Input(i2ChanStrPtr pCh) { int amountToMove; unsigned short stripIndex; int count; unsigned long flags = 0; ip2trace (CHANN, ITRC_INPUT, ITRC_ENTER, 0); // Ensure channel structure seems real if ( !i2Validate( pCh ) ) { count = -1; goto i2Input_exit; } write_lock_irqsave(&pCh->Ibuf_spinlock, flags); // initialize some accelerators and private copies stripIndex = pCh->Ibuf_strip; count = pCh->Ibuf_stuff - stripIndex; // If buffer is empty or requested data count was 0, (trivial case) return // without any further thought. if ( count == 0 ) { write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); goto i2Input_exit; } // Adjust for buffer wrap if ( count < 0 ) { count += IBUF_SIZE; } // Don't give more than can be taken by the line discipline amountToMove = pCh->pTTY->receive_room; if (count > amountToMove) { count = amountToMove; } // How much could we copy without a wrap? 
amountToMove = IBUF_SIZE - stripIndex; if (amountToMove > count) { amountToMove = count; } // Move the first block pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY, &(pCh->Ibuf[stripIndex]), NULL, amountToMove ); // If we needed to wrap, do the second data move if (count > amountToMove) { pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY, pCh->Ibuf, NULL, count - amountToMove ); } // Bump and wrap the stripIndex all at once by the amount of data read. This // method is good regardless of whether the data was in one or two pieces. stripIndex += count; if (stripIndex >= IBUF_SIZE) { stripIndex -= IBUF_SIZE; } pCh->Ibuf_strip = stripIndex; // Update our flow control information and possibly queue ourselves to send // it, depending on how much data has been stripped since the last time a // packet was sent. pCh->infl.asof += count; if ((pCh->sinceLastFlow += count) >= pCh->whenSendFlow) { pCh->sinceLastFlow -= pCh->whenSendFlow; write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW); } else { write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); } i2Input_exit: ip2trace (CHANN, ITRC_INPUT, ITRC_RETURN, 1, count); return count; } //****************************************************************************** // Function: i2InputFlush(pCh) // Parameters: Pointer to a channel structure // Returns: Number of bytes stripped, or -1 for error // // Description: // Strips any data from the input buffer. If there is a collosal blunder, // (invalid structure pointers or the like), returns -1. Otherwise, returns the // number of bytes stripped. 
//****************************************************************************** static int i2InputFlush(i2ChanStrPtr pCh) { int count; unsigned long flags; // Ensure channel structure seems real if ( !i2Validate ( pCh ) ) return -1; ip2trace (CHANN, ITRC_INPUT, 10, 0); write_lock_irqsave(&pCh->Ibuf_spinlock, flags); count = pCh->Ibuf_stuff - pCh->Ibuf_strip; // Adjust for buffer wrap if (count < 0) { count += IBUF_SIZE; } // Expedient way to zero out the buffer pCh->Ibuf_strip = pCh->Ibuf_stuff; // Update our flow control information and possibly queue ourselves to send // it, depending on how much data has been stripped since the last time a // packet was sent. pCh->infl.asof += count; if ( (pCh->sinceLastFlow += count) >= pCh->whenSendFlow ) { pCh->sinceLastFlow -= pCh->whenSendFlow; write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW); } else { write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); } ip2trace (CHANN, ITRC_INPUT, 19, 1, count); return count; } //****************************************************************************** // Function: i2InputAvailable(pCh) // Parameters: Pointer to a channel structure // Returns: Number of bytes available, or -1 for error // // Description: // If there is a collosal blunder, (invalid structure pointers or the like), // returns -1. Otherwise, returns the number of bytes stripped. Otherwise, // returns the number of bytes available in the buffer. 
//******************************************************************************
#if 0
// NOTE: currently compiled out.  The original version used 'flags' in the
// irqsave/irqrestore pair without ever declaring it, so it could not have
// compiled if re-enabled; the declaration below fixes that.
static int i2InputAvailable(i2ChanStrPtr pCh)
{
	int count;
	unsigned long flags;	// required by read_lock_irqsave() below

	// Ensure channel structure seems real
	if ( !i2Validate ( pCh ) )
		return -1;

	// Snapshot stuffed-minus-stripped under the input-buffer read lock.
	read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
	count = pCh->Ibuf_stuff - pCh->Ibuf_strip;
	read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);

	// Adjust for buffer wrap
	if (count < 0) {
		count += IBUF_SIZE;
	}

	return count;
}
#endif 

//******************************************************************************
// Function:   i2Output(pCh, pSource, count)
// Parameters: Pointer to channel structure
//             Pointer to source data
//             Number of bytes to send
// Returns:    Number of bytes sent, or -1 for error
//
// Description:
// Queues the data at pSource to be sent as data packets to the board. If there
// is a colossal blunder, (invalid structure pointers or the like), returns -1.
// Otherwise, returns the number of bytes written. What if there is not enough
// room for all the data? If pCh->channelOptions & CO_NBLOCK_WRITE is set, then
// we transfer as many characters as we can now, then return. If this bit is
// clear (default), routine will spin along until all the data is buffered.
// Should this occur, the 1-ms delay routine is called while waiting to avoid
// applications that one cannot break out of.
//******************************************************************************
static int i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
{
	i2eBordStrPtr pB;
	unsigned char *pInsert;
	int amountToMove;
	int countOriginal = count;	// so we can report how much was consumed
	unsigned short channel;
	unsigned short stuffIndex;
	unsigned long flags;

	int bailout = 10;	// limit on serviceOutgoingFifo() retries below

	ip2trace (CHANN, ITRC_OUTPUT, ITRC_ENTER, 2, count, 0 );

	// Ensure channel structure seems real
	if ( !i2Validate ( pCh ) )
		return -1;

	// initialize some accelerators and private copies
	pB = pCh->pMyBord;
	channel = pCh->infl.hd.i2sChannel;

	// If the board has gone fatal, return bad, and also hit the trap routine if
	// it exists.
	if (pB->i2eFatal) {
		if (pB->i2eFatalTrap) {
			(*(pB)->i2eFatalTrap)(pB);
		}
		return -1;
	}

	// Proceed as though we would do everything; each pass stuffs at most one
	// data packet into the channel's Obuf ring.
	while ( count > 0 ) {

		// How much room in output buffer is there?  (read lock only: we are
		// merely sampling the indices here)
		read_lock_irqsave(&pCh->Obuf_spinlock, flags);
		amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1;
		read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
		if (amountToMove < 0) {
			amountToMove += OBUF_SIZE;
		}
		// Subtract off the headers size and see how much room there is for real
		// data. If this is negative, we will discover later.
		amountToMove -= sizeof (i2DataHeader);

		// Don't move more (now) than can go in a single packet
		if ( amountToMove > (int)(MAX_OBUF_BLOCK - sizeof(i2DataHeader)) ) {
			amountToMove = MAX_OBUF_BLOCK - sizeof(i2DataHeader);
		}

		// Don't move more than the count we were given
		if (amountToMove > count) {
			amountToMove = count;
		}

		// Now we know how much we must move: NB because the ring buffers have
		// an overflow area at the end, we needn't worry about wrapping in the
		// middle of a packet.

		// Small WINDOW here with no LOCK but I can't call Flush with LOCK
		// We would be flushing (or ending flush) anyway

		ip2trace (CHANN, ITRC_OUTPUT, 10, 1, amountToMove );

		// If a flush is still pending, try to queue the deferred flush
		// commands first (i2RetryFlushOutput returns non-zero if the flush is
		// STILL pending); only stuff data when no flush remains and there is
		// room for at least one byte.
		if ( !(pCh->flush_flags && i2RetryFlushOutput(pCh) )
				&& amountToMove > 0 )
		{
			write_lock_irqsave(&pCh->Obuf_spinlock, flags);
			stuffIndex = pCh->Obuf_stuff;

			// Had room to move some data: don't know whether the block size,
			// buffer space, or what was the limiting factor...
			pInsert = &(pCh->Obuf[stuffIndex]);

			// Set up the header
			CHANNEL_OF(pInsert)     = channel;
			PTYPE_OF(pInsert)       = PTYPE_DATA;
			TAG_OF(pInsert)         = 0;
			ID_OF(pInsert)          = ID_ORDINARY_DATA;
			DATA_COUNT_OF(pInsert)  = amountToMove;

			// Move the data
			memcpy( (char*)(DATA_OF(pInsert)), pSource, amountToMove );
			// Adjust pointers and indices
			pSource              += amountToMove;
			pCh->Obuf_char_count += amountToMove;
			stuffIndex           += amountToMove + sizeof(i2DataHeader);
			count                -= amountToMove;

			if (stuffIndex >= OBUF_SIZE) {
				stuffIndex = 0;
			}
			pCh->Obuf_stuff = stuffIndex;

			write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);

			ip2trace (CHANN, ITRC_OUTPUT, 13, 1, stuffIndex );

		} else {

			// Cannot move data
			// becuz we need to stuff a flush
			// or amount to move is <= 0

			ip2trace(CHANN, ITRC_OUTPUT, 14, 3,
				amountToMove,  pB->i2eFifoRemains,
				pB->i2eWaitingForEmptyFifo );

			// Put this channel back on queue
			// this ultimatly gets more data or wakes write output
			i2QueueNeeds(pB, pCh, NEED_INLINE);

			if ( pB->i2eWaitingForEmptyFifo ) {

				ip2trace (CHANN, ITRC_OUTPUT, 16, 0 );

				// or schedule
				if (!in_interrupt()) {

					ip2trace (CHANN, ITRC_OUTPUT, 61, 0 );

					// Short sleep, then retry unless a signal arrived.
					schedule_timeout_interruptible(2);
					if (signal_pending(current)) {
						break;
					}
					continue;
				} else {

					ip2trace (CHANN, ITRC_OUTPUT, 62, 0 );

					// let interrupt in = WAS restore_flags()
					// We hold no lock nor is irq off anymore???
					break;
				}
				break; // from while(count)
			}
			else if ( pB->i2eFifoRemains < 32 && !pB->i2eTxMailEmpty ( pB ) )
			{
				// FIFO nearly full and the outbound mailbox is busy:
				// give up for now.
				ip2trace (CHANN, ITRC_OUTPUT, 19, 2,
					pB->i2eFifoRemains,
					pB->i2eTxMailEmpty );

				break;   // from while(count)
			} else if ( pCh->channelNeeds & NEED_CREDIT ) {

				// Waiting on flow-control credit from the board.
				ip2trace (CHANN, ITRC_OUTPUT, 22, 0 );

				break;   // from while(count)
			} else if ( --bailout) {

				// Try to throw more things (maybe not us) in the fifo if we're
				// not already waiting for it.

				ip2trace (CHANN, ITRC_OUTPUT, 20, 0 );

				serviceOutgoingFifo(pB);
				//break;  CONTINUE;
			} else {
				// Retry budget exhausted: give up.
				ip2trace (CHANN, ITRC_OUTPUT, 21, 3,
					pB->i2eFifoRemains,
					pB->i2eOutMailWaiting,
					pB->i2eWaitingForEmptyFifo );

				break;   // from while(count)
			}
		}
	} // End of while(count)

	i2QueueNeeds(pB, pCh, NEED_INLINE);

	// We drop through either when the count expires, or when there is some
	// count left, but there was a non-blocking write.
	if (countOriginal > count) {

		ip2trace (CHANN, ITRC_OUTPUT, 17, 2, countOriginal, count );

		serviceOutgoingFifo( pB );
	}

	ip2trace (CHANN, ITRC_OUTPUT, ITRC_RETURN, 2, countOriginal, count );

	// Number of bytes actually buffered for transmission.
	return countOriginal - count;
}

//******************************************************************************
// Function:   i2FlushOutput(pCh)
// Parameters: Pointer to a channel structure
// Returns:    Nothing
//
// Description:
// Sends bypass command to start flushing (waiting possibly forever until there
// is room), then sends inline command to stop flushing output, (again waiting
// possibly forever).
//****************************************************************************** static inline void i2FlushOutput(i2ChanStrPtr pCh) { ip2trace (CHANN, ITRC_FLUSH, 1, 1, pCh->flush_flags ); if (pCh->flush_flags) return; if ( 1 != i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_STARTFL) ) { pCh->flush_flags = STARTFL_FLAG; // Failed - flag for later ip2trace (CHANN, ITRC_FLUSH, 2, 0 ); } else if ( 1 != i2QueueCommands(PTYPE_INLINE, pCh, 0, 1, CMD_STOPFL) ) { pCh->flush_flags = STOPFL_FLAG; // Failed - flag for later ip2trace (CHANN, ITRC_FLUSH, 3, 0 ); } } static int i2RetryFlushOutput(i2ChanStrPtr pCh) { int old_flags = pCh->flush_flags; ip2trace (CHANN, ITRC_FLUSH, 14, 1, old_flags ); pCh->flush_flags = 0; // Clear flag so we can avoid recursion // and queue the commands if ( old_flags & STARTFL_FLAG ) { if ( 1 == i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_STARTFL) ) { old_flags = STOPFL_FLAG; //Success - send stop flush } else { old_flags = STARTFL_FLAG; //Failure - Flag for retry later } ip2trace (CHANN, ITRC_FLUSH, 15, 1, old_flags ); } if ( old_flags & STOPFL_FLAG ) { if (1 == i2QueueCommands(PTYPE_INLINE, pCh, 0, 1, CMD_STOPFL)) { old_flags = 0; // Success - clear flags } ip2trace (CHANN, ITRC_FLUSH, 16, 1, old_flags ); } pCh->flush_flags = old_flags; ip2trace (CHANN, ITRC_FLUSH, 17, 1, old_flags ); return old_flags; } //****************************************************************************** // Function: i2DrainOutput(pCh,timeout) // Parameters: Pointer to a channel structure // Maximum period to wait // Returns: ? // // Description: // Uses the bookmark request command to ask the board to send a bookmark back as // soon as all the data is completely sent. 
//****************************************************************************** static void i2DrainWakeup(unsigned long d) { i2ChanStrPtr pCh = (i2ChanStrPtr)d; ip2trace (CHANN, ITRC_DRAIN, 10, 1, pCh->BookmarkTimer.expires ); pCh->BookmarkTimer.expires = 0; wake_up_interruptible( &pCh->pBookmarkWait ); } static void i2DrainOutput(i2ChanStrPtr pCh, int timeout) { wait_queue_t wait; i2eBordStrPtr pB; ip2trace (CHANN, ITRC_DRAIN, ITRC_ENTER, 1, pCh->BookmarkTimer.expires); pB = pCh->pMyBord; // If the board has gone fatal, return bad, // and also hit the trap routine if it exists. if (pB->i2eFatal) { if (pB->i2eFatalTrap) { (*(pB)->i2eFatalTrap)(pB); } return; } if ((timeout > 0) && (pCh->BookmarkTimer.expires == 0 )) { // One per customer (channel) setup_timer(&pCh->BookmarkTimer, i2DrainWakeup, (unsigned long)pCh); ip2trace (CHANN, ITRC_DRAIN, 1, 1, pCh->BookmarkTimer.expires ); mod_timer(&pCh->BookmarkTimer, jiffies + timeout); } i2QueueCommands( PTYPE_INLINE, pCh, -1, 1, CMD_BMARK_REQ ); init_waitqueue_entry(&wait, current); add_wait_queue(&(pCh->pBookmarkWait), &wait); set_current_state( TASK_INTERRUPTIBLE ); serviceOutgoingFifo( pB ); schedule(); // Now we take our interruptible sleep on // Clean up the queue set_current_state( TASK_RUNNING ); remove_wait_queue(&(pCh->pBookmarkWait), &wait); // if expires == 0 then timer poped, then do not need to del_timer if ((timeout > 0) && pCh->BookmarkTimer.expires && time_before(jiffies, pCh->BookmarkTimer.expires)) { del_timer( &(pCh->BookmarkTimer) ); pCh->BookmarkTimer.expires = 0; ip2trace (CHANN, ITRC_DRAIN, 3, 1, pCh->BookmarkTimer.expires ); } ip2trace (CHANN, ITRC_DRAIN, ITRC_RETURN, 1, pCh->BookmarkTimer.expires ); return; } //****************************************************************************** // Function: i2OutputFree(pCh) // Parameters: Pointer to a channel structure // Returns: Space in output buffer // // Description: // Returns -1 if very gross error. 
Otherwise returns the amount of bytes still // free in the output buffer. //****************************************************************************** static int i2OutputFree(i2ChanStrPtr pCh) { int amountToMove; unsigned long flags; // Ensure channel structure seems real if ( !i2Validate ( pCh ) ) { return -1; } read_lock_irqsave(&pCh->Obuf_spinlock, flags); amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1; read_unlock_irqrestore(&pCh->Obuf_spinlock, flags); if (amountToMove < 0) { amountToMove += OBUF_SIZE; } // If this is negative, we will discover later amountToMove -= sizeof(i2DataHeader); return (amountToMove < 0) ? 0 : amountToMove; } static void ip2_owake( PTTY tp) { i2ChanStrPtr pCh; if (tp == NULL) return; pCh = tp->driver_data; ip2trace (CHANN, ITRC_SICMD, 10, 2, tp->flags, (1 << TTY_DO_WRITE_WAKEUP) ); tty_wakeup(tp); } static inline void set_baud_params(i2eBordStrPtr pB) { int i,j; i2ChanStrPtr *pCh; pCh = (i2ChanStrPtr *) pB->i2eChannelPtr; for (i = 0; i < ABS_MAX_BOXES; i++) { if (pB->channelBtypes.bid_value[i]) { if (BID_HAS_654(pB->channelBtypes.bid_value[i])) { for (j = 0; j < ABS_BIGGEST_BOX; j++) { if (pCh[i*16+j] == NULL) break; (pCh[i*16+j])->BaudBase = 921600; // MAX for ST654 (pCh[i*16+j])->BaudDivisor = 96; } } else { // has cirrus cd1400 for (j = 0; j < ABS_BIGGEST_BOX; j++) { if (pCh[i*16+j] == NULL) break; (pCh[i*16+j])->BaudBase = 115200; // MAX for CD1400 (pCh[i*16+j])->BaudDivisor = 12; } } } } } //****************************************************************************** // Function: i2StripFifo(pB) // Parameters: Pointer to a board structure // Returns: ? // // Description: // Strips all the available data from the incoming FIFO, identifies the type of // packet, and either buffers the data or does what needs to be done. // // Note there is no overflow checking here: if the board sends more data than it // ought to, we will not detect it here, but blindly overflow... 
//****************************************************************************** // A buffer for reading in blocks for unknown channels static unsigned char junkBuffer[IBUF_SIZE]; // A buffer to read in a status packet. Because of the size of the count field // for these things, the maximum packet size must be less than MAX_CMD_PACK_SIZE static unsigned char cmdBuffer[MAX_CMD_PACK_SIZE + 4]; // This table changes the bit order from MSR order given by STAT_MODEM packet to // status bits used in our library. static char xlatDss[16] = { 0 | 0 | 0 | 0 , 0 | 0 | 0 | I2_CTS , 0 | 0 | I2_DSR | 0 , 0 | 0 | I2_DSR | I2_CTS , 0 | I2_RI | 0 | 0 , 0 | I2_RI | 0 | I2_CTS , 0 | I2_RI | I2_DSR | 0 , 0 | I2_RI | I2_DSR | I2_CTS , I2_DCD | 0 | 0 | 0 , I2_DCD | 0 | 0 | I2_CTS , I2_DCD | 0 | I2_DSR | 0 , I2_DCD | 0 | I2_DSR | I2_CTS , I2_DCD | I2_RI | 0 | 0 , I2_DCD | I2_RI | 0 | I2_CTS , I2_DCD | I2_RI | I2_DSR | 0 , I2_DCD | I2_RI | I2_DSR | I2_CTS }; static inline void i2StripFifo(i2eBordStrPtr pB) { i2ChanStrPtr pCh; int channel; int count; unsigned short stuffIndex; int amountToRead; unsigned char *pc, *pcLimit; unsigned char uc; unsigned char dss_change; unsigned long bflags,cflags; // ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_ENTER, 0 ); while (I2_HAS_INPUT(pB)) { // ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 2, 0 ); // Process packet from fifo a one atomic unit write_lock_irqsave(&pB->read_fifo_spinlock, bflags); // The first word (or two bytes) will have channel number and type of // packet, possibly other information pB->i2eLeadoffWord[0] = iiReadWord(pB); switch(PTYPE_OF(pB->i2eLeadoffWord)) { case PTYPE_DATA: pB->got_input = 1; // ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 3, 0 ); channel = CHANNEL_OF(pB->i2eLeadoffWord); /* Store channel */ count = iiReadWord(pB); /* Count is in the next word */ // NEW: Check the count for sanity! Should the hardware fail, our death // is more pleasant. 
While an oversize channel is acceptable (just more // than the driver supports), an over-length count clearly means we are // sick! if ( ((unsigned int)count) > IBUF_SIZE ) { pB->i2eFatal = 2; write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); return; /* Bail out ASAP */ } // Channel is illegally big ? if ((channel >= pB->i2eChannelCnt) || (NULL==(pCh = ((i2ChanStrPtr*)pB->i2eChannelPtr)[channel]))) { iiReadBuf(pB, junkBuffer, count); write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); break; /* From switch: ready for next packet */ } // Channel should be valid, then // If this is a hot-key, merely post its receipt for now. These are // always supposed to be 1-byte packets, so we won't even check the // count. Also we will post an acknowledgement to the board so that // more data can be forthcoming. Note that we are not trying to use // these sequences in this driver, merely to robustly ignore them. if(ID_OF(pB->i2eLeadoffWord) == ID_HOT_KEY) { pCh->hotKeyIn = iiReadWord(pB) & 0xff; write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_HOTACK); break; /* From the switch: ready for next packet */ } // Normal data! We crudely assume there is room for the data in our // buffer because the board wouldn't have exceeded his credit limit. write_lock_irqsave(&pCh->Ibuf_spinlock, cflags); // We have 2 locks now stuffIndex = pCh->Ibuf_stuff; amountToRead = IBUF_SIZE - stuffIndex; if (amountToRead > count) amountToRead = count; // stuffIndex would have been already adjusted so there would // always be room for at least one, and count is always at least // one. iiReadBuf(pB, &(pCh->Ibuf[stuffIndex]), amountToRead); pCh->icount.rx += amountToRead; // Update the stuffIndex by the amount of data moved. Note we could // never ask for more data than would just fit. However, we might // have read in one more byte than we wanted because the read // rounds up to even bytes. 
If this byte is on the end of the // packet, and is padding, we ignore it. If the byte is part of // the actual data, we need to move it. stuffIndex += amountToRead; if (stuffIndex >= IBUF_SIZE) { if ((amountToRead & 1) && (count > amountToRead)) { pCh->Ibuf[0] = pCh->Ibuf[IBUF_SIZE]; amountToRead++; stuffIndex = 1; } else { stuffIndex = 0; } } // If there is anything left over, read it as well if (count > amountToRead) { amountToRead = count - amountToRead; iiReadBuf(pB, &(pCh->Ibuf[stuffIndex]), amountToRead); pCh->icount.rx += amountToRead; stuffIndex += amountToRead; } // Update stuff index pCh->Ibuf_stuff = stuffIndex; write_unlock_irqrestore(&pCh->Ibuf_spinlock, cflags); write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); #ifdef USE_IQ schedule_work(&pCh->tqueue_input); #else do_input(&pCh->tqueue_input); #endif // Note we do not need to maintain any flow-control credits at this // time: if we were to increment .asof and decrement .room, there // would be no net effect. Instead, when we strip data, we will // increment .asof and leave .room unchanged. 
break; // From switch: ready for next packet case PTYPE_STATUS: ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 4, 0 ); count = CMD_COUNT_OF(pB->i2eLeadoffWord); iiReadBuf(pB, cmdBuffer, count); // We can release early with buffer grab write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); pc = cmdBuffer; pcLimit = &(cmdBuffer[count]); while (pc < pcLimit) { channel = *pc++; ip2trace (channel, ITRC_SFIFO, 7, 2, channel, *pc ); /* check for valid channel */ if (channel < pB->i2eChannelCnt && (pCh = (((i2ChanStrPtr*)pB->i2eChannelPtr)[channel])) != NULL ) { dss_change = 0; switch (uc = *pc++) { /* Breaks and modem signals are easy: just update status */ case STAT_CTS_UP: if ( !(pCh->dataSetIn & I2_CTS) ) { pCh->dataSetIn |= I2_DCTS; pCh->icount.cts++; dss_change = 1; } pCh->dataSetIn |= I2_CTS; break; case STAT_CTS_DN: if ( pCh->dataSetIn & I2_CTS ) { pCh->dataSetIn |= I2_DCTS; pCh->icount.cts++; dss_change = 1; } pCh->dataSetIn &= ~I2_CTS; break; case STAT_DCD_UP: ip2trace (channel, ITRC_MODEM, 1, 1, pCh->dataSetIn ); if ( !(pCh->dataSetIn & I2_DCD) ) { ip2trace (CHANN, ITRC_MODEM, 2, 0 ); pCh->dataSetIn |= I2_DDCD; pCh->icount.dcd++; dss_change = 1; } pCh->dataSetIn |= I2_DCD; ip2trace (channel, ITRC_MODEM, 3, 1, pCh->dataSetIn ); break; case STAT_DCD_DN: ip2trace (channel, ITRC_MODEM, 4, 1, pCh->dataSetIn ); if ( pCh->dataSetIn & I2_DCD ) { ip2trace (channel, ITRC_MODEM, 5, 0 ); pCh->dataSetIn |= I2_DDCD; pCh->icount.dcd++; dss_change = 1; } pCh->dataSetIn &= ~I2_DCD; ip2trace (channel, ITRC_MODEM, 6, 1, pCh->dataSetIn ); break; case STAT_DSR_UP: if ( !(pCh->dataSetIn & I2_DSR) ) { pCh->dataSetIn |= I2_DDSR; pCh->icount.dsr++; dss_change = 1; } pCh->dataSetIn |= I2_DSR; break; case STAT_DSR_DN: if ( pCh->dataSetIn & I2_DSR ) { pCh->dataSetIn |= I2_DDSR; pCh->icount.dsr++; dss_change = 1; } pCh->dataSetIn &= ~I2_DSR; break; case STAT_RI_UP: if ( !(pCh->dataSetIn & I2_RI) ) { pCh->dataSetIn |= I2_DRI; pCh->icount.rng++; dss_change = 1; } pCh->dataSetIn |= I2_RI ; break; case 
STAT_RI_DN: // to be compat with serial.c //if ( pCh->dataSetIn & I2_RI ) //{ // pCh->dataSetIn |= I2_DRI; // pCh->icount.rng++; // dss_change = 1; //} pCh->dataSetIn &= ~I2_RI ; break; case STAT_BRK_DET: pCh->dataSetIn |= I2_BRK; pCh->icount.brk++; dss_change = 1; break; // Bookmarks? one less request we're waiting for case STAT_BMARK: pCh->bookMarks--; if (pCh->bookMarks <= 0 ) { pCh->bookMarks = 0; wake_up_interruptible( &pCh->pBookmarkWait ); ip2trace (channel, ITRC_DRAIN, 20, 1, pCh->BookmarkTimer.expires ); } break; // Flow control packets? Update the new credits, and if // someone was waiting for output, queue him up again. case STAT_FLOW: pCh->outfl.room = ((flowStatPtr)pc)->room - (pCh->outfl.asof - ((flowStatPtr)pc)->asof); ip2trace (channel, ITRC_STFLW, 1, 1, pCh->outfl.room ); if (pCh->channelNeeds & NEED_CREDIT) { ip2trace (channel, ITRC_STFLW, 2, 1, pCh->channelNeeds); pCh->channelNeeds &= ~NEED_CREDIT; i2QueueNeeds(pB, pCh, NEED_INLINE); if ( pCh->pTTY ) ip2_owake(pCh->pTTY); } ip2trace (channel, ITRC_STFLW, 3, 1, pCh->channelNeeds); pc += sizeof(flowStat); break; /* Special packets: */ /* Just copy the information into the channel structure */ case STAT_STATUS: pCh->channelStatus = *((debugStatPtr)pc); pc += sizeof(debugStat); break; case STAT_TXCNT: pCh->channelTcount = *((cntStatPtr)pc); pc += sizeof(cntStat); break; case STAT_RXCNT: pCh->channelRcount = *((cntStatPtr)pc); pc += sizeof(cntStat); break; case STAT_BOXIDS: pB->channelBtypes = *((bidStatPtr)pc); pc += sizeof(bidStat); set_baud_params(pB); break; case STAT_HWFAIL: i2QueueCommands (PTYPE_INLINE, pCh, 0, 1, CMD_HW_TEST); pCh->channelFail = *((failStatPtr)pc); pc += sizeof(failStat); break; /* No explicit match? then * Might be an error packet... 
*/ default: switch (uc & STAT_MOD_ERROR) { case STAT_ERROR: if (uc & STAT_E_PARITY) { pCh->dataSetIn |= I2_PAR; pCh->icount.parity++; } if (uc & STAT_E_FRAMING){ pCh->dataSetIn |= I2_FRA; pCh->icount.frame++; } if (uc & STAT_E_OVERRUN){ pCh->dataSetIn |= I2_OVR; pCh->icount.overrun++; } break; case STAT_MODEM: // the answer to DSS_NOW request (not change) pCh->dataSetIn = (pCh->dataSetIn & ~(I2_RI | I2_CTS | I2_DCD | I2_DSR) ) | xlatDss[uc & 0xf]; wake_up_interruptible ( &pCh->dss_now_wait ); default: break; } } /* End of switch on status type */ if (dss_change) { #ifdef USE_IQ schedule_work(&pCh->tqueue_status); #else do_status(&pCh->tqueue_status); #endif } } else /* Or else, channel is invalid */ { // Even though the channel is invalid, we must test the // status to see how much additional data it has (to be // skipped) switch (*pc++) { case STAT_FLOW: pc += 4; /* Skip the data */ break; default: break; } } } // End of while (there is still some status packet left) break; default: // Neither packet? should be impossible ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 5, 1, PTYPE_OF(pB->i2eLeadoffWord) ); write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); break; } // End of switch on type of packets } /*while(board I2_HAS_INPUT)*/ ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_RETURN, 0 ); // Send acknowledgement to the board even if there was no data! 
pB->i2eOutMailWaiting |= MB_IN_STRIPPED; return; } //****************************************************************************** // Function: i2Write2Fifo(pB,address,count) // Parameters: Pointer to a board structure, source address, byte count // Returns: bytes written // // Description: // Writes count bytes to board io address(implied) from source // Adjusts count, leaves reserve for next time around bypass cmds //****************************************************************************** static int i2Write2Fifo(i2eBordStrPtr pB, unsigned char *source, int count,int reserve) { int rc = 0; unsigned long flags; write_lock_irqsave(&pB->write_fifo_spinlock, flags); if (!pB->i2eWaitingForEmptyFifo) { if (pB->i2eFifoRemains > (count+reserve)) { pB->i2eFifoRemains -= count; iiWriteBuf(pB, source, count); pB->i2eOutMailWaiting |= MB_OUT_STUFFED; rc = count; } } write_unlock_irqrestore(&pB->write_fifo_spinlock, flags); return rc; } //****************************************************************************** // Function: i2StuffFifoBypass(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // Stuffs as many bypass commands into the fifo as possible. This is simpler // than stuffing data or inline commands to fifo, since we do not have // flow-control to deal with. //****************************************************************************** static inline void i2StuffFifoBypass(i2eBordStrPtr pB) { i2ChanStrPtr pCh; unsigned char *pRemove; unsigned short stripIndex; unsigned short packetSize; unsigned short paddedSize; unsigned short notClogged = 1; unsigned long flags; int bailout = 1000; // Continue processing so long as there are entries, or there is room in the // fifo. Each entry represents a channel with something to do. 
while ( --bailout && notClogged && (NULL != (pCh = i2DeQueueNeeds(pB,NEED_BYPASS)))) { write_lock_irqsave(&pCh->Cbuf_spinlock, flags); stripIndex = pCh->Cbuf_strip; // as long as there are packets for this channel... while (stripIndex != pCh->Cbuf_stuff) { pRemove = &(pCh->Cbuf[stripIndex]); packetSize = CMD_COUNT_OF(pRemove) + sizeof(i2CmdHeader); paddedSize = roundup(packetSize, 2); if (paddedSize > 0) { if ( 0 == i2Write2Fifo(pB, pRemove, paddedSize,0)) { notClogged = 0; /* fifo full */ i2QueueNeeds(pB, pCh, NEED_BYPASS); // Put back on queue break; // Break from the channel } } #ifdef DEBUG_FIFO WriteDBGBuf("BYPS", pRemove, paddedSize); #endif /* DEBUG_FIFO */ pB->debugBypassCount++; pRemove += packetSize; stripIndex += packetSize; if (stripIndex >= CBUF_SIZE) { stripIndex = 0; pRemove = pCh->Cbuf; } } // Done with this channel. Move to next, removing this one from // the queue of channels if we cleaned it out (i.e., didn't get clogged. pCh->Cbuf_strip = stripIndex; write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags); } // Either clogged or finished all the work #ifdef IP2DEBUG_TRACE if ( !bailout ) { ip2trace (ITRC_NO_PORT, ITRC_ERROR, 1, 0 ); } #endif } //****************************************************************************** // Function: i2StuffFifoFlow(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // Stuffs as many flow control packets into the fifo as possible. This is easier // even than doing normal bypass commands, because there is always at most one // packet, already assembled, for each channel. //****************************************************************************** static inline void i2StuffFifoFlow(i2eBordStrPtr pB) { i2ChanStrPtr pCh; unsigned short paddedSize = roundup(sizeof(flowIn), 2); ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_ENTER, 2, pB->i2eFifoRemains, paddedSize ); // Continue processing so long as there are entries, or there is room in the // fifo. 
Each entry represents a channel with something to do. while ( (NULL != (pCh = i2DeQueueNeeds(pB,NEED_FLOW)))) { pB->debugFlowCount++; // NO Chan LOCK needed ??? if ( 0 == i2Write2Fifo(pB,(unsigned char *)&(pCh->infl),paddedSize,0)) { break; } #ifdef DEBUG_FIFO WriteDBGBuf("FLOW",(unsigned char *) &(pCh->infl), paddedSize); #endif /* DEBUG_FIFO */ } // Either clogged or finished all the work ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_RETURN, 0 ); } //****************************************************************************** // Function: i2StuffFifoInline(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // Stuffs as much data and inline commands into the fifo as possible. This is // the most complex fifo-stuffing operation, since there if now the channel // flow-control issue to deal with. //****************************************************************************** static inline void i2StuffFifoInline(i2eBordStrPtr pB) { i2ChanStrPtr pCh; unsigned char *pRemove; unsigned short stripIndex; unsigned short packetSize; unsigned short paddedSize; unsigned short notClogged = 1; unsigned short flowsize; unsigned long flags; int bailout = 1000; int bailout2; ip2trace (ITRC_NO_PORT, ITRC_SICMD, ITRC_ENTER, 3, pB->i2eFifoRemains, pB->i2Dbuf_strip, pB->i2Dbuf_stuff ); // Continue processing so long as there are entries, or there is room in the // fifo. Each entry represents a channel with something to do. while ( --bailout && notClogged && (NULL != (pCh = i2DeQueueNeeds(pB,NEED_INLINE))) ) { write_lock_irqsave(&pCh->Obuf_spinlock, flags); stripIndex = pCh->Obuf_strip; ip2trace (CHANN, ITRC_SICMD, 3, 2, stripIndex, pCh->Obuf_stuff ); // as long as there are packets for this channel... 
bailout2 = 1000; while ( --bailout2 && stripIndex != pCh->Obuf_stuff) { pRemove = &(pCh->Obuf[stripIndex]); // Must determine whether this be a data or command packet to // calculate correctly the header size and the amount of // flow-control credit this type of packet will use. if (PTYPE_OF(pRemove) == PTYPE_DATA) { flowsize = DATA_COUNT_OF(pRemove); packetSize = flowsize + sizeof(i2DataHeader); } else { flowsize = CMD_COUNT_OF(pRemove); packetSize = flowsize + sizeof(i2CmdHeader); } flowsize = CREDIT_USAGE(flowsize); paddedSize = roundup(packetSize, 2); ip2trace (CHANN, ITRC_SICMD, 4, 2, pB->i2eFifoRemains, paddedSize ); // If we don't have enough credits from the board to send the data, // flag the channel that we are waiting for flow control credit, and // break out. This will clean up this channel and remove us from the // queue of hot things to do. ip2trace (CHANN, ITRC_SICMD, 5, 2, pCh->outfl.room, flowsize ); if (pCh->outfl.room <= flowsize) { // Do Not have the credits to send this packet. i2QueueNeeds(pB, pCh, NEED_CREDIT); notClogged = 0; break; // So to do next channel } if ( (paddedSize > 0) && ( 0 == i2Write2Fifo(pB, pRemove, paddedSize, 128))) { // Do Not have room in fifo to send this packet. notClogged = 0; i2QueueNeeds(pB, pCh, NEED_INLINE); break; // Break from the channel } #ifdef DEBUG_FIFO WriteDBGBuf("DATA", pRemove, paddedSize); #endif /* DEBUG_FIFO */ pB->debugInlineCount++; pCh->icount.tx += flowsize; // Update current credits pCh->outfl.room -= flowsize; pCh->outfl.asof += flowsize; if (PTYPE_OF(pRemove) == PTYPE_DATA) { pCh->Obuf_char_count -= DATA_COUNT_OF(pRemove); } pRemove += packetSize; stripIndex += packetSize; ip2trace (CHANN, ITRC_SICMD, 6, 2, stripIndex, pCh->Obuf_strip); if (stripIndex >= OBUF_SIZE) { stripIndex = 0; pRemove = pCh->Obuf; ip2trace (CHANN, ITRC_SICMD, 7, 1, stripIndex ); } } /* while */ if ( !bailout2 ) { ip2trace (CHANN, ITRC_ERROR, 3, 0 ); } // Done with this channel. 
Move to next, removing this one from the // queue of channels if we cleaned it out (i.e., didn't get clogged. pCh->Obuf_strip = stripIndex; write_unlock_irqrestore(&pCh->Obuf_spinlock, flags); if ( notClogged ) { ip2trace (CHANN, ITRC_SICMD, 8, 0 ); if ( pCh->pTTY ) { ip2_owake(pCh->pTTY); } } } // Either clogged or finished all the work if ( !bailout ) { ip2trace (ITRC_NO_PORT, ITRC_ERROR, 4, 0 ); } ip2trace (ITRC_NO_PORT, ITRC_SICMD, ITRC_RETURN, 1,pB->i2Dbuf_strip); } //****************************************************************************** // Function: serviceOutgoingFifo(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // Helper routine to put data in the outgoing fifo, if we aren't already waiting // for something to be there. If the fifo has only room for a very little data, // go head and hit the board with a mailbox hit immediately. Otherwise, it will // have to happen later in the interrupt processing. Since this routine may be // called both at interrupt and foreground time, we must turn off interrupts // during the entire process. //****************************************************************************** static void serviceOutgoingFifo(i2eBordStrPtr pB) { // If we aren't currently waiting for the board to empty our fifo, service // everything that is pending, in priority order (especially, Bypass before // Inline). if ( ! pB->i2eWaitingForEmptyFifo ) { i2StuffFifoFlow(pB); i2StuffFifoBypass(pB); i2StuffFifoInline(pB); iiSendPendingMail(pB); } } //****************************************************************************** // Function: i2ServiceBoard(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // Normally this is called from interrupt level, but there is deliberately // nothing in here specific to being called from interrupt level. All the // hardware-specific, interrupt-specific things happen at the outer levels. 
// // For example, a timer interrupt could drive this routine for some sort of // polled operation. The only requirement is that the programmer deal with any // atomiticity/concurrency issues that result. // // This routine responds to the board's having sent mailbox information to the // host (which would normally cause an interrupt). This routine reads the // incoming mailbox. If there is no data in it, this board did not create the // interrupt and/or has nothing to be done to it. (Except, if we have been // waiting to write mailbox data to it, we may do so. // // Based on the value in the mailbox, we may take various actions. // // No checking here of pB validity: after all, it shouldn't have been called by // the handler unless pB were on the list. //****************************************************************************** static inline int i2ServiceBoard ( i2eBordStrPtr pB ) { unsigned inmail; unsigned long flags; /* This should be atomic because of the way we are called... */ if (NO_MAIL_HERE == ( inmail = pB->i2eStartMail ) ) { inmail = iiGetMail(pB); } pB->i2eStartMail = NO_MAIL_HERE; ip2trace (ITRC_NO_PORT, ITRC_INTR, 2, 1, inmail ); if (inmail != NO_MAIL_HERE) { // If the board has gone fatal, nothing to do but hit a bit that will // alert foreground tasks to protest! if ( inmail & MB_FATAL_ERROR ) { pB->i2eFatal = 1; goto exit_i2ServiceBoard; } /* Assuming no fatal condition, we proceed to do work */ if ( inmail & MB_IN_STUFFED ) { pB->i2eFifoInInts++; i2StripFifo(pB); /* There might be incoming packets */ } if (inmail & MB_OUT_STRIPPED) { pB->i2eFifoOutInts++; write_lock_irqsave(&pB->write_fifo_spinlock, flags); pB->i2eFifoRemains = pB->i2eFifoSize; pB->i2eWaitingForEmptyFifo = 0; write_unlock_irqrestore(&pB->write_fifo_spinlock, flags); ip2trace (ITRC_NO_PORT, ITRC_INTR, 30, 1, pB->i2eFifoRemains ); } serviceOutgoingFifo(pB); } ip2trace (ITRC_NO_PORT, ITRC_INTR, 8, 0 ); exit_i2ServiceBoard: return 0; }
gpl-2.0
philenotfound/linux-stable-15khz
drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c
1899
5410
/****************************************************************************** * * Copyright(c) 2009-2010 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../pwrseqcmd.h" #include "pwrseq.h" /* drivers should parse below arrays and do the corresponding actions */ /* 3 Power on Array */ struct wlan_pwr_cfg rtl8812_power_on_flow[RTL8812_TRANS_CARDEMU_TO_ACT_STEPS + RTL8812_TRANS_END_STEPS] = { RTL8812_TRANS_CARDEMU_TO_ACT RTL8812_TRANS_END }; /* 3Radio off GPIO Array */ struct wlan_pwr_cfg rtl8812_radio_off_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + RTL8812_TRANS_END_STEPS] = { RTL8812_TRANS_ACT_TO_CARDEMU RTL8812_TRANS_END }; /* 3Card Disable Array */ struct wlan_pwr_cfg rtl8812_card_disable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + RTL8812_TRANS_END_STEPS] = { RTL8812_TRANS_ACT_TO_CARDEMU RTL8812_TRANS_CARDEMU_TO_CARDDIS RTL8812_TRANS_END }; /* 3 Card Enable Array */ struct wlan_pwr_cfg rtl8812_card_enable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + RTL8812_TRANS_END_STEPS] = { RTL8812_TRANS_CARDDIS_TO_CARDEMU RTL8812_TRANS_CARDEMU_TO_ACT RTL8812_TRANS_END }; /* 3Suspend Array */ struct wlan_pwr_cfg 
rtl8812_suspend_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + RTL8812_TRANS_CARDEMU_TO_SUS_STEPS + RTL8812_TRANS_END_STEPS] = { RTL8812_TRANS_ACT_TO_CARDEMU RTL8812_TRANS_CARDEMU_TO_SUS RTL8812_TRANS_END }; /* 3 Resume Array */ struct wlan_pwr_cfg rtl8812_resume_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + RTL8812_TRANS_CARDEMU_TO_SUS_STEPS + RTL8812_TRANS_END_STEPS] = { RTL8812_TRANS_SUS_TO_CARDEMU RTL8812_TRANS_CARDEMU_TO_ACT RTL8812_TRANS_END }; /* 3HWPDN Array */ struct wlan_pwr_cfg rtl8812_hwpdn_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + RTL8812_TRANS_END_STEPS] = { RTL8812_TRANS_ACT_TO_CARDEMU RTL8812_TRANS_CARDEMU_TO_PDN RTL8812_TRANS_END }; /* 3 Enter LPS */ struct wlan_pwr_cfg rtl8812_enter_lps_flow[RTL8812_TRANS_ACT_TO_LPS_STEPS + RTL8812_TRANS_END_STEPS] = { /* FW behavior */ RTL8812_TRANS_ACT_TO_LPS RTL8812_TRANS_END }; /* 3 Leave LPS */ struct wlan_pwr_cfg rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEPS + RTL8812_TRANS_END_STEPS] = { /* FW behavior */ RTL8812_TRANS_LPS_TO_ACT RTL8812_TRANS_END }; /* drivers should parse below arrays and do the corresponding actions */ /*3 Power on Array*/ struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS + RTL8821A_TRANS_END_STEPS] = { RTL8821A_TRANS_CARDEMU_TO_ACT RTL8821A_TRANS_END }; /*3Radio off GPIO Array */ struct wlan_pwr_cfg rtl8821A_radio_off_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + RTL8821A_TRANS_END_STEPS] = { RTL8821A_TRANS_ACT_TO_CARDEMU RTL8821A_TRANS_END }; /*3Card Disable Array*/ struct wlan_pwr_cfg rtl8821A_card_disable_flow [RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS + RTL8821A_TRANS_END_STEPS] = { RTL8821A_TRANS_ACT_TO_CARDEMU RTL8821A_TRANS_CARDEMU_TO_CARDDIS RTL8821A_TRANS_END }; /*3 Card Enable Array*/ /*RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS*/ struct wlan_pwr_cfg rtl8821A_card_enable_flow [RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS + RTL8821A_TRANS_END_STEPS] = { 
RTL8821A_TRANS_CARDDIS_TO_CARDEMU RTL8821A_TRANS_CARDEMU_TO_ACT RTL8821A_TRANS_END }; /*3Suspend Array*/ struct wlan_pwr_cfg rtl8821A_suspend_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS + RTL8821A_TRANS_END_STEPS] = { RTL8821A_TRANS_ACT_TO_CARDEMU RTL8821A_TRANS_CARDEMU_TO_SUS RTL8821A_TRANS_END }; /*3 Resume Array*/ struct wlan_pwr_cfg rtl8821A_resume_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS + RTL8821A_TRANS_END_STEPS] = { RTL8821A_TRANS_SUS_TO_CARDEMU RTL8821A_TRANS_CARDEMU_TO_ACT RTL8821A_TRANS_END }; /*3HWPDN Array*/ struct wlan_pwr_cfg rtl8821A_hwpdn_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS + RTL8821A_TRANS_END_STEPS] = { RTL8821A_TRANS_ACT_TO_CARDEMU RTL8821A_TRANS_CARDEMU_TO_PDN RTL8821A_TRANS_END }; /*3 Enter LPS */ struct wlan_pwr_cfg rtl8821A_enter_lps_flow[RTL8821A_TRANS_ACT_TO_LPS_STEPS + RTL8821A_TRANS_END_STEPS] = { /*FW behavior*/ RTL8821A_TRANS_ACT_TO_LPS RTL8821A_TRANS_END }; /*3 Leave LPS */ struct wlan_pwr_cfg rtl8821A_leave_lps_flow[RTL8821A_TRANS_LPS_TO_ACT_STEPS + RTL8821A_TRANS_END_STEPS] = { /*FW behavior*/ RTL8821A_TRANS_LPS_TO_ACT RTL8821A_TRANS_END };
gpl-2.0
akellar/android_kernel_motorola_shamu
net/sctp/probe.c
2155
5621
/* * sctp_probe - Observe the SCTP flow with kprobes. * * The idea for this came from Werner Almesberger's umlsim * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org> * * Modified for SCTP from Stephen Hemminger's code * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/socket.h> #include <linux/sctp.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/kfifo.h> #include <linux/time.h> #include <net/net_namespace.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); MODULE_DESCRIPTION("SCTP snooper"); MODULE_LICENSE("GPL"); static int port __read_mostly = 0; MODULE_PARM_DESC(port, "Port to match (0=all)"); module_param(port, int, 0); static int bufsize __read_mostly = 64 * 1024; MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)"); module_param(bufsize, int, 0); static int full __read_mostly = 1; MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); module_param(full, int, 0); static const char procname[] = "sctpprobe"; static struct { struct kfifo fifo; spinlock_t lock; wait_queue_head_t wait; struct timespec tstart; } sctpw; 
static __printf(1, 2) void printl(const char *fmt, ...) { va_list args; int len; char tbuf[256]; va_start(args, fmt); len = vscnprintf(tbuf, sizeof(tbuf), fmt, args); va_end(args); kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock); wake_up(&sctpw.wait); } static int sctpprobe_open(struct inode *inode, struct file *file) { kfifo_reset(&sctpw.fifo); getnstimeofday(&sctpw.tstart); return 0; } static ssize_t sctpprobe_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { int error = 0, cnt = 0; unsigned char *tbuf; if (!buf) return -EINVAL; if (len == 0) return 0; tbuf = vmalloc(len); if (!tbuf) return -ENOMEM; error = wait_event_interruptible(sctpw.wait, kfifo_len(&sctpw.fifo) != 0); if (error) goto out_free; cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock); error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; out_free: vfree(tbuf); return error ? error : cnt; } static const struct file_operations sctpprobe_fops = { .owner = THIS_MODULE, .open = sctpprobe_open, .read = sctpprobe_read, .llseek = noop_llseek, }; static sctp_disposition_t jsctp_sf_eat_sack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_transport *sp; static __u32 lcwnd = 0; struct timespec now; sp = asoc->peer.primary_path; if ((full || sp->cwnd != lcwnd) && (!port || asoc->peer.port == port || ep->base.bind_addr.port == port)) { lcwnd = sp->cwnd; getnstimeofday(&now); now = timespec_sub(now, sctpw.tstart); printl("%lu.%06lu ", (unsigned long) now.tv_sec, (unsigned long) now.tv_nsec / NSEC_PER_USEC); printl("%p %5d %5d %5d %8d %5d ", asoc, ep->base.bind_addr.port, asoc->peer.port, asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data); list_for_each_entry(sp, &asoc->peer.transport_addr_list, transports) { if (sp == asoc->peer.primary_path) printl("*"); if (sp->ipaddr.sa.sa_family == AF_INET) printl("%pI4 ", &sp->ipaddr.v4.sin_addr); else printl("%pI6 ", 
&sp->ipaddr.v6.sin6_addr); printl("%2u %8u %8u %8u %8u %8u ", sp->state, sp->cwnd, sp->ssthresh, sp->flight_size, sp->partial_bytes_acked, sp->pathmtu); } printl("\n"); } jprobe_return(); return 0; } static struct jprobe sctp_recv_probe = { .kp = { .symbol_name = "sctp_sf_eat_sack_6_2", }, .entry = jsctp_sf_eat_sack, }; static __init int sctpprobe_init(void) { int ret = -ENOMEM; /* Warning: if the function signature of sctp_sf_eat_sack_6_2, * has been changed, you also have to change the signature of * jsctp_sf_eat_sack, otherwise you end up right here! */ BUILD_BUG_ON(__same_type(sctp_sf_eat_sack_6_2, jsctp_sf_eat_sack) == 0); init_waitqueue_head(&sctpw.wait); spin_lock_init(&sctpw.lock); if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL)) return ret; if (!proc_create(procname, S_IRUSR, init_net.proc_net, &sctpprobe_fops)) goto free_kfifo; ret = register_jprobe(&sctp_recv_probe); if (ret) goto remove_proc; pr_info("probe registered (port=%d)\n", port); return 0; remove_proc: remove_proc_entry(procname, init_net.proc_net); free_kfifo: kfifo_free(&sctpw.fifo); return ret; } static __exit void sctpprobe_exit(void) { kfifo_free(&sctpw.fifo); remove_proc_entry(procname, init_net.proc_net); unregister_jprobe(&sctp_recv_probe); } module_init(sctpprobe_init); module_exit(sctpprobe_exit);
gpl-2.0
profglavcho/mt6577-kernel-3.10.65
drivers/misc/phantom.c
2411
13574
/* * Copyright (C) 2005-2007 Jiri Slaby <jirislaby@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * You need a userspace library to cooperate with this driver. It (and other * info) may be obtained here: * http://www.fi.muni.cz/~xslaby/phantom.html * or alternatively, you might use OpenHaptics provided by Sensable. */ #include <linux/compat.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/pci.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/interrupt.h> #include <linux/cdev.h> #include <linux/slab.h> #include <linux/phantom.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/atomic.h> #include <asm/io.h> #define PHANTOM_VERSION "n0.9.8" #define PHANTOM_MAX_MINORS 8 #define PHN_IRQCTL 0x4c /* irq control in caddr space */ #define PHB_RUNNING 1 #define PHB_NOT_OH 2 static DEFINE_MUTEX(phantom_mutex); static struct class *phantom_class; static int phantom_major; struct phantom_device { unsigned int opened; void __iomem *caddr; u32 __iomem *iaddr; u32 __iomem *oaddr; unsigned long status; atomic_t counter; wait_queue_head_t wait; struct cdev cdev; struct mutex open_lock; spinlock_t regs_lock; /* used in NOT_OH mode */ struct phm_regs oregs; u32 ctl_reg; }; static unsigned char phantom_devices[PHANTOM_MAX_MINORS]; static int phantom_status(struct phantom_device *dev, unsigned long newstat) { pr_debug("phantom_status %lx %lx\n", dev->status, newstat); if (!(dev->status & PHB_RUNNING) && (newstat & PHB_RUNNING)) { atomic_set(&dev->counter, 0); iowrite32(PHN_CTL_IRQ, dev->iaddr + PHN_CONTROL); iowrite32(0x43, dev->caddr + PHN_IRQCTL); ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */ } else if ((dev->status & PHB_RUNNING) && !(newstat & PHB_RUNNING)) { iowrite32(0, dev->caddr + 
PHN_IRQCTL); ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */ } dev->status = newstat; return 0; } /* * File ops */ static long phantom_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct phantom_device *dev = file->private_data; struct phm_regs rs; struct phm_reg r; void __user *argp = (void __user *)arg; unsigned long flags; unsigned int i; switch (cmd) { case PHN_SETREG: case PHN_SET_REG: if (copy_from_user(&r, argp, sizeof(r))) return -EFAULT; if (r.reg > 7) return -EINVAL; spin_lock_irqsave(&dev->regs_lock, flags); if (r.reg == PHN_CONTROL && (r.value & PHN_CTL_IRQ) && phantom_status(dev, dev->status | PHB_RUNNING)){ spin_unlock_irqrestore(&dev->regs_lock, flags); return -ENODEV; } pr_debug("phantom: writing %x to %u\n", r.value, r.reg); /* preserve amp bit (don't allow to change it when in NOT_OH) */ if (r.reg == PHN_CONTROL && (dev->status & PHB_NOT_OH)) { r.value &= ~PHN_CTL_AMP; r.value |= dev->ctl_reg & PHN_CTL_AMP; dev->ctl_reg = r.value; } iowrite32(r.value, dev->iaddr + r.reg); ioread32(dev->iaddr); /* PCI posting */ if (r.reg == PHN_CONTROL && !(r.value & PHN_CTL_IRQ)) phantom_status(dev, dev->status & ~PHB_RUNNING); spin_unlock_irqrestore(&dev->regs_lock, flags); break; case PHN_SETREGS: case PHN_SET_REGS: if (copy_from_user(&rs, argp, sizeof(rs))) return -EFAULT; pr_debug("phantom: SRS %u regs %x\n", rs.count, rs.mask); spin_lock_irqsave(&dev->regs_lock, flags); if (dev->status & PHB_NOT_OH) memcpy(&dev->oregs, &rs, sizeof(rs)); else { u32 m = min(rs.count, 8U); for (i = 0; i < m; i++) if (rs.mask & BIT(i)) iowrite32(rs.values[i], dev->oaddr + i); ioread32(dev->iaddr); /* PCI posting */ } spin_unlock_irqrestore(&dev->regs_lock, flags); break; case PHN_GETREG: case PHN_GET_REG: if (copy_from_user(&r, argp, sizeof(r))) return -EFAULT; if (r.reg > 7) return -EINVAL; r.value = ioread32(dev->iaddr + r.reg); if (copy_to_user(argp, &r, sizeof(r))) return -EFAULT; break; case PHN_GETREGS: case PHN_GET_REGS: { u32 m; if 
(copy_from_user(&rs, argp, sizeof(rs))) return -EFAULT; m = min(rs.count, 8U); pr_debug("phantom: GRS %u regs %x\n", rs.count, rs.mask); spin_lock_irqsave(&dev->regs_lock, flags); for (i = 0; i < m; i++) if (rs.mask & BIT(i)) rs.values[i] = ioread32(dev->iaddr + i); atomic_set(&dev->counter, 0); spin_unlock_irqrestore(&dev->regs_lock, flags); if (copy_to_user(argp, &rs, sizeof(rs))) return -EFAULT; break; } case PHN_NOT_OH: spin_lock_irqsave(&dev->regs_lock, flags); if (dev->status & PHB_RUNNING) { printk(KERN_ERR "phantom: you need to set NOT_OH " "before you start the device!\n"); spin_unlock_irqrestore(&dev->regs_lock, flags); return -EINVAL; } dev->status |= PHB_NOT_OH; spin_unlock_irqrestore(&dev->regs_lock, flags); break; default: return -ENOTTY; } return 0; } #ifdef CONFIG_COMPAT static long phantom_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if (_IOC_NR(cmd) <= 3 && _IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT); cmd |= sizeof(void *) << _IOC_SIZESHIFT; } return phantom_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); } #else #define phantom_compat_ioctl NULL #endif static int phantom_open(struct inode *inode, struct file *file) { struct phantom_device *dev = container_of(inode->i_cdev, struct phantom_device, cdev); mutex_lock(&phantom_mutex); nonseekable_open(inode, file); if (mutex_lock_interruptible(&dev->open_lock)) { mutex_unlock(&phantom_mutex); return -ERESTARTSYS; } if (dev->opened) { mutex_unlock(&dev->open_lock); mutex_unlock(&phantom_mutex); return -EINVAL; } WARN_ON(dev->status & PHB_NOT_OH); file->private_data = dev; atomic_set(&dev->counter, 0); dev->opened++; mutex_unlock(&dev->open_lock); mutex_unlock(&phantom_mutex); return 0; } static int phantom_release(struct inode *inode, struct file *file) { struct phantom_device *dev = file->private_data; mutex_lock(&dev->open_lock); dev->opened = 0; phantom_status(dev, dev->status & ~PHB_RUNNING); dev->status &= ~PHB_NOT_OH; 
mutex_unlock(&dev->open_lock); return 0; } static unsigned int phantom_poll(struct file *file, poll_table *wait) { struct phantom_device *dev = file->private_data; unsigned int mask = 0; pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter)); poll_wait(file, &dev->wait, wait); if (!(dev->status & PHB_RUNNING)) mask = POLLERR; else if (atomic_read(&dev->counter)) mask = POLLIN | POLLRDNORM; pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter)); return mask; } static const struct file_operations phantom_file_ops = { .open = phantom_open, .release = phantom_release, .unlocked_ioctl = phantom_ioctl, .compat_ioctl = phantom_compat_ioctl, .poll = phantom_poll, .llseek = no_llseek, }; static irqreturn_t phantom_isr(int irq, void *data) { struct phantom_device *dev = data; unsigned int i; u32 ctl; spin_lock(&dev->regs_lock); ctl = ioread32(dev->iaddr + PHN_CONTROL); if (!(ctl & PHN_CTL_IRQ)) { spin_unlock(&dev->regs_lock); return IRQ_NONE; } iowrite32(0, dev->iaddr); iowrite32(0xc0, dev->iaddr); if (dev->status & PHB_NOT_OH) { struct phm_regs *r = &dev->oregs; u32 m = min(r->count, 8U); for (i = 0; i < m; i++) if (r->mask & BIT(i)) iowrite32(r->values[i], dev->oaddr + i); dev->ctl_reg ^= PHN_CTL_AMP; iowrite32(dev->ctl_reg, dev->iaddr + PHN_CONTROL); } spin_unlock(&dev->regs_lock); ioread32(dev->iaddr); /* PCI posting */ atomic_inc(&dev->counter); wake_up_interruptible(&dev->wait); return IRQ_HANDLED; } /* * Init and deinit driver */ static unsigned int phantom_get_free(void) { unsigned int i; for (i = 0; i < PHANTOM_MAX_MINORS; i++) if (phantom_devices[i] == 0) break; return i; } static int phantom_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct phantom_device *pht; unsigned int minor; int retval; retval = pci_enable_device(pdev); if (retval) { dev_err(&pdev->dev, "pci_enable_device failed!\n"); goto err; } minor = phantom_get_free(); if (minor == PHANTOM_MAX_MINORS) { dev_err(&pdev->dev, "too many devices found!\n"); retval 
= -EIO; goto err_dis; } phantom_devices[minor] = 1; retval = pci_request_regions(pdev, "phantom"); if (retval) { dev_err(&pdev->dev, "pci_request_regions failed!\n"); goto err_null; } retval = -ENOMEM; pht = kzalloc(sizeof(*pht), GFP_KERNEL); if (pht == NULL) { dev_err(&pdev->dev, "unable to allocate device\n"); goto err_reg; } pht->caddr = pci_iomap(pdev, 0, 0); if (pht->caddr == NULL) { dev_err(&pdev->dev, "can't remap conf space\n"); goto err_fr; } pht->iaddr = pci_iomap(pdev, 2, 0); if (pht->iaddr == NULL) { dev_err(&pdev->dev, "can't remap input space\n"); goto err_unmc; } pht->oaddr = pci_iomap(pdev, 3, 0); if (pht->oaddr == NULL) { dev_err(&pdev->dev, "can't remap output space\n"); goto err_unmi; } mutex_init(&pht->open_lock); spin_lock_init(&pht->regs_lock); init_waitqueue_head(&pht->wait); cdev_init(&pht->cdev, &phantom_file_ops); pht->cdev.owner = THIS_MODULE; iowrite32(0, pht->caddr + PHN_IRQCTL); ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */ retval = request_irq(pdev->irq, phantom_isr, IRQF_SHARED | IRQF_DISABLED, "phantom", pht); if (retval) { dev_err(&pdev->dev, "can't establish ISR\n"); goto err_unmo; } retval = cdev_add(&pht->cdev, MKDEV(phantom_major, minor), 1); if (retval) { dev_err(&pdev->dev, "chardev registration failed\n"); goto err_irq; } if (IS_ERR(device_create(phantom_class, &pdev->dev, MKDEV(phantom_major, minor), NULL, "phantom%u", minor))) dev_err(&pdev->dev, "can't create device\n"); pci_set_drvdata(pdev, pht); return 0; err_irq: free_irq(pdev->irq, pht); err_unmo: pci_iounmap(pdev, pht->oaddr); err_unmi: pci_iounmap(pdev, pht->iaddr); err_unmc: pci_iounmap(pdev, pht->caddr); err_fr: kfree(pht); err_reg: pci_release_regions(pdev); err_null: phantom_devices[minor] = 0; err_dis: pci_disable_device(pdev); err: return retval; } static void phantom_remove(struct pci_dev *pdev) { struct phantom_device *pht = pci_get_drvdata(pdev); unsigned int minor = MINOR(pht->cdev.dev); device_destroy(phantom_class, MKDEV(phantom_major, minor)); 
cdev_del(&pht->cdev); iowrite32(0, pht->caddr + PHN_IRQCTL); ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */ free_irq(pdev->irq, pht); pci_iounmap(pdev, pht->oaddr); pci_iounmap(pdev, pht->iaddr); pci_iounmap(pdev, pht->caddr); kfree(pht); pci_release_regions(pdev); phantom_devices[minor] = 0; pci_disable_device(pdev); } #ifdef CONFIG_PM static int phantom_suspend(struct pci_dev *pdev, pm_message_t state) { struct phantom_device *dev = pci_get_drvdata(pdev); iowrite32(0, dev->caddr + PHN_IRQCTL); ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */ synchronize_irq(pdev->irq); return 0; } static int phantom_resume(struct pci_dev *pdev) { struct phantom_device *dev = pci_get_drvdata(pdev); iowrite32(0, dev->caddr + PHN_IRQCTL); return 0; } #else #define phantom_suspend NULL #define phantom_resume NULL #endif static struct pci_device_id phantom_pci_tbl[] = { { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050, .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_9050, .class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 }, { 0, } }; MODULE_DEVICE_TABLE(pci, phantom_pci_tbl); static struct pci_driver phantom_pci_driver = { .name = "phantom", .id_table = phantom_pci_tbl, .probe = phantom_probe, .remove = phantom_remove, .suspend = phantom_suspend, .resume = phantom_resume }; static CLASS_ATTR_STRING(version, 0444, PHANTOM_VERSION); static int __init phantom_init(void) { int retval; dev_t dev; phantom_class = class_create(THIS_MODULE, "phantom"); if (IS_ERR(phantom_class)) { retval = PTR_ERR(phantom_class); printk(KERN_ERR "phantom: can't register phantom class\n"); goto err; } retval = class_create_file(phantom_class, &class_attr_version.attr); if (retval) { printk(KERN_ERR "phantom: can't create sysfs version file\n"); goto err_class; } retval = alloc_chrdev_region(&dev, 0, PHANTOM_MAX_MINORS, "phantom"); if (retval) { printk(KERN_ERR "phantom: can't register character device\n"); goto err_attr; } phantom_major = MAJOR(dev); retval = 
pci_register_driver(&phantom_pci_driver); if (retval) { printk(KERN_ERR "phantom: can't register pci driver\n"); goto err_unchr; } printk(KERN_INFO "Phantom Linux Driver, version " PHANTOM_VERSION ", " "init OK\n"); return 0; err_unchr: unregister_chrdev_region(dev, PHANTOM_MAX_MINORS); err_attr: class_remove_file(phantom_class, &class_attr_version.attr); err_class: class_destroy(phantom_class); err: return retval; } static void __exit phantom_exit(void) { pci_unregister_driver(&phantom_pci_driver); unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS); class_remove_file(phantom_class, &class_attr_version.attr); class_destroy(phantom_class); pr_debug("phantom: module successfully removed\n"); } module_init(phantom_init); module_exit(phantom_exit); MODULE_AUTHOR("Jiri Slaby <jirislaby@gmail.com>"); MODULE_DESCRIPTION("Sensable Phantom driver (PCI devices)"); MODULE_LICENSE("GPL"); MODULE_VERSION(PHANTOM_VERSION);
gpl-2.0
aatjitra/M7
net/sunrpc/auth_gss/auth_gss.c
4715
45149
/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song <dugsong@monkey.org>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>

/* Operation tables defined near the bottom of this file. */
static const struct rpc_authops authgss_ops;
static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

/* Seconds to back off before retrying an upcall for a cred that was
 * flagged RPCAUTH_CRED_NEGATIVE (gssd reported -EKEYEXPIRED). */
#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif

/* Per-request slack (in bytes) reserved for GSS credential material. */
#define GSS_CRED_SLACK (RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK 100

/* Per-(client, flavor) GSS authenticator state; refcounted via @kref. */
struct gss_auth {
	struct kref kref;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named after the
	 * mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct rpc_pipe *pipe[2];
};

/* pipe_version >= 0 if and only if someone has a pipe open.
 */
static int pipe_version = -1;			/* protected by pipe_version_lock */
static atomic_t pipe_users = ATOMIC_INIT(0);	/* number of open pipe handles */
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);

static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;

/* Take a reference on a GSS client context and return it. */
static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

/* Drop a reference; frees the context (RCU-deferred) on last put. */
static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	/* Only a cred still in the NEW state may receive a context. */
	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	/* make UPTODATE visible before NEW is cleared */
	smp_mb__before_clear_bit();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}

/*
 * Copy @len bytes from [p, end) into @res, with overflow/bounds checking.
 * Returns the advanced cursor, or ERR_PTR(-EFAULT) on a short buffer.
 */
static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

/*
 * Parse a length-prefixed opaque blob from [p, end) into @dest
 * (data is kmemdup'd; caller owns and must free dest->data).
 */
static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

/* Return a referenced copy of the cred's context, or NULL if none set. */
static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}

/* Allocate an empty client context with one reference. May return NULL. */
static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count,1);
	}
	return ctx;
}

/* Floor applied when gssd reports a zero cred lifetime (seconds). */
#define GSSD_MIN_TIMEOUT (60 * 60)

/*
 * Parse a downcall from gssd in [p, end) and fill @ctx: lifetime,
 * sequence window, opaque wire context, then the mechanism-specific
 * security context (imported via @gm). Returns the advanced cursor or
 * an ERR_PTR.
 */
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	u32 window_size;
	int ret;

	/* First unsigned int gives the lifetime (in seconds) of the cred */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	/* expire after 3/4 of the reported lifetime */
	ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4;
	/* Sequence number window. Determines the maximum number of simultaneous requests */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, GFP_NOFS);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	return q;
err:
	dprintk("RPC: gss_fill_context returning %ld\n", -PTR_ERR(p));
	return p;
}

#define UPCALL_BUF_LEN 128

/*
 * One in-flight upcall to gssd for a given uid; refcounted via @count,
 * linked on pipe->in_downcall via @list while awaiting a downcall.
 */
struct gss_upcall_msg {
	atomic_t count;
	uid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_pipe *pipe;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};

/*
 * If a pipe is open, take a pipe_users reference and return the
 * negotiated version (0 or 1); otherwise return -EAGAIN.
 */
static int get_pipe_version(void)
{
	int ret;

	spin_lock(&pipe_version_lock);
	if (pipe_version >= 0) {
		atomic_inc(&pipe_users);
		ret = pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

/* Drop a pipe_users reference; last user resets the negotiated version. */
static void put_pipe_version(void)
{
	if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
		pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}

/* Drop a reference on an upcall message; last put tears it down. */
static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version();
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	kfree(gss_msg);
}

/*
 * Find a queued upcall for @uid on @pipe and take a reference.
 * Caller must hold pipe->lock.
 */
static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_pipe *pipe, uid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &pipe->in_downcall, list) {
		if (pos->uid != uid)
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: gss_find_upcall found msg %p\n", pos);
		return pos;
	}
	dprintk("RPC: gss_find_upcall found nothing\n");
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;
	struct gss_upcall_msg *old;

	spin_lock(&pipe->lock);
	old = __gss_find_upcall(pipe, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &pipe->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&pipe->lock);
	return gss_msg;
}

/*
 * Unlink the message and wake all waiters, dropping the list's
 * reference. Caller must hold pipe->lock.
 */
static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

/* Locked wrapper for __gss_unhash_msg; no-op if already unhashed. */
static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&pipe->lock);
	/* re-check under the lock: someone may have unhashed it meanwhile */
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
}

/*
 * Apply the result of a completed upcall to the cred: install the new
 * context on success, mark the cred negative on -EKEYEXPIRED, and wake
 * tasks sleeping on the message. Called with pipe->lock held.
 */
static void
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
{
	switch (gss_msg->msg.errno) {
	case 0:
		if (gss_msg->ctx == NULL)
			break;
		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
		break;
	case -EKEYEXPIRED:
		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
	}
	gss_cred->gc_upcall_timestamp = jiffies;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
}

/* rpc_sleep_on callback: consume the upcall result for a sleeping task. */
static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct rpc_pipe *pipe = gss_msg->pipe;

	spin_lock(&pipe->lock);
	gss_handle_downcall_result(gss_cred, gss_msg);
	spin_unlock(&pipe->lock);
	task->tk_status = gss_msg->msg.errno;
	/* drop the reference taken in gss_refresh_upcall */
	gss_release_msg(gss_msg);
}

/* Legacy (v0) upcall format: the raw uid only. */
static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
{
	gss_msg->msg.data = &gss_msg->uid;
	gss_msg->msg.len = sizeof(gss_msg->uid);
}

/*
 * Text-based (v1) upcall format:
 * "mech=<name> uid=<uid> [target=...] [service=...] [enctypes=...]\n",
 * built into gss_msg->databuf.
 */
static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
				struct rpc_clnt *clnt,
				const char *service_name)
{
	struct gss_api_mech *mech = gss_msg->auth->mech;
	char *p = gss_msg->databuf;
	int len = 0;

	gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
				   mech->gm_name,
				   gss_msg->uid);
	p += gss_msg->msg.len;
	if (clnt->cl_principal) {
		len = sprintf(p, "target=%s ", clnt->cl_principal);
		p += len;
		gss_msg->msg.len += len;
	}
	if (service_name != NULL) {
		len = sprintf(p, "service=%s ", service_name);
		p += len;
		gss_msg->msg.len += len;
	}
	if (mech->gm_upcall_enctypes) {
		len = sprintf(p, "enctypes=%s ", mech->gm_upcall_enctypes);
		p += len;
		gss_msg->msg.len += len;
	}
	len = sprintf(p, "\n");
	gss_msg->msg.len += len;

	gss_msg->msg.data = gss_msg->databuf;
	/* databuf is fixed-size; the fields above must fit */
	BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
}

/* Dispatch on the negotiated pipe version. */
static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
			   struct rpc_clnt *clnt,
			   const char *service_name)
{
	if (pipe_version == 0)
		gss_encode_v0_msg(gss_msg);
	else /* pipe_version == 1 */
		gss_encode_v1_msg(gss_msg, clnt, service_name);
}

/*
 * Allocate and encode a new upcall message (one reference held).
 * Takes a pipe_users reference via get_pipe_version(); may return
 * ERR_PTR(-EAGAIN) if no gssd pipe is open yet.
 */
static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
		uid_t uid, const char *service_name)
{
	struct gss_upcall_msg *gss_msg;
	int vers;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		return ERR_PTR(-ENOMEM);
	vers = get_pipe_version();
	if (vers < 0) {
		kfree(gss_msg);
		return ERR_PTR(vers);
	}
	gss_msg->pipe = gss_auth->pipe[vers];
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	atomic_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	gss_encode_msg(gss_msg, clnt, service_name);
	return gss_msg;
}

/*
 * Allocate an upcall for @cred and queue it to gssd, or return an
 * existing in-flight upcall for the same uid.
 */
static struct gss_upcall_msg *
gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	uid_t uid = cred->cr_uid;

	gss_new = gss_alloc_msg(gss_auth, clnt, uid, gss_cred->gc_principal);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_new);
	if (gss_msg == gss_new) {
		/* ours was queued; hand it to pipefs */
		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		/* a duplicate existed; drop ours and use the old one */
		gss_release_msg(gss_new);
	return gss_msg;
}

/* Rate-limited (one per 15s) warning that gssd appears to be absent. */
static void warn_gssd(void)
{
	static unsigned long ratelimit;
	unsigned long now = jiffies;

	if (time_after(now, ratelimit)) {
		printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n" "Please check user daemon is running.\n");
		ratelimit = now + 15*HZ;
	}
}

/*
 * Async-task path for obtaining a context: issue (or join) an upcall and
 * put the task to sleep until gssd answers. Returns -EAGAIN while waiting.
 */
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe;
	int err = 0;

	dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, cred->cr_uid);
	gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh. */
		warn_gssd();
		task->tk_timeout = 15*HZ;
		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
		return -EAGAIN;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	spin_lock(&pipe->lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else {
		/* downcall already arrived; consume it now */
		gss_handle_downcall_result(gss_cred, gss_msg);
		err = gss_msg->msg.errno;
	}
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", task->tk_pid, cred->cr_uid, err);
	return err;
}

/*
 * Synchronous (process-context) path for obtaining a context: issue the
 * upcall and block killably until gssd answers or a fatal signal arrives.
 */
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct rpc_pipe *pipe;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err = 0;

	dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid);
retry:
	gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* no pipe open yet; wait up to 15s for gssd to appear */
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				pipe_version >= 0, 15*HZ);
		if (pipe_version < 0) {
			warn_gssd();
			err = -EACCES;
		}
		if (err)
			goto out;
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
		spin_lock(&pipe->lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			/* NB: we leave the loop with pipe->lock still held;
			 * it is released after the result is read below. */
			break;
		}
		spin_unlock(&pipe->lock);
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_msg->ctx);
	else
		err = gss_msg->msg.errno;
	spin_unlock(&pipe->lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: gss_create_upcall for uid %u result %d\n", cred->cr_uid, err);
	return err;
}

/* Upper bound on a single downcall write from gssd. */
#define MSG_BUF_MAXSIZE 1024

/*
 * pipefs downcall handler: gssd writes "<uid><context blob>"; match it
 * to the pending upcall for that uid and install the parsed context.
 * Returns @mlen on success (including gssd-reported auth errors, which
 * are delivered via gss_msg->msg.errno), or a negative errno.
 */
static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe = RPC_I(filp->f_dentry->d_inode)->pipe;
	struct gss_cl_ctx *ctx;
	uid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_NOFS);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &uid, sizeof(uid));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&pipe->lock);
	gss_msg = __gss_find_upcall(pipe, uid);
	if (gss_msg == NULL) {
		spin_unlock(&pipe->lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&pipe->lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		switch (err) {
		case -EACCES:
		case -EKEYEXPIRED:
			/* gssd-reported auth failure: propagate via errno,
			 * but the write itself succeeded */
			gss_msg->msg.errno = err;
			err = mlen;
			break;
		case -EFAULT:
		case -ENOMEM:
		case -EINVAL:
		case -ENOSYS:
			gss_msg->msg.errno = -EAGAIN;
			break;
		default:
			printk(KERN_CRIT "%s: bad return from " "gss_fill_context: %zd\n", __func__, err);
			BUG();
		}
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&pipe->lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: gss_pipe_downcall returning %Zd\n", err);
	return err;
}

/*
 * Common open handler for both pipes: first opener fixes the protocol
 * version; later opens of the other version get -EBUSY.
 */
static int gss_pipe_open(struct inode *inode, int new_version)
{
	int ret = 0;

	spin_lock(&pipe_version_lock);
	if (pipe_version < 0) {
		/* First open of any gss pipe determines the version: */
		pipe_version = new_version;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	} else if (pipe_version != new_version) {
		/* Trying to open a pipe of a different version */
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&pipe_users);
out:
	spin_unlock(&pipe_version_lock);
	return ret;
}

static int gss_pipe_open_v0(struct inode *inode)
{
	return gss_pipe_open(inode, 0);
}

static int gss_pipe_open_v1(struct inode *inode)
{
	return gss_pipe_open(inode, 1);
}

/*
 * Pipe-close handler: fail (-EPIPE) every queued upcall that gssd has
 * not yet picked up, then drop this opener's pipe_users reference.
 */
static void
gss_pipe_release(struct inode *inode)
{
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	struct gss_upcall_msg *gss_msg;

restart:
	spin_lock(&pipe->lock);
	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {

		if (!list_empty(&gss_msg->msg.list))
			continue;
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&pipe->lock);
		gss_release_msg(gss_msg);
		/* list was modified while unlocked; rescan from the start */
		goto restart;
	}
	spin_unlock(&pipe->lock);

	put_pipe_version();
}

/* pipefs destroy callback: fail and release a message gssd never answered. */
static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n", gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
}

/* Remove both pipe dentries from rpc_pipefs, if present. */
static void gss_pipes_dentries_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	if (gss_auth->pipe[0]->dentry)
		rpc_unlink(gss_auth->pipe[0]->dentry);
	if (gss_auth->pipe[1]->dentry)
		rpc_unlink(gss_auth->pipe[1]->dentry);
}

/*
 * Create the "gssd" (v1) and mechanism-named (v0) pipe dentries under
 * the client's rpc_pipefs directory. On failure of the second, the
 * first is unlinked again.
 */
static int gss_pipes_dentries_create(struct rpc_auth *auth)
{
	int err;
	struct gss_auth *gss_auth;
	struct rpc_clnt *clnt;

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	clnt = gss_auth->client;

	gss_auth->pipe[1]->dentry = rpc_mkpipe_dentry(clnt->cl_dentry,
						      "gssd",
						      clnt, gss_auth->pipe[1]);
	if (IS_ERR(gss_auth->pipe[1]->dentry))
		return PTR_ERR(gss_auth->pipe[1]->dentry);
	gss_auth->pipe[0]->dentry = rpc_mkpipe_dentry(clnt->cl_dentry,
						      gss_auth->mech->gm_name,
						      clnt, gss_auth->pipe[0]);
	if (IS_ERR(gss_auth->pipe[0]->dentry)) {
		err = PTR_ERR(gss_auth->pipe[0]->dentry);
		goto err_unlink_pipe_1;
	}
	return 0;

err_unlink_pipe_1:
	rpc_unlink(gss_auth->pipe[1]->dentry);
	return err;
}

/* Per-netns wrapper: destroy pipe dentries if this netns has pipefs mounted. */
static void gss_pipes_dentries_destroy_net(struct rpc_clnt *clnt,
					   struct rpc_auth *auth)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *sb;

	sb = rpc_get_sb_net(net);
	if (sb) {
		if (clnt->cl_dentry)
			gss_pipes_dentries_destroy(auth);
		rpc_put_sb_net(net);
	}
}

/* Per-netns wrapper: create pipe dentries if this netns has pipefs mounted. */
static int gss_pipes_dentries_create_net(struct rpc_clnt *clnt,
					 struct rpc_auth *auth)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *sb;
	int err = 0;

	sb = rpc_get_sb_net(net);
	if (sb) {
		if (clnt->cl_dentry)
			err = gss_pipes_dentries_create(auth);
		rpc_put_sb_net(net);
	}
	return err;
}

/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct gss_auth *gss_auth;
	struct rpc_auth * auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	gss_auth->client = clnt;
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n", __func__, flavor);
		goto err_free;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);

	/*
	 * Note: if we created the old pipe first, then someone who
	 * examined the directory at the right moment might conclude
	 * that we supported only the old pipe. So we instead create
	 * the new pipe first.
	 */
	gss_auth->pipe[1] = rpc_mkpipe_data(&gss_upcall_ops_v1,
					    RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->pipe[1])) {
		err = PTR_ERR(gss_auth->pipe[1]);
		goto err_put_mech;
	}

	gss_auth->pipe[0] = rpc_mkpipe_data(&gss_upcall_ops_v0,
					    RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->pipe[0])) {
		err = PTR_ERR(gss_auth->pipe[0]);
		goto err_destroy_pipe_1;
	}
	err = gss_pipes_dentries_create_net(clnt, auth);
	if (err)
		goto err_destroy_pipe_0;
	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_unlink_pipes;

	return auth;
err_unlink_pipes:
	gss_pipes_dentries_destroy_net(clnt, auth);
err_destroy_pipe_0:
	rpc_destroy_pipe_data(gss_auth->pipe[0]);
err_destroy_pipe_1:
	rpc_destroy_pipe_data(gss_auth->pipe[1]);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_free:
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}

/* Final teardown of a gss_auth, invoked from the kref release path. */
static void
gss_free(struct gss_auth *gss_auth)
{
	gss_pipes_dentries_destroy_net(gss_auth->client, &gss_auth->rpc_auth);
	rpc_destroy_pipe_data(gss_auth->pipe[0]);
	rpc_destroy_pipe_data(gss_auth->pipe[1]);
	gss_mech_put(gss_auth->mech);
	kfree(gss_auth);
	module_put(THIS_MODULE);
}

/* kref release callback wrapping gss_free(). */
static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}

/* rpc_authops destroy hook: drain the cred cache and drop our kref. */
static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n", auth, auth->au_flavor);
	rpcauth_destroy_credcache(auth);
	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
 * to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct rpc_task *task;

	/* nothing to tear down without a live, up-to-date context */
	if (gss_cred->gc_ctx == NULL ||
	    test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
		return 0;

	gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
	cred->cr_ops = &gss_nullops;

	/* Take a reference to ensure the cred will be destroyed either
	 * by the RPC call or by the put_rpccred() below */
	get_rpccred(cred);

	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
	if (!IS_ERR(task))
		rpc_put_task(task);

	put_rpccred(cred);
	return 1;
}

/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: gss_free_ctx\n");

	gss_delete_sec_context(&ctx->gc_gss_ctx);
	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx);
}

/* RCU callback that performs the actual context free. */
static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
	gss_do_free_ctx(ctx);
}

/* Defer context destruction past any RCU readers of gc_ctx. */
static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC: gss_free_cred %p\n", gss_cred);
	kfree(gss_cred);
}

/* RCU callback wrapping gss_free_cred(). */
static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
	gss_free_cred(gss_cred);
}

/*
 * Free a cred without sending a DESTROY to the server: detach the
 * context, RCU-free the cred, and drop our gss_auth reference.
 */
static void
gss_destroy_nullcred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;

	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * rpc_credops destroy hook: try to destroy the server-side context
 * first; if that path was taken the RPC call owns the final put.
 */
static void
gss_destroy_cred(struct rpc_cred *cred)
{
	if (gss_destroying_context(cred))
		return;
	gss_destroy_nullcred(cred);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}

/* Allocate a new GSS cred in the NEW state (context filled in later). */
static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: gss_create_cred for uid %d, flavor %d\n", acred->uid, auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_principal = NULL;
	if (acred->machine_cred)
		cred->gc_principal = acred->principal;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: gss_create_cred failed with error %d\n", err);
	return ERR_PTR(err);
}

/* rpc_credops init hook: synchronously obtain a context via upcall. */
static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

/*
 * Cred-cache match: NEW creds match on identity alone; established
 * creds must also be up to date and unexpired, and the principal
 * (or its absence) must agree.
 */
static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->principal != NULL) {
		if (gss_cred->gc_principal == NULL)
			return 0;
		return strcmp(acred->principal, gss_cred->gc_principal) == 0;
	}
	if (gss_cred->gc_principal != NULL)
		return 0;
	return rc->cr_uid == acred->uid;
}

/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_cred *cred = req->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *cred_len;
	u32 maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec iov;
	struct xdr_buf verf_buf;

	dprintk("RPC: %5u gss_marshal\n", task->tk_pid);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;		/* length back-patched below */

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
					req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor*/
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}

/*
 * Swap the task's cred for a freshly looked-up one with the same
 * identity, dropping the old reference. Used when a cred went stale.
 */
static int gss_renew_cred(struct rpc_task *task)
{
	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(oldcred,
						 struct gss_cred,
						 gc_base);
	struct rpc_auth *auth = oldcred->cr_auth;
	struct auth_cred acred = {
		.uid = oldcred->cr_uid,
		.principal = gss_cred->gc_principal,
		.machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
	};
	struct rpc_cred *new;

	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
	if (IS_ERR(new))
		return PTR_ERR(new);
	task->tk_rqstp->rq_cred = new;
	put_rpccred(oldcred);
	return 0;
}

/*
 * Returns 1 while a NEGATIVE-flagged cred is inside its retry back-off
 * window (gss_expired_cred_retry_delay seconds after the last upcall).
 */
static int gss_cred_is_negative_entry(struct rpc_cred *cred)
{
	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
		unsigned long now = jiffies;
		unsigned long begin, expire;
		struct gss_cred *gss_cred;

		gss_cred = container_of(cred, struct gss_cred, gc_base);
		begin = gss_cred->gc_upcall_timestamp;
		expire = begin + gss_expired_cred_retry_delay * HZ;

		if (time_in_range_open(now, begin, expire))
			return 1;
	}
	return 0;
}

/*
 * Refresh credentials.
 * XXX - finish
 */
static int
gss_refresh(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	int ret = 0;

	/* still inside the back-off window after a key-expired upcall */
	if (gss_cred_is_negative_entry(cred))
		return -EKEYEXPIRED;

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
			!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
		ret = gss_renew_cred(task);
		if (ret < 0)
			goto out;
		cred = task->tk_rqstp->rq_cred;
	}

	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		ret = gss_refresh_upcall(task);
out:
	return ret;
}

/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return -EACCES;
}

/*
 * Verify the server's reply verifier: check the flavor/length, then
 * verify the MIC over the XDR-encoded sequence number. Returns the
 * cursor advanced past the verifier, or NULL on failure.
 */
static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 seq;
	struct kvec iov;
	struct xdr_buf verf_buf;
	struct xdr_netobj mic;
	u32 flav,len;
	u32 maj_stat;

	dprintk("RPC: %5u gss_validate\n", task->tk_pid);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat) {
		dprintk("RPC: %5u gss_validate: gss_verify_mic returned " "error 0x%08x\n", task->tk_pid, maj_stat);
		goto out_bad;
	}
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", task->tk_pid);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid);
	return NULL;
}

/* XDR-encode the RPC arguments @obj into the send buffer at @p. */
static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
				__be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
	encode(rqstp, &xdr, obj);
}

/*
 * rpc_gss_svc_integrity request wrapping: encode
 * "<len><seqno><args><mic>", where the MIC covers seqno+args.
 */
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		   kxdreproc_t encode, struct rpc_rqst *rqstp,
		   __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf integ_buf;
	__be32 *integ_len = NULL;
	struct xdr_netobj mic;
	u32 offset;
	__be32 *q;
	struct kvec *iov;
	u32 maj_stat = 0;
	int status = -EIO;

	integ_len = p++;	/* length back-patched after encoding */
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;
	return 0;
}

/* Free the privacy-path encryption pages attached to @rqstp. */
static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i=0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

/*
 * Allocate output pages for in-place encryption of the send buffer's
 * page data (one extra page for the tail). Registers
 * priv_release_snd_buf as the release hook. Returns 0 or -EAGAIN.
 */
static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	rqstp->rq_enc_pages_num = i;
	priv_release_snd_buf(rqstp);
out:
	return -EAGAIN;
}

/*
 * rpc_gss_svc_privacy request wrapping: encode "<len><seqno><args>",
 * then gss_wrap (encrypt) everything after the length word, padding
 * the result to a 4-byte boundary.
 */
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		  kxdreproc_t encode, struct rpc_rqst *rqstp,
		  __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	u32 offset;
	u32 maj_stat;
	int status;
	__be32 *opaque_len;
	struct page **inpages;
	int first;
	int pad;
	struct kvec *iov;
	char *tmp;

	opaque_len = p++;	/* length back-patched after wrapping */
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/*
	 * Give the tail its own page, in case we need extra space in the
	 * head when wrapping:
	 *
	 * call_allocate() allocates twice the slack space required
	 * by the authentication flavor to rq_callsize.
	 * For GSS, slack is GSS_CRED_SLACK.
	 */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* slack space should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}

/*
 * rpc_credops wrap hook: dispatch on the cred's GSS service level
 * (none / integrity / privacy); control (non-DATA) requests bypass
 * wrapping entirely.
 */
static int
gss_wrap_req(struct rpc_task *task,
	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int status = -EIO;

	dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense. */
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		goto out;
	}
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
		break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status);
	return status;
}

/*
 * Integrity-service reply unwrapping: validate the length/seqno and
 * verify the MIC over the body; advances *p past the integrity header.
 */
static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)	/* must be a whole number of XDR words */
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

/*
 * Privacy-service reply unwrapping: decrypt the opaque body in place.
 * (Definition continues beyond this chunk of the file.)
 */
static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE,
&cred->cr_flags); if (maj_stat != GSS_S_COMPLETE) return status; if (ntohl(*(*p)++) != rqstp->rq_seqno) return status; return 0; } static int gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp, __be32 *p, void *obj) { struct xdr_stream xdr; xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); return decode(rqstp, &xdr, obj); } static int gss_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); __be32 *savedp = p; struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head; int savedlen = head->iov_len; int status = -EIO; if (ctx->gc_proc != RPC_GSS_PROC_DATA) goto out_decode; switch (gss_cred->gc_service) { case RPC_GSS_SVC_NONE: break; case RPC_GSS_SVC_INTEGRITY: status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p); if (status) goto out; break; case RPC_GSS_SVC_PRIVACY: status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p); if (status) goto out; break; } /* take into account extra slack for integrity and privacy cases: */ cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) + (savedlen - head->iov_len); out_decode: status = gss_unwrap_req_decode(decode, rqstp, p, obj); out: gss_put_ctx(ctx); dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, status); return status; } static const struct rpc_authops authgss_ops = { .owner = THIS_MODULE, .au_flavor = RPC_AUTH_GSS, .au_name = "RPCSEC_GSS", .create = gss_create, .destroy = gss_destroy, .lookup_cred = gss_lookup_cred, .crcreate = gss_create_cred, .pipes_create = gss_pipes_dentries_create, .pipes_destroy = gss_pipes_dentries_destroy, }; static const struct rpc_credops gss_credops = { .cr_name = "AUTH_GSS", .crdestroy = gss_destroy_cred, .cr_init = gss_cred_init, .crbind = rpcauth_generic_bind_cred, .crmatch = gss_match, .crmarshal = gss_marshal, .crrefresh = gss_refresh, 
.crvalidate = gss_validate, .crwrap_req = gss_wrap_req, .crunwrap_resp = gss_unwrap_resp, }; static const struct rpc_credops gss_nullops = { .cr_name = "AUTH_GSS", .crdestroy = gss_destroy_nullcred, .crbind = rpcauth_generic_bind_cred, .crmatch = gss_match, .crmarshal = gss_marshal, .crrefresh = gss_refresh_null, .crvalidate = gss_validate, .crwrap_req = gss_wrap_req, .crunwrap_resp = gss_unwrap_resp, }; static const struct rpc_pipe_ops gss_upcall_ops_v0 = { .upcall = rpc_pipe_generic_upcall, .downcall = gss_pipe_downcall, .destroy_msg = gss_pipe_destroy_msg, .open_pipe = gss_pipe_open_v0, .release_pipe = gss_pipe_release, }; static const struct rpc_pipe_ops gss_upcall_ops_v1 = { .upcall = rpc_pipe_generic_upcall, .downcall = gss_pipe_downcall, .destroy_msg = gss_pipe_destroy_msg, .open_pipe = gss_pipe_open_v1, .release_pipe = gss_pipe_release, }; static __net_init int rpcsec_gss_init_net(struct net *net) { return gss_svc_init_net(net); } static __net_exit void rpcsec_gss_exit_net(struct net *net) { gss_svc_shutdown_net(net); } static struct pernet_operations rpcsec_gss_net_ops = { .init = rpcsec_gss_init_net, .exit = rpcsec_gss_exit_net, }; /* * Initialize RPCSEC_GSS module */ static int __init init_rpcsec_gss(void) { int err = 0; err = rpcauth_register(&authgss_ops); if (err) goto out; err = gss_svc_init(); if (err) goto out_unregister; err = register_pernet_subsys(&rpcsec_gss_net_ops); if (err) goto out_svc_exit; rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version"); return 0; out_svc_exit: gss_svc_shutdown(); out_unregister: rpcauth_unregister(&authgss_ops); out: return err; } static void __exit exit_rpcsec_gss(void) { unregister_pernet_subsys(&rpcsec_gss_net_ops); gss_svc_shutdown(); rpcauth_unregister(&authgss_ops); rcu_barrier(); /* Wait for completion of call_rcu()'s */ } MODULE_LICENSE("GPL"); module_param_named(expired_cred_retry_delay, gss_expired_cred_retry_delay, uint, 0644); MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in 
seconds) until " "the RPC engine retries an expired credential"); module_init(init_rpcsec_gss) module_exit(exit_rpcsec_gss)
gpl-2.0
Phoenix-Kernel/android_kernel_lge_vee1
arch/mips/txx9/rbtx4939/setup.c
4715
14745
/* * Toshiba RBTX4939 setup routines. * Based on linux/arch/mips/txx9/rbtx4938/setup.c, * and RBTX49xx patch from CELF patch archive. * * Copyright (C) 2000-2001,2005-2007 Toshiba Corporation * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the * terms of the GNU General Public License version 2. This program is * licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/interrupt.h> #include <linux/smc91x.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/map.h> #include <asm/reboot.h> #include <asm/txx9/generic.h> #include <asm/txx9/pci.h> #include <asm/txx9/rbtx4939.h> static void rbtx4939_machine_restart(char *command) { local_irq_disable(); writeb(1, rbtx4939_reseten_addr); writeb(1, rbtx4939_softreset_addr); while (1) ; } static void __init rbtx4939_time_init(void) { tx4939_time_init(0); } #if defined(__BIG_ENDIAN) && \ (defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)) #define HAVE_RBTX4939_IOSWAB #define IS_CE1_ADDR(addr) \ ((((unsigned long)(addr) - IO_BASE) & 0xfff00000) == TXX9_CE(1)) static u16 rbtx4939_ioswabw(volatile u16 *a, u16 x) { return IS_CE1_ADDR(a) ? x : le16_to_cpu(x); } static u16 rbtx4939_mem_ioswabw(volatile u16 *a, u16 x) { return !IS_CE1_ADDR(a) ? 
x : le16_to_cpu(x); } #endif /* __BIG_ENDIAN && CONFIG_SMC91X */ static void __init rbtx4939_pci_setup(void) { #ifdef CONFIG_PCI int extarb = !(__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCIARB); struct pci_controller *c = &txx9_primary_pcic; register_pci_controller(c); tx4939_report_pciclk(); tx4927_pcic_setup(tx4939_pcicptr, c, extarb); if (!(__raw_readq(&tx4939_ccfgptr->pcfg) & TX4939_PCFG_ATA1MODE) && (__raw_readq(&tx4939_ccfgptr->pcfg) & (TX4939_PCFG_ET0MODE | TX4939_PCFG_ET1MODE))) { tx4939_report_pci1clk(); /* mem:64K(max), io:64K(max) (enough for ETH0,ETH1) */ c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000); register_pci_controller(c); tx4927_pcic_setup(tx4939_pcic1ptr, c, 0); } tx4939_setup_pcierr_irq(); #endif /* CONFIG_PCI */ } static unsigned long long default_ebccr[] __initdata = { 0x01c0000000007608ULL, /* 64M ROM */ 0x017f000000007049ULL, /* 1M IOC */ 0x0180000000408608ULL, /* ISA */ 0, }; static void __init rbtx4939_ebusc_setup(void) { int i; unsigned int sp; /* use user-configured speed */ sp = TX4939_EBUSC_CR(0) & 0x30; default_ebccr[0] |= sp; default_ebccr[1] |= sp; default_ebccr[2] |= sp; /* initialise by myself */ for (i = 0; i < ARRAY_SIZE(default_ebccr); i++) { if (default_ebccr[i]) ____raw_writeq(default_ebccr[i], &tx4939_ebuscptr->cr[i]); else ____raw_writeq(____raw_readq(&tx4939_ebuscptr->cr[i]) & ~8, &tx4939_ebuscptr->cr[i]); } } static void __init rbtx4939_update_ioc_pen(void) { __u64 pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg); __u64 ccfg = ____raw_readq(&tx4939_ccfgptr->ccfg); __u8 pe1 = readb(rbtx4939_pe1_addr); __u8 pe2 = readb(rbtx4939_pe2_addr); __u8 pe3 = readb(rbtx4939_pe3_addr); if (pcfg & TX4939_PCFG_ATA0MODE) pe1 |= RBTX4939_PE1_ATA(0); else pe1 &= ~RBTX4939_PE1_ATA(0); if (pcfg & TX4939_PCFG_ATA1MODE) { pe1 |= RBTX4939_PE1_ATA(1); pe1 &= ~(RBTX4939_PE1_RMII(0) | RBTX4939_PE1_RMII(1)); } else { pe1 &= ~RBTX4939_PE1_ATA(1); if (pcfg & TX4939_PCFG_ET0MODE) pe1 |= RBTX4939_PE1_RMII(0); else pe1 &= 
~RBTX4939_PE1_RMII(0); if (pcfg & TX4939_PCFG_ET1MODE) pe1 |= RBTX4939_PE1_RMII(1); else pe1 &= ~RBTX4939_PE1_RMII(1); } if (ccfg & TX4939_CCFG_PTSEL) pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_P | RBTX4939_PE3_VP_S); else { __u64 vmode = pcfg & (TX4939_PCFG_VSSMODE | TX4939_PCFG_VPSMODE); if (vmode == 0) pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_P | RBTX4939_PE3_VP_S); else if (vmode == TX4939_PCFG_VPSMODE) { pe3 |= RBTX4939_PE3_VP_P; pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_S); } else if (vmode == TX4939_PCFG_VSSMODE) { pe3 |= RBTX4939_PE3_VP | RBTX4939_PE3_VP_S; pe3 &= ~RBTX4939_PE3_VP_P; } else { pe3 |= RBTX4939_PE3_VP | RBTX4939_PE3_VP_P; pe3 &= ~RBTX4939_PE3_VP_S; } } if (pcfg & TX4939_PCFG_SPIMODE) { if (pcfg & TX4939_PCFG_SIO2MODE_GPIO) pe2 &= ~(RBTX4939_PE2_SIO2 | RBTX4939_PE2_SIO0); else { if (pcfg & TX4939_PCFG_SIO2MODE_SIO2) { pe2 |= RBTX4939_PE2_SIO2; pe2 &= ~RBTX4939_PE2_SIO0; } else { pe2 |= RBTX4939_PE2_SIO0; pe2 &= ~RBTX4939_PE2_SIO2; } } if (pcfg & TX4939_PCFG_SIO3MODE) pe2 |= RBTX4939_PE2_SIO3; else pe2 &= ~RBTX4939_PE2_SIO3; pe2 &= ~RBTX4939_PE2_SPI; } else { pe2 |= RBTX4939_PE2_SPI; pe2 &= ~(RBTX4939_PE2_SIO3 | RBTX4939_PE2_SIO2 | RBTX4939_PE2_SIO0); } if ((pcfg & TX4939_PCFG_I2SMODE_MASK) == TX4939_PCFG_I2SMODE_GPIO) pe2 |= RBTX4939_PE2_GPIO; else pe2 &= ~RBTX4939_PE2_GPIO; writeb(pe1, rbtx4939_pe1_addr); writeb(pe2, rbtx4939_pe2_addr); writeb(pe3, rbtx4939_pe3_addr); } #define RBTX4939_MAX_7SEGLEDS 8 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) static u8 led_val[RBTX4939_MAX_7SEGLEDS]; struct rbtx4939_led_data { struct led_classdev cdev; char name[32]; unsigned int num; }; /* Use "dot" in 7seg LEDs */ static void rbtx4939_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { struct rbtx4939_led_data *led_dat = container_of(led_cdev, struct rbtx4939_led_data, cdev); unsigned int num = led_dat->num; unsigned long flags; local_irq_save(flags); led_val[num] = (led_val[num] & 0x7f) | (value ? 
0x80 : 0); writeb(led_val[num], rbtx4939_7seg_addr(num / 4, num % 4)); local_irq_restore(flags); } static int __init rbtx4939_led_probe(struct platform_device *pdev) { struct rbtx4939_led_data *leds_data; int i; static char *default_triggers[] __initdata = { "heartbeat", "ide-disk", "nand-disk", }; leds_data = kzalloc(sizeof(*leds_data) * RBTX4939_MAX_7SEGLEDS, GFP_KERNEL); if (!leds_data) return -ENOMEM; for (i = 0; i < RBTX4939_MAX_7SEGLEDS; i++) { int rc; struct rbtx4939_led_data *led_dat = &leds_data[i]; led_dat->num = i; led_dat->cdev.brightness_set = rbtx4939_led_brightness_set; sprintf(led_dat->name, "rbtx4939:amber:%u", i); led_dat->cdev.name = led_dat->name; if (i < ARRAY_SIZE(default_triggers)) led_dat->cdev.default_trigger = default_triggers[i]; rc = led_classdev_register(&pdev->dev, &led_dat->cdev); if (rc < 0) return rc; led_dat->cdev.brightness_set(&led_dat->cdev, 0); } return 0; } static struct platform_driver rbtx4939_led_driver = { .driver = { .name = "rbtx4939-led", .owner = THIS_MODULE, }, }; static void __init rbtx4939_led_setup(void) { platform_device_register_simple("rbtx4939-led", -1, NULL, 0); platform_driver_probe(&rbtx4939_led_driver, rbtx4939_led_probe); } #else static inline void rbtx4939_led_setup(void) { } #endif static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val) { #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) unsigned long flags; local_irq_save(flags); /* bit7: reserved for LED class */ led_val[pos] = (led_val[pos] & 0x80) | (val & 0x7f); val = led_val[pos]; local_irq_restore(flags); #endif writeb(val, rbtx4939_7seg_addr(pos / 4, pos % 4)); } static void rbtx4939_7segled_putc(unsigned int pos, unsigned char val) { /* convert from map_to_seg7() notation */ val = (val & 0x88) | ((val & 0x40) >> 6) | ((val & 0x20) >> 4) | ((val & 0x10) >> 2) | ((val & 0x04) << 2) | ((val & 0x02) << 4) | ((val & 0x01) << 6); __rbtx4939_7segled_putc(pos, val); } #if defined(CONFIG_MTD_RBTX4939) || 
defined(CONFIG_MTD_RBTX4939_MODULE) /* special mapping for boot rom */ static unsigned long rbtx4939_flash_fixup_ofs(unsigned long ofs) { u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f; unsigned char shift; if (bdipsw & 8) { /* BOOT Mode: USER ROM1 / USER ROM2 */ shift = bdipsw & 3; /* rotate A[23:22] */ return (ofs & ~0xc00000) | ((((ofs >> 22) + shift) & 3) << 22); } #ifdef __BIG_ENDIAN if (bdipsw == 0) /* BOOT Mode: Monitor ROM */ ofs ^= 0x400000; /* swap A[22] */ #endif return ofs; } static map_word rbtx4939_flash_read16(struct map_info *map, unsigned long ofs) { map_word r; ofs = rbtx4939_flash_fixup_ofs(ofs); r.x[0] = __raw_readw(map->virt + ofs); return r; } static void rbtx4939_flash_write16(struct map_info *map, const map_word datum, unsigned long ofs) { ofs = rbtx4939_flash_fixup_ofs(ofs); __raw_writew(datum.x[0], map->virt + ofs); mb(); /* see inline_map_write() in mtd/map.h */ } static void rbtx4939_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f; unsigned char shift; ssize_t curlen; from += (unsigned long)map->virt; if (bdipsw & 8) { /* BOOT Mode: USER ROM1 / USER ROM2 */ shift = bdipsw & 3; while (len) { curlen = min_t(unsigned long, len, 0x400000 - (from & (0x400000 - 1))); memcpy(to, (void *)((from & ~0xc00000) | ((((from >> 22) + shift) & 3) << 22)), curlen); len -= curlen; from += curlen; to += curlen; } return; } #ifdef __BIG_ENDIAN if (bdipsw == 0) { /* BOOT Mode: Monitor ROM */ while (len) { curlen = min_t(unsigned long, len, 0x400000 - (from & (0x400000 - 1))); memcpy(to, (void *)(from ^ 0x400000), curlen); len -= curlen; from += curlen; to += curlen; } return; } #endif memcpy(to, (void *)from, len); } static void rbtx4939_flash_map_init(struct map_info *map) { map->read = rbtx4939_flash_read16; map->write = rbtx4939_flash_write16; map->copy_from = rbtx4939_flash_copy_from; } static void __init rbtx4939_mtd_init(void) { static struct { struct platform_device dev; 
struct resource res; struct rbtx4939_flash_data data; } pdevs[4]; int i; static char names[4][8]; static struct mtd_partition parts[4]; struct rbtx4939_flash_data *boot_pdata = &pdevs[0].data; u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f; if (bdipsw & 8) { /* BOOT Mode: USER ROM1 / USER ROM2 */ boot_pdata->nr_parts = 4; for (i = 0; i < boot_pdata->nr_parts; i++) { sprintf(names[i], "img%d", 4 - i); parts[i].name = names[i]; parts[i].size = 0x400000; parts[i].offset = MTDPART_OFS_NXTBLK; } } else if (bdipsw == 0) { /* BOOT Mode: Monitor ROM */ boot_pdata->nr_parts = 2; strcpy(names[0], "big"); strcpy(names[1], "little"); for (i = 0; i < boot_pdata->nr_parts; i++) { parts[i].name = names[i]; parts[i].size = 0x400000; parts[i].offset = MTDPART_OFS_NXTBLK; } } else { /* BOOT Mode: ROM Emulator */ boot_pdata->nr_parts = 2; parts[0].name = "boot"; parts[0].offset = 0xc00000; parts[0].size = 0x400000; parts[1].name = "user"; parts[1].offset = 0; parts[1].size = 0xc00000; } boot_pdata->parts = parts; boot_pdata->map_init = rbtx4939_flash_map_init; for (i = 0; i < ARRAY_SIZE(pdevs); i++) { struct resource *r = &pdevs[i].res; struct platform_device *dev = &pdevs[i].dev; r->start = 0x1f000000 - i * 0x1000000; r->end = r->start + 0x1000000 - 1; r->flags = IORESOURCE_MEM; pdevs[i].data.width = 2; dev->num_resources = 1; dev->resource = r; dev->id = i; dev->name = "rbtx4939-flash"; dev->dev.platform_data = &pdevs[i].data; platform_device_register(dev); } } #else static void __init rbtx4939_mtd_init(void) { } #endif static void __init rbtx4939_arch_init(void) { rbtx4939_pci_setup(); } static void __init rbtx4939_device_init(void) { unsigned long smc_addr = RBTX4939_ETHER_ADDR - IO_BASE; struct resource smc_res[] = { { .start = smc_addr, .end = smc_addr + 0x10 - 1, .flags = IORESOURCE_MEM, }, { .start = RBTX4939_IRQ_ETHER, /* override default irq flag defined in smc91x.h */ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; struct smc91x_platdata smc_pdata = { .flags = 
SMC91X_USE_16BIT, }; struct platform_device *pdev; #if defined(CONFIG_TC35815) || defined(CONFIG_TC35815_MODULE) int i, j; unsigned char ethaddr[2][6]; u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f; for (i = 0; i < 2; i++) { unsigned long area = CKSEG1 + 0x1fff0000 + (i * 0x10); if (bdipsw == 0) memcpy(ethaddr[i], (void *)area, 6); else { u16 buf[3]; if (bdipsw & 8) area -= 0x03000000; else area -= 0x01000000; for (j = 0; j < 3; j++) buf[j] = le16_to_cpup((u16 *)(area + j * 2)); memcpy(ethaddr[i], buf, 6); } } tx4939_ethaddr_init(ethaddr[0], ethaddr[1]); #endif pdev = platform_device_alloc("smc91x", -1); if (!pdev || platform_device_add_resources(pdev, smc_res, ARRAY_SIZE(smc_res)) || platform_device_add_data(pdev, &smc_pdata, sizeof(smc_pdata)) || platform_device_add(pdev)) platform_device_put(pdev); rbtx4939_mtd_init(); /* TC58DVM82A1FT: tDH=10ns, tWP=tRP=tREADID=35ns */ tx4939_ndfmc_init(10, 35, (1 << 1) | (1 << 2), (1 << 2)); /* ch1:8bit, ch2:16bit */ rbtx4939_led_setup(); tx4939_wdt_init(); tx4939_ata_init(); tx4939_rtc_init(); tx4939_dmac_init(0, 2); tx4939_aclc_init(); platform_device_register_simple("txx9aclc-generic", -1, NULL, 0); tx4939_sramc_init(); tx4939_rng_init(); } static void __init rbtx4939_setup(void) { int i; rbtx4939_ebusc_setup(); /* always enable ATA0 */ txx9_set64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_ATA0MODE); if (txx9_master_clock == 0) txx9_master_clock = 20000000; tx4939_setup(); rbtx4939_update_ioc_pen(); #ifdef HAVE_RBTX4939_IOSWAB ioswabw = rbtx4939_ioswabw; __mem_ioswabw = rbtx4939_mem_ioswabw; #endif _machine_restart = rbtx4939_machine_restart; txx9_7segled_init(RBTX4939_MAX_7SEGLEDS, rbtx4939_7segled_putc); for (i = 0; i < RBTX4939_MAX_7SEGLEDS; i++) txx9_7segled_putc(i, '-'); pr_info("RBTX4939 (Rev %02x) --- FPGA(Rev %02x) DIPSW:%02x,%02x\n", readb(rbtx4939_board_rev_addr), readb(rbtx4939_ioc_rev_addr), readb(rbtx4939_udipsw_addr), readb(rbtx4939_bdipsw_addr)); #ifdef CONFIG_PCI txx9_alloc_pci_controller(&txx9_primary_pcic, 0, 0, 
0, 0); txx9_board_pcibios_setup = tx4927_pcibios_setup; #else set_io_port_base(RBTX4939_ETHER_BASE); #endif tx4939_sio_init(TX4939_SCLK0(txx9_master_clock), 0); } struct txx9_board_vec rbtx4939_vec __initdata = { .system = "Toshiba RBTX4939", .prom_init = rbtx4939_prom_init, .mem_setup = rbtx4939_setup, .irq_setup = rbtx4939_irq_setup, .time_init = rbtx4939_time_init, .device_init = rbtx4939_device_init, .arch_init = rbtx4939_arch_init, #ifdef CONFIG_PCI .pci_map_irq = tx4939_pci_map_irq, #endif };
gpl-2.0
CyanogenMod/android_kernel_samsung_klte
drivers/regulator/dbx500-prcmu.c
4971
5956
/* * Copyright (C) ST-Ericsson SA 2010 * * License Terms: GNU General Public License v2 * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson * * UX500 common part of Power domain regulators */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/regulator/driver.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include "dbx500-prcmu.h" /* * power state reference count */ static int power_state_active_cnt; /* will initialize to zero */ static DEFINE_SPINLOCK(power_state_active_lock); int power_state_active_get(void) { unsigned long flags; int cnt; spin_lock_irqsave(&power_state_active_lock, flags); cnt = power_state_active_cnt; spin_unlock_irqrestore(&power_state_active_lock, flags); return cnt; } void power_state_active_enable(void) { unsigned long flags; spin_lock_irqsave(&power_state_active_lock, flags); power_state_active_cnt++; spin_unlock_irqrestore(&power_state_active_lock, flags); } int power_state_active_disable(void) { int ret = 0; unsigned long flags; spin_lock_irqsave(&power_state_active_lock, flags); if (power_state_active_cnt <= 0) { pr_err("power state: unbalanced enable/disable calls\n"); ret = -EINVAL; goto out; } power_state_active_cnt--; out: spin_unlock_irqrestore(&power_state_active_lock, flags); return ret; } #ifdef CONFIG_REGULATOR_DEBUG static struct ux500_regulator_debug { struct dentry *dir; struct dentry *status_file; struct dentry *power_state_cnt_file; struct dbx500_regulator_info *regulator_array; int num_regulators; u8 *state_before_suspend; u8 *state_after_suspend; } rdebug; void ux500_regulator_suspend_debug(void) { int i; for (i = 0; i < rdebug.num_regulators; i++) rdebug.state_before_suspend[i] = rdebug.regulator_array[i].is_enabled; } void ux500_regulator_resume_debug(void) { int i; for (i = 0; i < rdebug.num_regulators; i++) rdebug.state_after_suspend[i] = rdebug.regulator_array[i].is_enabled; } static int 
ux500_regulator_power_state_cnt_print(struct seq_file *s, void *p) { struct device *dev = s->private; int err; /* print power state count */ err = seq_printf(s, "ux500-regulator power state count: %i\n", power_state_active_get()); if (err < 0) dev_err(dev, "seq_printf overflow\n"); return 0; } static int ux500_regulator_power_state_cnt_open(struct inode *inode, struct file *file) { return single_open(file, ux500_regulator_power_state_cnt_print, inode->i_private); } static const struct file_operations ux500_regulator_power_state_cnt_fops = { .open = ux500_regulator_power_state_cnt_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static int ux500_regulator_status_print(struct seq_file *s, void *p) { struct device *dev = s->private; int err; int i; /* print dump header */ err = seq_printf(s, "ux500-regulator status:\n"); if (err < 0) dev_err(dev, "seq_printf overflow\n"); err = seq_printf(s, "%31s : %8s : %8s\n", "current", "before", "after"); if (err < 0) dev_err(dev, "seq_printf overflow\n"); for (i = 0; i < rdebug.num_regulators; i++) { struct dbx500_regulator_info *info; /* Access per-regulator data */ info = &rdebug.regulator_array[i]; /* print status */ err = seq_printf(s, "%20s : %8s : %8s : %8s\n", info->desc.name, info->is_enabled ? "enabled" : "disabled", rdebug.state_before_suspend[i] ? "enabled" : "disabled", rdebug.state_after_suspend[i] ? 
"enabled" : "disabled"); if (err < 0) dev_err(dev, "seq_printf overflow\n"); } return 0; } static int ux500_regulator_status_open(struct inode *inode, struct file *file) { return single_open(file, ux500_regulator_status_print, inode->i_private); } static const struct file_operations ux500_regulator_status_fops = { .open = ux500_regulator_status_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; int __attribute__((weak)) dbx500_regulator_testcase( struct dbx500_regulator_info *regulator_info, int num_regulators) { return 0; } int __devinit ux500_regulator_debug_init(struct platform_device *pdev, struct dbx500_regulator_info *regulator_info, int num_regulators) { /* create directory */ rdebug.dir = debugfs_create_dir("ux500-regulator", NULL); if (!rdebug.dir) goto exit_no_debugfs; /* create "status" file */ rdebug.status_file = debugfs_create_file("status", S_IRUGO, rdebug.dir, &pdev->dev, &ux500_regulator_status_fops); if (!rdebug.status_file) goto exit_destroy_dir; /* create "power-state-count" file */ rdebug.power_state_cnt_file = debugfs_create_file("power-state-count", S_IRUGO, rdebug.dir, &pdev->dev, &ux500_regulator_power_state_cnt_fops); if (!rdebug.power_state_cnt_file) goto exit_destroy_status; rdebug.regulator_array = regulator_info; rdebug.num_regulators = num_regulators; rdebug.state_before_suspend = kzalloc(num_regulators, GFP_KERNEL); if (!rdebug.state_before_suspend) { dev_err(&pdev->dev, "could not allocate memory for saving state\n"); goto exit_destroy_power_state; } rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL); if (!rdebug.state_after_suspend) { dev_err(&pdev->dev, "could not allocate memory for saving state\n"); goto exit_free; } dbx500_regulator_testcase(regulator_info, num_regulators); return 0; exit_free: kfree(rdebug.state_before_suspend); exit_destroy_power_state: debugfs_remove(rdebug.power_state_cnt_file); exit_destroy_status: debugfs_remove(rdebug.status_file); 
exit_destroy_dir: debugfs_remove(rdebug.dir); exit_no_debugfs: dev_err(&pdev->dev, "failed to create debugfs entries.\n"); return -ENOMEM; } int __devexit ux500_regulator_debug_exit(void) { debugfs_remove_recursive(rdebug.dir); kfree(rdebug.state_after_suspend); kfree(rdebug.state_before_suspend); return 0; } #endif
gpl-2.0
rex-xxx/mt6572_x201
kernel/drivers/net/can/sja1000/sja1000_platform.c
4971
4678
/* * Copyright (C) 2005 Sascha Hauer, Pengutronix * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/can/dev.h> #include <linux/can/platform/sja1000.h> #include <linux/io.h> #include "sja1000.h" #define DRV_NAME "sja1000_platform" MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus"); MODULE_LICENSE("GPL v2"); static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg) { return ioread8(priv->reg_base + reg); } static void sp_write_reg8(const struct sja1000_priv *priv, int reg, u8 val) { iowrite8(val, priv->reg_base + reg); } static u8 sp_read_reg16(const struct sja1000_priv *priv, int reg) { return ioread8(priv->reg_base + reg * 2); } static void sp_write_reg16(const struct sja1000_priv *priv, int reg, u8 val) { iowrite8(val, priv->reg_base + reg * 2); } static u8 sp_read_reg32(const struct sja1000_priv *priv, int reg) { return ioread8(priv->reg_base + reg * 4); } static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val) { iowrite8(val, priv->reg_base + reg * 4); } static int sp_probe(struct 
platform_device *pdev) { int err; void __iomem *addr; struct net_device *dev; struct sja1000_priv *priv; struct resource *res_mem, *res_irq; struct sja1000_platform_data *pdata; pdata = pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "No platform data provided!\n"); err = -ENODEV; goto exit; } res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res_mem || !res_irq) { err = -ENODEV; goto exit; } if (!request_mem_region(res_mem->start, resource_size(res_mem), DRV_NAME)) { err = -EBUSY; goto exit; } addr = ioremap_nocache(res_mem->start, resource_size(res_mem)); if (!addr) { err = -ENOMEM; goto exit_release; } dev = alloc_sja1000dev(0); if (!dev) { err = -ENOMEM; goto exit_iounmap; } priv = netdev_priv(dev); dev->irq = res_irq->start; priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); priv->reg_base = addr; /* The CAN clock frequency is half the oscillator clock frequency */ priv->can.clock.freq = pdata->osc_freq / 2; priv->ocr = pdata->ocr; priv->cdr = pdata->cdr; switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) { case IORESOURCE_MEM_32BIT: priv->read_reg = sp_read_reg32; priv->write_reg = sp_write_reg32; break; case IORESOURCE_MEM_16BIT: priv->read_reg = sp_read_reg16; priv->write_reg = sp_write_reg16; break; case IORESOURCE_MEM_8BIT: default: priv->read_reg = sp_read_reg8; priv->write_reg = sp_write_reg8; break; } dev_set_drvdata(&pdev->dev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); goto exit_free; } dev_info(&pdev->dev, "%s device registered (reg_base=%p, irq=%d)\n", DRV_NAME, priv->reg_base, dev->irq); return 0; exit_free: free_sja1000dev(dev); exit_iounmap: iounmap(addr); exit_release: release_mem_region(res_mem->start, resource_size(res_mem)); exit: return err; } static int sp_remove(struct platform_device *pdev) { struct net_device *dev = 
dev_get_drvdata(&pdev->dev); struct sja1000_priv *priv = netdev_priv(dev); struct resource *res; unregister_sja1000dev(dev); dev_set_drvdata(&pdev->dev, NULL); if (priv->reg_base) iounmap(priv->reg_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); free_sja1000dev(dev); return 0; } static struct platform_driver sp_driver = { .probe = sp_probe, .remove = sp_remove, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, }; module_platform_driver(sp_driver);
gpl-2.0
zeroblade1984/Galbi
drivers/media/dvb/mantis/mantis_core.c
8299
5893
/*
	Mantis PCI bridge driver

	Copyright (C) Manu Abraham (abraham.manu@gmail.com)

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "mantis_common.h"
#include "mantis_core.h"
#include "mantis_vp1033.h"
#include "mantis_vp1034.h"
#include "mantis_vp1041.h"
#include "mantis_vp2033.h"
#include "mantis_vp2040.h"
#include "mantis_vp3030.h"

/*
 * read_eeprom_byte - read @length bytes from the on-board EEPROM
 * @mantis: device context whose I2C adapter is used
 * @data:   in: data[0] holds the EEPROM offset to read from;
 *          out: filled with the bytes read
 * @length: number of bytes to read back
 *
 * Performs a combined write(offset)/read transaction against the
 * EEPROM at I2C address 0x50.  Returns 0 on success or the negative
 * error from i2c_transfer().
 */
static int read_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
{
	int err;
	struct i2c_msg msg[] = {
		{	/* write the one-byte EEPROM offset */
			.addr	= 0x50,
			.flags	= 0,
			.buf	= data,
			.len	= 1
		}, {	/* read @length bytes back into the same buffer */
			.addr	= 0x50,
			.flags	= I2C_M_RD,
			.buf	= data,
			.len	= length
		},
	};

	err = i2c_transfer(&mantis->adapter, msg, 2);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1,
			"ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >",
			err, data[0], data[1]);

		return err;
	}

	return 0;
}

/*
 * write_eeprom_byte - write @length bytes to the on-board EEPROM (0x50)
 * @mantis: device context whose I2C adapter is used
 * @data:   buffer to write (data[0] is expected to be the EEPROM offset)
 * @length: number of bytes to send
 *
 * Returns 0 on success or the negative error from i2c_transfer().
 */
static int write_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
{
	int err;
	struct i2c_msg msg = {
		.addr	= 0x50,
		.flags	= 0,
		.buf	= data,
		.len	= length
	};

	err = i2c_transfer(&mantis->adapter, &msg, 1);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1,
			"ERROR: i2c write: < err=%i length=0x%02x d0=0x%02x, d1=0x%02x >",
			err, length, data[0], data[1]);

		return err;
	}

	return 0;
}

/*
 * get_mac_address - read the board MAC address out of the EEPROM
 *
 * The MAC address is stored at EEPROM offset 0x08; the offset byte is
 * placed in mac_address[0] before the combined transaction, after which
 * the buffer holds the six address bytes.  Returns 0 on success or a
 * negative error code.
 */
static int get_mac_address(struct mantis_pci *mantis)
{
	int err;

	/* 0x08 is the EEPROM offset of the MAC address, consumed by
	 * the write half of read_eeprom_byte() and then overwritten. */
	mantis->mac_address[0] = 0x08;
	err = read_eeprom_byte(mantis, &mantis->mac_address[0], 6);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1, "Mantis EEPROM read error");

		return err;
	}
	dprintk(verbose, MANTIS_ERROR, 0,
		"    MAC Address=[%pM]\n", mantis->mac_address);

	return 0;
}

#define MANTIS_MODEL_UNKNOWN	"UNKNOWN"
#define MANTIS_DEV_UNKNOWN	"UNKNOWN"

/* Fallback configuration for subsystem IDs we do not recognize. */
struct mantis_hwconfig unknown_device = {
	.model_name	= MANTIS_MODEL_UNKNOWN,
	.dev_type	= MANTIS_DEV_UNKNOWN,
};

/*
 * mantis_load_config - select the hardware profile for this board
 *
 * Dispatches on the PCI subsystem device ID; unknown boards fall back
 * to the "UNKNOWN" placeholder config so later code can always
 * dereference mantis->hwconfig.
 */
static void mantis_load_config(struct mantis_pci *mantis)
{
	switch (mantis->subsystem_device) {
	case MANTIS_VP_1033_DVB_S:	/* VP-1033 */
		mantis->hwconfig = &vp1033_mantis_config;
		break;
	case MANTIS_VP_1034_DVB_S:	/* VP-1034 */
		mantis->hwconfig = &vp1034_mantis_config;
		break;
	case MANTIS_VP_1041_DVB_S2:	/* VP-1041 */
	case TECHNISAT_SKYSTAR_HD2:
		mantis->hwconfig = &vp1041_mantis_config;
		break;
	case MANTIS_VP_2033_DVB_C:	/* VP-2033 */
		mantis->hwconfig = &vp2033_mantis_config;
		break;
	case MANTIS_VP_2040_DVB_C:	/* VP-2040 */
	case TERRATEC_CINERGY_C_PCI:	/* VP-2040 clone */
	case TECHNISAT_CABLESTAR_HD2:
		mantis->hwconfig = &vp2040_mantis_config;
		break;
	case MANTIS_VP_3030_DVB_T:	/* VP-3030 */
		mantis->hwconfig = &vp3030_mantis_config;
		break;
	default:
		mantis->hwconfig = &unknown_device;
		break;
	}
}

/*
 * mantis_core_init - bring up the bridge core
 *
 * Selects the board profile, then initializes the I2C adapter, reads
 * the MAC address, and initializes the DMA, DVB and UART subsystems in
 * order.  Returns 0 on success or the first negative error.
 *
 * Fix vs. original: the DVB/UART init failure messages were logged at
 * MANTIS_DEBUG while every other failure here used MANTIS_ERROR; they
 * are now consistently MANTIS_ERROR so real failures are not hidden at
 * low verbosity.
 */
int mantis_core_init(struct mantis_pci *mantis)
{
	int err = 0;

	mantis_load_config(mantis);
	dprintk(verbose, MANTIS_ERROR, 0,
		"found a %s PCI %s device on (%02x:%02x.%x),\n",
		mantis->hwconfig->model_name, mantis->hwconfig->dev_type,
		mantis->pdev->bus->number,
		PCI_SLOT(mantis->pdev->devfn), PCI_FUNC(mantis->pdev->devfn));
	dprintk(verbose, MANTIS_ERROR, 0,
		"    Mantis Rev %d [%04x:%04x], ",
		mantis->revision,
		mantis->subsystem_vendor, mantis->subsystem_device);
	dprintk(verbose, MANTIS_ERROR, 0,
		"irq: %d, latency: %d\n    memory: 0x%lx, mmio: 0x%p\n",
		mantis->pdev->irq, mantis->latency,
		mantis->mantis_addr, mantis->mantis_mmio);

	err = mantis_i2c_init(mantis);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1, "Mantis I2C init failed");
		return err;
	}
	err = get_mac_address(mantis);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1, "get MAC address failed");
		return err;
	}
	err = mantis_dma_init(mantis);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1, "Mantis DMA init failed");
		return err;
	}
	err = mantis_dvb_init(mantis);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1, "Mantis DVB init failed");
		return err;
	}
	err = mantis_uart_init(mantis);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1, "Mantis UART init failed");
		return err;
	}

	return 0;
}

/*
 * mantis_core_exit - tear down the bridge core
 *
 * Stops DMA, then shuts down the UART, DMA, DVB and I2C subsystems,
 * logging only subsystems whose exit routine actually reports failure.
 *
 * Fix vs. original: "UART exit failed" was logged unconditionally right
 * after mantis_uart_exit() (a void call), falsely reporting a failure
 * on every unload; the message is now a plain progress message.
 */
int mantis_core_exit(struct mantis_pci *mantis)
{
	mantis_dma_stop(mantis);
	dprintk(verbose, MANTIS_ERROR, 1, "DMA engine stopping");

	mantis_uart_exit(mantis);
	dprintk(verbose, MANTIS_ERROR, 1, "UART exit");

	if (mantis_dma_exit(mantis) < 0)
		dprintk(verbose, MANTIS_ERROR, 1, "DMA exit failed");
	if (mantis_dvb_exit(mantis) < 0)
		dprintk(verbose, MANTIS_ERROR, 1, "DVB exit failed");
	if (mantis_i2c_exit(mantis) < 0)
		dprintk(verbose, MANTIS_ERROR, 1, "I2C adapter delete.. failed");

	return 0;
}

/*
 * gpio_set_bits - turn the given GPIO bit on or off
 * @bitpos: bit position within MANTIS_GPIF_ADDR
 * @value:  nonzero to set the bit, zero to clear it
 *
 * Read-modify-writes the GPIF address register, caches the result in
 * mantis->gpio_status, and pulses GPIF_DOUT; the udelay gives the line
 * time to settle.
 */
void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value)
{
	u32 cur;

	cur = mmread(MANTIS_GPIF_ADDR);
	if (value)
		mantis->gpio_status = cur | (1 << bitpos);
	else
		mantis->gpio_status = cur & (~(1 << bitpos));

	mmwrite(mantis->gpio_status, MANTIS_GPIF_ADDR);
	mmwrite(0x00, MANTIS_GPIF_DOUT);
	udelay(100);
}

/*
 * mantis_set_direction - set up transport-stream routing
 * @direction: 0 = no CI passthrough, 1 = CI passthrough
 *
 * Pulses bit 0x04 of register 0x28 in the order required to latch the
 * requested TS direction.
 */
void mantis_set_direction(struct mantis_pci *mantis, int direction)
{
	u32 reg;

	reg = mmread(0x28);
	dprintk(verbose, MANTIS_DEBUG, 1, "TS direction setup");
	if (direction == 0x01) { /* to CI */
		reg |= 0x04;
		mmwrite(reg, 0x28);
		reg &= 0xff - 0x04;
		mmwrite(reg, 0x28);
	} else {
		reg &= 0xff - 0x04;
		mmwrite(reg, 0x28);
		reg |= 0x04;
		mmwrite(reg, 0x28);
	}
}
gpl-2.0
pvittman/donkey-cm12
drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
12395
10234
/* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */
#include "common.h"
#include "mv88e1xxx.h"
#include "cphy.h"
#include "elmer0.h"

/* MV88E1XXX MDI crossover register values */
#define CROSSOVER_MDI   0
#define CROSSOVER_MDIX  1
#define CROSSOVER_AUTO  3

#define INTR_ENABLE_MASK 0x6CA0

/* Read-modify-write helper: set the bits in 'bitval' in PHY register 'reg'. */
static void mdio_set_bit(struct cphy *cphy, int reg, u32 bitval)
{
	u32 v;

	(void) simple_mdio_read(cphy, reg, &v);
	(void) simple_mdio_write(cphy, reg, v | bitval);
}

/* Read-modify-write helper: clear the bits in 'bitval' in PHY register 'reg'. */
static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval)
{
	u32 v;

	(void) simple_mdio_read(cphy, reg, &v);
	(void) simple_mdio_write(cphy, reg, v & ~bitval);
}

/*
 * Reset this PHY's port (not a global chip reset) by setting BMCR_RESET
 * and polling until the PHY clears it.  Returns 0 on success, -1 if the
 * bit is still set after the poll budget is exhausted.
 */
static int mv88e1xxx_reset(struct cphy *cphy, int wait)
{
	u32 bmcr;
	int tries = 1000;

	mdio_set_bit(cphy, MII_BMCR, BMCR_RESET);
	do {
		(void) simple_mdio_read(cphy, MII_BMCR, &bmcr);
		bmcr &= BMCR_RESET;
		if (bmcr)
			udelay(1);
	} while (bmcr && --tries);

	return bmcr ? -1 : 0;
}

/* Unmask PHY interrupts, then route them through Elmer0 on ASIC boards. */
static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
{
	(void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER,
				 INTR_ENABLE_MASK);

	if (t1_is_asic(cphy->adapter)) {
		u32 en;

		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &en);
		en |= ELMER0_GP_BIT1;
		if (is_T2(cphy->adapter))
			en |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, en);
	}
	return 0;
}

/* Mask all PHY interrupts and the corresponding Elmer0 GPIO sources. */
static int mv88e1xxx_interrupt_disable(struct cphy *cphy)
{
	(void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, 0);

	if (t1_is_asic(cphy->adapter)) {
		u32 en;

		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &en);
		en &= ~ELMER0_GP_BIT1;
		if (is_T2(cphy->adapter))
			en &= ~(ELMER0_GP_BIT2 | ELMER0_GP_BIT3 |
				ELMER0_GP_BIT4);
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, en);
	}
	return 0;
}

/*
 * Acknowledge pending interrupts.  Reading the PHY status register
 * clears the PHY side; the Elmer0 cause bits are cleared by writing
 * them back.
 */
static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
{
	u32 val;

	(void) simple_mdio_read(cphy, MV88E1XXX_INTERRUPT_STATUS_REGISTER,
				&val);

	if (t1_is_asic(cphy->adapter)) {
		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &val);
		val |= ELMER0_GP_BIT1;
		if (is_T2(cphy->adapter))
			val |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 |
			       ELMER0_GP_BIT4;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, val);
	}
	return 0;
}

/*
 * Force PHY speed and duplex.  Auto-negotiation is disabled for forced
 * modes except 1Gb/s, where the standard makes it mandatory, so it is
 * re-enabled there.  A negative speed/duplex leaves that setting alone.
 */
static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	u32 bmcr;

	(void) simple_mdio_read(phy, MII_BMCR, &bmcr);
	if (speed >= 0) {
		bmcr &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			bmcr |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	if (bmcr & BMCR_SPEED1000)	/* autoneg required for 1Gb/s */
		bmcr |= BMCR_ANENABLE;
	(void) simple_mdio_write(phy, MII_BMCR, bmcr);
	return 0;
}

/* Program the MDI crossover mode (MDI / MDIX / auto) field of PSCR. */
static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover)
{
	u32 pscr;

	(void) simple_mdio_read(cphy, MV88E1XXX_SPECIFIC_CNTRL_REGISTER,
				&pscr);
	pscr &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE);
	pscr |= V_PSCR_MDI_XOVER_MODE(crossover);
	(void) simple_mdio_write(cphy, MV88E1XXX_SPECIFIC_CNTRL_REGISTER,
				 pscr);
	return 0;
}

/* Enable auto-negotiation with automatic crossover and restart it. */
static int mv88e1xxx_autoneg_enable(struct cphy *cphy)
{
	u32 bmcr;

	(void) mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO);

	(void) simple_mdio_read(cphy, MII_BMCR, &bmcr);
	/* restart autoneg for change to take effect */
	bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
	(void) simple_mdio_write(cphy, MII_BMCR, bmcr);
	return 0;
}

/*
 * Disable auto-negotiation.  Per the Alaska FAQ, crossover must first
 * be forced to manual MDI, and an autoneg restart must accompany the
 * disable for it to take effect.
 */
static int mv88e1xxx_autoneg_disable(struct cphy *cphy)
{
	u32 bmcr;

	(void) mv88e1xxx_crossover_set(cphy, CROSSOVER_MDI);

	(void) simple_mdio_read(cphy, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_ANENABLE;
	(void) simple_mdio_write(cphy, MII_BMCR, bmcr | BMCR_ANRESTART);
	return 0;
}

/* Kick off a new round of auto-negotiation. */
static int mv88e1xxx_autoneg_restart(struct cphy *cphy)
{
	mdio_set_bit(cphy, MII_BMCR, BMCR_ANRESTART);
	return 0;
}

/*
 * Translate an ethtool-style advertise bitmap into the gigabit control
 * and MII advertisement registers.
 */
static int mv88e1xxx_advertise(struct cphy *phy, unsigned int advertise_map)
{
	u32 reg = 0;

	if (advertise_map &
	    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
		(void) simple_mdio_read(phy, MII_GBCR, &reg);
		reg &= ~(GBCR_ADV_1000HALF | GBCR_ADV_1000FULL);
		if (advertise_map & ADVERTISED_1000baseT_Half)
			reg |= GBCR_ADV_1000HALF;
		if (advertise_map & ADVERTISED_1000baseT_Full)
			reg |= GBCR_ADV_1000FULL;
	}
	/* NB: mirrors original behavior — GBCR is written even when no
	 * gigabit bits were requested (reg stays 0 in that case). */
	(void) simple_mdio_write(phy, MII_GBCR, reg);

	reg = 1;	/* selector field: IEEE 802.3 */
	if (advertise_map & ADVERTISED_10baseT_Half)
		reg |= ADVERTISE_10HALF;
	if (advertise_map & ADVERTISED_10baseT_Full)
		reg |= ADVERTISE_10FULL;
	if (advertise_map & ADVERTISED_100baseT_Half)
		reg |= ADVERTISE_100HALF;
	if (advertise_map & ADVERTISED_100baseT_Full)
		reg |= ADVERTISE_100FULL;
	if (advertise_map & ADVERTISED_PAUSE)
		reg |= ADVERTISE_PAUSE;
	if (advertise_map & ADVERTISED_ASYM_PAUSE)
		reg |= ADVERTISE_PAUSE_ASYM;
	(void) simple_mdio_write(phy, MII_ADVERTISE, reg);
	return 0;
}

/* Toggle PHY-internal loopback via BMCR. */
static int mv88e1xxx_set_loopback(struct cphy *cphy, int on)
{
	if (on)
		mdio_set_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
	else
		mdio_clear_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
	return 0;
}

/*
 * Report link state from the PHY-specific status register.  Speed,
 * duplex and pause results are only valid once the RESOLVED bit is set;
 * otherwise -1 / 0 defaults are reported.  Any output pointer may be
 * NULL to skip that field.
 */
static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok,
				     int *speed, int *duplex, int *fc)
{
	u32 pssr;
	int spd = -1, dplx = -1, pause = 0;

	(void) simple_mdio_read(cphy, MV88E1XXX_SPECIFIC_STATUS_REGISTER,
				&pssr);

	if ((pssr & V_PSSR_STATUS_RESOLVED) != 0) {
		if (pssr & V_PSSR_RX_PAUSE)
			pause |= PAUSE_RX;
		if (pssr & V_PSSR_TX_PAUSE)
			pause |= PAUSE_TX;
		dplx = (pssr & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
		spd = G_PSSR_SPEED(pssr);
		if (spd == 0)
			spd = SPEED_10;
		else if (spd == 1)
			spd = SPEED_100;
		else
			spd = SPEED_1000;
	}

	if (link_ok)
		*link_ok = (pssr & V_PSSR_LINK) != 0;
	if (speed)
		*speed = spd;
	if (duplex)
		*duplex = dplx;
	if (fc)
		*fc = pause;
	return 0;
}

/*
 * Enable or disable gigabit downshift.  The counter is set to 2 so the
 * PHY attempts to establish a Gb link twice before downshifting.
 */
static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable)
{
	u32 reg;

	(void) simple_mdio_read(cphy,
				MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER,
				&reg);

	reg &= ~(V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT));
	if (downshift_enable)
		reg |= V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2);
	(void) simple_mdio_write(cphy,
				 MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER,
				 reg);
	return 0;
}

/*
 * Interrupt service: drain the cause register until it reads zero to
 * cope with bouncing interrupts, tracking link and autoneg state in
 * cphy->state and accumulating cphy_cause_link_change events.
 */
static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
{
	int cphy_cause = 0;
	u32 status;

	while (1) {
		u32 cause;

		(void) simple_mdio_read(cphy,
					MV88E1XXX_INTERRUPT_STATUS_REGISTER,
					&cause);
		cause &= INTR_ENABLE_MASK;
		if (!cause)
			break;

		if (cause & MV88E1XXX_INTR_LINK_CHNG) {
			(void) simple_mdio_read(cphy,
					MV88E1XXX_SPECIFIC_STATUS_REGISTER,
					&status);

			if (status & MV88E1XXX_INTR_LINK_CHNG)
				cphy->state |= PHY_LINK_UP;
			else {
				cphy->state &= ~PHY_LINK_UP;
				if (cphy->state & PHY_AUTONEG_EN)
					cphy->state &= ~PHY_AUTONEG_RDY;
				cphy_cause |= cphy_cause_link_change;
			}
		}

		if (cause & MV88E1XXX_INTR_AUTONEG_DONE)
			cphy->state |= PHY_AUTONEG_RDY;

		if ((cphy->state & (PHY_LINK_UP | PHY_AUTONEG_RDY)) ==
		    (PHY_LINK_UP | PHY_AUTONEG_RDY))
			cphy_cause |= cphy_cause_link_change;
	}
	return cphy_cause;
}

/* Release the cphy instance allocated in mv88e1xxx_phy_create(). */
static void mv88e1xxx_destroy(struct cphy *cphy)
{
	kfree(cphy);
}

static struct cphy_ops mv88e1xxx_ops = {
	.destroy           = mv88e1xxx_destroy,
	.reset             = mv88e1xxx_reset,
	.interrupt_enable  = mv88e1xxx_interrupt_enable,
	.interrupt_disable = mv88e1xxx_interrupt_disable,
	.interrupt_clear   = mv88e1xxx_interrupt_clear,
	.interrupt_handler = mv88e1xxx_interrupt_handler,
	.autoneg_enable    = mv88e1xxx_autoneg_enable,
	.autoneg_disable   = mv88e1xxx_autoneg_disable,
	.autoneg_restart   = mv88e1xxx_autoneg_restart,
	.advertise         = mv88e1xxx_advertise,
	.set_loopback      = mv88e1xxx_set_loopback,
	.set_speed_duplex  = mv88e1xxx_set_speed_duplex,
	.get_link_status   = mv88e1xxx_get_link_status,
};

/*
 * Allocate and initialize a cphy instance for this PHY, applying
 * board-specific quirks (class-A transmitter config on copper 88E1111
 * parts to reduce EMI, downshift enable, and T2 LED setup).  Returns
 * NULL on allocation failure; caller owns the returned cphy.
 */
static struct cphy *mv88e1xxx_phy_create(struct net_device *dev, int phy_addr,
					 const struct mdio_ops *mdio_ops)
{
	struct adapter *adapter = netdev_priv(dev);
	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);

	if (!cphy)
		return NULL;

	cphy_init(cphy, dev, phy_addr, &mv88e1xxx_ops, mdio_ops);

	/* Configure particular PHY's to run in a different mode. */
	if ((board_info(adapter)->caps & SUPPORTED_TP) &&
	    board_info(adapter)->chip_phy == CHBT_PHY_88E1111) {
		/* Configure the PHY transmitter as class A to reduce EMI. */
		(void) simple_mdio_write(cphy,
					 MV88E1XXX_EXTENDED_ADDR_REGISTER,
					 0xB);
		(void) simple_mdio_write(cphy,
					 MV88E1XXX_EXTENDED_REGISTER, 0x8004);
	}
	(void) mv88e1xxx_downshift_set(cphy, 1);	/* Enable downshift */

	/* LED */
	if (is_T2(adapter)) {
		(void) simple_mdio_write(cphy,
					 MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
	}

	return cphy;
}

/* No global reset needed for this PHY family. */
static int mv88e1xxx_phy_reset(adapter_t *adapter)
{
	return 0;
}

const struct gphy t1_mv88e1xxx_ops = {
	.create = mv88e1xxx_phy_create,
	.reset  = mv88e1xxx_phy_reset
};
gpl-2.0
phenyl-sphinx/linux
fs/ntfs/bitmap.c
14443
5603
/*
 * bitmap.c - NTFS kernel bitmap handling.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2004-2005 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef NTFS_RW

#include <linux/pagemap.h>

#include "bitmap.h"
#include "debug.h"
#include "aops.h"
#include "ntfs.h"

/**
 * __ntfs_bitmap_set_bits_in_run - set a run of bits in a bitmap to a value
 * @vi:			vfs inode describing the bitmap
 * @start_bit:		first bit to set
 * @count:		number of bits to set
 * @value:		value to set the bits to (i.e. 0 or 1)
 * @is_rollback:	if 'true' this is a rollback operation
 *
 * Set @count bits starting at bit @start_bit in the bitmap described by the
 * vfs inode @vi to @value, where @value is either 0 or 1.
 *
 * The run is processed page-cache page by page: a possibly partial leading
 * byte first, then whole bytes via memset(), then a possibly partial
 * trailing byte.  If mapping a subsequent page fails, all bits modified so
 * far are rolled back by recursing with the inverted @value and
 * @is_rollback set.
 *
 * @is_rollback should always be 'false', it is for internal use to rollback
 * errors.  You probably want to use ntfs_bitmap_set_bits_in_run() instead.
 *
 * Return 0 on success and -errno on error.
 */
int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
		const s64 count, const u8 value, const bool is_rollback)
{
	s64 cnt = count;		/* bits still to be modified */
	pgoff_t index, end_index;
	struct address_space *mapping;
	struct page *page;
	u8 *kaddr;
	int pos, len;			/* pos doubles as rollback status */
	u8 bit;

	BUG_ON(!vi);
	ntfs_debug("Entering for i_ino 0x%lx, start_bit 0x%llx, count 0x%llx, "
			"value %u.%s", vi->i_ino, (unsigned long long)start_bit,
			(unsigned long long)cnt, (unsigned int)value,
			is_rollback ? " (rollback)" : "");
	BUG_ON(start_bit < 0);
	BUG_ON(cnt < 0);
	BUG_ON(value > 1);
	/*
	 * Page indices of the pages holding the first and last bits of the
	 * run, i.e. @start_bit and @start_bit + @cnt - 1.  Each page covers
	 * PAGE_CACHE_SIZE * 8 bits, hence the shift by 3 + PAGE_CACHE_SHIFT.
	 */
	index = start_bit >> (3 + PAGE_CACHE_SHIFT);
	end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT);
	/* Map the page containing @start_bit. */
	mapping = vi->i_mapping;
	page = ntfs_map_page(mapping, index);
	if (IS_ERR(page)) {
		if (!is_rollback)
			ntfs_error(vi->i_sb, "Failed to map first page (error "
					"%li), aborting.", PTR_ERR(page));
		return PTR_ERR(page);
	}
	kaddr = page_address(page);
	/* Byte offset of @start_bit within this page, and bit within byte. */
	pos = (start_bit >> 3) & ~PAGE_CACHE_MASK;
	bit = start_bit & 7;
	/* Partial leading byte: flip individual bits up to the byte edge. */
	if (bit) {
		u8 *byte = kaddr + pos;
		while ((bit & 7) && cnt) {
			cnt--;
			if (value)
				*byte |= 1 << bit++;
			else
				*byte &= ~(1 << bit++);
		}
		/* Entire run fit inside the first byte. */
		if (!cnt)
			goto done;
		pos++;
	}
	/* Whole bytes in the first page, bounded by both @cnt and page end. */
	len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos);
	memset(kaddr + pos, value ? 0xff : 0, len);
	cnt -= len << 3;
	/* If only trailing bits remain, @len becomes the in-page byte
	 * offset of the first not-done byte (used by the tail code). */
	if (cnt < 8)
		len += pos;
	/* Walk all pages strictly between the first and including the last. */
	while (index < end_index) {
		BUG_ON(cnt <= 0);
		/* Flush and release the current page, map the next one. */
		flush_dcache_page(page);
		set_page_dirty(page);
		ntfs_unmap_page(page);
		page = ntfs_map_page(mapping, ++index);
		if (IS_ERR(page))
			goto rollback;
		kaddr = page_address(page);
		/* Whole bytes in this page, bounded by @cnt. */
		len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE);
		memset(kaddr, value ? 0xff : 0, len);
		cnt -= len << 3;
	}
	/*
	 * Partial trailing byte in the last (currently mapped) page; @len is
	 * its byte offset inside the page and @cnt (< 8) its bit count.
	 */
	if (cnt) {
		u8 *byte;

		BUG_ON(cnt > 7);
		bit = cnt;
		byte = kaddr + len;
		while (bit--) {
			if (value)
				*byte |= 1 << bit;
			else
				*byte &= ~(1 << bit);
		}
	}
done:
	/* Success: flush, dirty, and release the last page. */
	flush_dcache_page(page);
	set_page_dirty(page);
	ntfs_unmap_page(page);
	ntfs_debug("Done.");
	return 0;
rollback:
	/*
	 * Mapping a subsequent page failed.  State here:
	 *	- no pages are mapped
	 *	- @count - @cnt is the number of bits already modified
	 * Undo them by recursing with the inverted value, unless we are the
	 * rollback pass ourselves.
	 */
	if (is_rollback)
		return PTR_ERR(page);
	if (count != cnt)
		pos = __ntfs_bitmap_set_bits_in_run(vi, start_bit, count - cnt,
				value ? 0 : 1, true);
	else
		pos = 0;
	if (!pos) {
		/* Rollback was successful. */
		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
				"%li), aborting.", PTR_ERR(page));
	} else {
		/* Rollback failed. */
		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
				"%li) and rollback failed (error %i).  "
				"Aborting and leaving inconsistent metadata.  "
				"Unmount and run chkdsk.", PTR_ERR(page), pos);
		NVolSetErrors(NTFS_SB(vi->i_sb));
	}
	return PTR_ERR(page);
}

#endif /* NTFS_RW */
gpl-2.0
gabrielleLQX/SAM3_codeqemu
roms/openbios/arch/x86/context.c
108
3205
/* tag: x86 context switching
 *
 * 2003-10 by SONE Takeshi
 *
 * See the file "COPYING" for further information about
 * the copyright and warranty status of this work.
 */

#include "config.h"
#include "kernel/kernel.h"
#include "segment.h"
#include "context.h"
#include "libopenbios/sys_info.h"
#include "boot.h"
#include "openbios.h"

#define MAIN_STACK_SIZE 16384
#define IMAGE_STACK_SIZE 4096

#define debug printk

static void start_main(void);	/* forward decl. */
void __exit_context(void);	/* assembly routine */

/*
 * Main context structure.
 * It lives at the bottom of our stack and is loaded by the assembly
 * startup code to get us running: a flat-segment descriptor setup whose
 * entry point is start_main() and whose "return address" falls through
 * into __exit_context.
 */
static struct context main_ctx __attribute__((section (".initctx"))) = {
	.gdt_base	= (uint32_t) gdt,
	.gdt_limit	= GDT_LIMIT,
	.cs		= FLAT_CS,
	.ds		= FLAT_DS,
	.es		= FLAT_DS,
	.fs		= FLAT_DS,
	.gs		= FLAT_DS,
	.ss		= FLAT_DS,
	.esp		= (uint32_t) ESP_LOC(&main_ctx),
	.eip		= (uint32_t) start_main,
	.return_addr	= (uint32_t) __exit_context,
};

/* The assembly switcher loads/stores the context pointed at here. */
struct context *__context = &main_ctx;

/* Stack for a loaded ELF image. */
static uint8_t image_stack[IMAGE_STACK_SIZE];

/* Pointer to the startup context, kept as a physical address. */
unsigned long __boot_ctx;

/*
 * First C function to run.  Saves the boot context (physical, since we
 * will relocate), installs exception handlers, runs openbios(), and
 * hands its return value back to the bootloader by switching back to
 * the saved boot context.
 */
static void start_main(void)
{
	int rc;

	/* Save startup context so we can refer to it later.  It must be
	 * kept as a physical address because we will relocate. */
	__boot_ctx = virt_to_phys(__context);

	init_exceptions();

	/* Start the real fun. */
	rc = openbios();

	/* Pass the return value to the startup context; the bootloader
	 * may inspect it. */
	boot_ctx->eax = rc;

	/* Returning from here jumps to __exit_context. */
	__context = boot_ctx;
}

/*
 * Carve a fresh context (plus room for num_params parameter slots) out
 * of the top of the given stack and fill in flat-memory-model defaults.
 * The caller still has to set eip (and any params) before switching.
 */
struct context *init_context(uint8_t *stack, uint32_t stack_size,
			     int num_params)
{
	struct context *ctx;

	ctx = (struct context *)
		(stack + stack_size - (sizeof(*ctx) +
				       num_params * sizeof(uint32_t)));
	memset(ctx, 0, sizeof(*ctx));

	/* Reasonable defaults for a flat memory model. */
	ctx->gdt_base = virt_to_phys(gdt);
	ctx->gdt_limit = GDT_LIMIT;
	ctx->cs = FLAT_CS;
	ctx->ds = FLAT_DS;
	ctx->es = FLAT_DS;
	ctx->fs = FLAT_DS;
	ctx->gs = FLAT_DS;
	ctx->ss = FLAT_DS;
	ctx->esp = virt_to_phys(ESP_LOC(ctx));
	ctx->return_addr = virt_to_phys(__exit_context);

	return ctx;
}

/*
 * Switch to another context and, when it eventually switches back,
 * return the context it left behind.  __context is saved and restored
 * around the assembly trampoline.
 */
struct context *switch_to(struct context *ctx)
{
	struct context *prev, *came_from;

	debug("switching to new context:\n");
	prev = __context;
	__context = ctx;
	asm ("pushl %cs; call __switch_context");
	came_from = __context;
	__context = prev;
	return came_from;
}

/*
 * Start an ELF boot image at entry_point with one parameter, on the
 * dedicated image stack.  eax carries the 0xe1fb007 boot magic; the
 * image's eax on return becomes our return value.
 */
unsigned int start_elf(unsigned long entry_point, unsigned long param)
{
	struct context *ctx;

	ctx = init_context(image_stack, sizeof image_stack, 1);
	ctx->eip = entry_point;
	ctx->param[0] = param;
	ctx->eax = 0xe1fb007;
	ctx->ebx = param;

	ctx = switch_to(ctx);
	return ctx->eax;
}
gpl-2.0
Split-Screen/android_kernel_lge_msm8974
net/ipv4/tcp_ipv4.c
108
69789
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * IPv4 specific functions * * * code split from: * linux/ipv4/tcp.c * linux/ipv4/tcp_input.c * linux/ipv4/tcp_output.c * * See tcp.c for author information * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * Changes: * David S. Miller : New socket lookup architecture. * This code is dedicated to John Dyson. * David S. Miller : Change semantics of established hash, * half is devoted to TIME_WAIT sockets * and the rest go in the other half. * Andi Kleen : Add support for syncookies and fixed * some bugs: ip options weren't passed to * the TCP layer, missed a check for an * ACK bit. * Andi Kleen : Implemented fast path mtu discovery. * Fixed many serious bugs in the * request_sock handling and moved * most of it into the af independent code. * Added tail drop and some other bugfixes. * Added new listen semantics. * Mike McLagan : Routing by source * Juan Jose Ciarlante: ip_dynaddr bits * Andi Kleen: various fixes. * Vitaly E. Lavrov : Transparent proxy revived after year * coma. * Andi Kleen : Fix new listen. * Andi Kleen : Fix accept error reporting. * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind * a single port at the same time. 
*/ #define pr_fmt(fmt) "TCP: " fmt #include <linux/bottom_half.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/module.h> #include <linux/random.h> #include <linux/cache.h> #include <linux/jhash.h> #include <linux/init.h> #include <linux/times.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/icmp.h> #include <net/inet_hashtables.h> #include <net/tcp.h> #include <net/transp_v6.h> #include <net/ipv6.h> #include <net/inet_common.h> #include <net/timewait_sock.h> #include <net/xfrm.h> #include <net/netdma.h> #include <net/secure_seq.h> #include <net/tcp_memcontrol.h> #include <linux/inet.h> #include <linux/ipv6.h> #include <linux/stddef.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/crypto.h> #include <linux/scatterlist.h> int sysctl_tcp_tw_reuse __read_mostly; int sysctl_tcp_low_latency __read_mostly; EXPORT_SYMBOL(sysctl_tcp_low_latency); #ifdef CONFIG_TCP_MD5SIG static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th); #endif struct inet_hashinfo tcp_hashinfo; EXPORT_SYMBOL(tcp_hashinfo); static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb) { return secure_tcp_sequence_number(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, tcp_hdr(skb)->dest, tcp_hdr(skb)->source); } int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) { const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw); struct tcp_sock *tp = tcp_sk(sk); /* With PAWS, it is safe from the viewpoint of data integrity. Even without PAWS it is safe provided sequence spaces do not overlap i.e. at data rates <= 80Mbit/sec. Actually, the idea is close to VJ's one, only timestamp cache is held not per host, but per port pair and TW bucket is used as state holder. If TW bucket has been already destroyed we fall back to VJ's scheme and use initial timestamp retrieved from peer table. 
*/ if (tcptw->tw_ts_recent_stamp && (twp == NULL || (sysctl_tcp_tw_reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; if (tp->write_seq == 0) tp->write_seq = 1; tp->rx_opt.ts_recent = tcptw->tw_ts_recent; tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; sock_hold(sktw); return 1; } return 0; } EXPORT_SYMBOL_GPL(tcp_twsk_unique); /* This will initiate an outgoing connection. */ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); __be16 orig_sport, orig_dport; __be32 daddr, nexthop; struct flowi4 *fl4; struct rtable *rt; int err; struct ip_options_rcu *inet_opt; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; nexthop = daddr = usin->sin_addr.s_addr; inet_opt = rcu_dereference_protected(inet->inet_opt, sock_owned_by_user(sk)); if (inet_opt && inet_opt->opt.srr) { if (!daddr) return -EINVAL; nexthop = inet_opt->opt.faddr; } orig_sport = inet->inet_sport; orig_dport = usin->sin_port; fl4 = &inet->cork.fl.u.ip4; rt = ip_route_connect(fl4, nexthop, inet->inet_saddr, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport, orig_dport, sk, true); if (IS_ERR(rt)) { err = PTR_ERR(rt); if (err == -ENETUNREACH) IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); return err; } if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); return -ENETUNREACH; } if (!inet_opt || !inet_opt->opt.srr) daddr = fl4->daddr; if (!inet->inet_saddr) inet->inet_saddr = fl4->saddr; inet->inet_rcv_saddr = inet->inet_saddr; if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { /* Reset inherited state */ tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; tp->write_seq = 0; } if (tcp_death_row.sysctl_tw_recycle && !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) { struct 
inet_peer *peer = rt_get_peer(rt, fl4->daddr); /* * VJ's idea. We save last timestamp seen from * the destination in peer table, when entering state * TIME-WAIT * and initialize rx_opt.ts_recent from it, * when trying new connection. */ if (peer) { inet_peer_refcheck(peer); if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) { tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; tp->rx_opt.ts_recent = peer->tcp_ts; } } } inet->inet_dport = usin->sin_port; inet->inet_daddr = daddr; inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; /* Socket identity is still unknown (sport may be zero). * However we set state to SYN-SENT and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this. */ tcp_set_state(sk, TCP_SYN_SENT); err = inet_hash_connect(&tcp_death_row, sk); if (err) goto failure; rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto failure; } /* OK, now commit destination to socket. */ sk->sk_gso_type = SKB_GSO_TCPV4; sk_setup_caps(sk, &rt->dst); if (!tp->write_seq) tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, inet->inet_daddr, inet->inet_sport, usin->sin_port); inet->inet_id = tp->write_seq ^ jiffies; err = tcp_connect(sk); rt = NULL; if (err) goto failure; return 0; failure: /* * This unhashes the socket and releases the local port, * if necessary. */ tcp_set_state(sk, TCP_CLOSE); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; return err; } EXPORT_SYMBOL(tcp_v4_connect); /* * This routine does path mtu discovery as defined in RFC1191. 
*/ static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu) { struct dst_entry *dst; struct inet_sock *inet = inet_sk(sk); /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs * send out by Linux are always <576bytes so they should go through * unfragmented). */ if (sk->sk_state == TCP_LISTEN) return; /* We don't check in the destentry if pmtu discovery is forbidden * on this route. We just assume that no packet_to_big packets * are send back when pmtu discovery is not active. * There is a small race when the user changes this flag in the * route, but I think that's acceptable. */ if ((dst = __sk_dst_check(sk, 0)) == NULL) return; dst->ops->update_pmtu(dst, mtu); /* Something is about to be wrong... Remember soft error * for the case, if this connection will not able to recover. */ if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) sk->sk_err_soft = EMSGSIZE; mtu = dst_mtu(dst); if (inet->pmtudisc != IP_PMTUDISC_DONT && inet_csk(sk)->icsk_pmtu_cookie > mtu) { tcp_sync_mss(sk, mtu); /* Resend the TCP packet because it's * clear that the old packet has been * dropped. This is the new "fast" path mtu * discovery. */ tcp_simple_retransmit(sk); } /* else let the usual retransmit timer handle it */ } /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. After adjustment * header points to the first 8 bytes of the tcp header. We need * to find the appropriate port. * * The locking strategy used here is very "optimistic". When * someone else accesses the socket the ICMP is just dropped * and for some paths there is no check at all. * A more general error queue to queue errors for later handling * is probably better. 
* */ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) { const struct iphdr *iph = (const struct iphdr *)icmp_skb->data; struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2)); struct inet_connection_sock *icsk; struct tcp_sock *tp; struct inet_sock *inet; const int type = icmp_hdr(icmp_skb)->type; const int code = icmp_hdr(icmp_skb)->code; struct sock *sk; struct sk_buff *skb; __u32 seq; __u32 remaining; int err; struct net *net = dev_net(icmp_skb->dev); if (icmp_skb->len < (iph->ihl << 2) + 8) { ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); return; } sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest, iph->saddr, th->source, inet_iif(icmp_skb)); if (!sk) { ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); return; } if (sk->sk_state == TCP_TIME_WAIT) { inet_twsk_put(inet_twsk(sk)); return; } bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. */ if (sock_owned_by_user(sk)) NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == TCP_CLOSE) goto out; if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); goto out; } icsk = inet_csk(sk); tp = tcp_sk(sk); seq = ntohl(th->seq); if (sk->sk_state != TCP_LISTEN && !between(seq, tp->snd_una, tp->snd_nxt)) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } switch (type) { case ICMP_SOURCE_QUENCH: /* Just silently ignore these. 
*/ goto out; case ICMP_PARAMETERPROB: err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ if (!sock_owned_by_user(sk)) do_pmtu_discovery(sk, iph, info); goto out; } err = icmp_err_convert[code].errno; /* check if icmp_skb allows revert of backoff * (see draft-zimmermann-tcp-lcd) */ if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH) break; if (seq != tp->snd_una || !icsk->icsk_retransmits || !icsk->icsk_backoff) break; if (sock_owned_by_user(sk)) break; icsk->icsk_backoff--; inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT) << icsk->icsk_backoff; tcp_bound_rto(sk); skb = tcp_write_queue_head(sk); BUG_ON(!skb); remaining = icsk->icsk_rto - min(icsk->icsk_rto, tcp_time_stamp - TCP_SKB_CB(skb)->when); if (remaining) { inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, remaining, TCP_RTO_MAX); } else { /* RTO revert clocked out retransmission. * Will retransmit now */ tcp_retransmit_timer(sk); } break; case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; default: goto out; } switch (sk->sk_state) { struct request_sock *req, **prev; case TCP_LISTEN: if (sock_owned_by_user(sk)) goto out; req = inet_csk_search_req(sk, &prev, th->dest, iph->daddr, iph->saddr); if (!req) goto out; /* ICMPs are not backlogged, hence we cannot get an established socket here. */ WARN_ON(req->sk); if (seq != tcp_rsk(req)->snt_isn) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } /* * Still in SYN_RECV, just remove it silently. * There is no good way to pass the error to the newly * created socket, and POSIX does not want network * errors returned from accept(). */ inet_csk_reqsk_queue_drop(sk, req, prev); goto out; case TCP_SYN_SENT: case TCP_SYN_RECV: /* Cannot happen. It can f.e. if SYNs crossed. 
*/ if (!sock_owned_by_user(sk)) { sk->sk_err = err; sk->sk_error_report(sk); tcp_done(sk); } else { sk->sk_err_soft = err; } goto out; } /* If we've already connected we will keep trying * until we time out, or the user gives up. * * rfc1122 4.2.3.9 allows to consider as hard errors * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too, * but it is obsoleted by pmtu discovery). * * Note, that in modern internet, where routing is unreliable * and in each dark corner broken firewalls sit, sending random * errors ordered by their masters even this two messages finally lose * their original sense (even Linux sends invalid PORT_UNREACHs) * * Now we are in compliance with RFCs. * --ANK (980905) */ inet = inet_sk(sk); if (!sock_owned_by_user(sk) && inet->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else { /* Only an error on timeout */ sk->sk_err_soft = err; } out: bh_unlock_sock(sk); sock_put(sk); } static void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr) { struct tcphdr *th = tcp_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0); skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); } else { th->check = tcp_v4_check(skb->len, saddr, daddr, csum_partial(th, th->doff << 2, skb->csum)); } } /* This routine computes an IPv4 TCP checksum. */ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) { const struct inet_sock *inet = inet_sk(sk); __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); } EXPORT_SYMBOL(tcp_v4_send_check); int tcp_v4_gso_send_check(struct sk_buff *skb) { const struct iphdr *iph; struct tcphdr *th; if (!pskb_may_pull(skb, sizeof(*th))) return -EINVAL; iph = ip_hdr(skb); th = tcp_hdr(skb); th->check = 0; skb->ip_summed = CHECKSUM_PARTIAL; __tcp_v4_send_check(skb, iph->saddr, iph->daddr); return 0; } /* * This routine will send an RST to the other tcp. 
* * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.) * for reset. * Answer: if a packet caused RST, it is not for a socket * existing in our system, if it is matched to a socket, * it is just duplicate segment or bug in other side's TCP. * So that we build reply only basing on parameters * arrived with segment. * Exception: precedence violation. We do not implement it in any case. */ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); struct { struct tcphdr th; #ifdef CONFIG_TCP_MD5SIG __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)]; #endif } rep; struct ip_reply_arg arg; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; const __u8 *hash_location = NULL; unsigned char newhash[16]; int genhash; struct sock *sk1 = NULL; #endif struct net *net; /* Never send a reset in response to a reset. */ if (th->rst) return; if (skb_rtable(skb)->rt_type != RTN_LOCAL) return; /* Swap the send and the receive. */ memset(&rep, 0, sizeof(rep)); rep.th.dest = th->source; rep.th.source = th->dest; rep.th.doff = sizeof(struct tcphdr) / 4; rep.th.rst = 1; if (th->ack) { rep.th.seq = th->ack_seq; } else { rep.th.ack = 1; rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + skb->len - (th->doff << 2)); } memset(&arg, 0, sizeof(arg)); arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); #ifdef CONFIG_TCP_MD5SIG hash_location = tcp_parse_md5sig_option(th); if (!sk && hash_location) { /* * active side is lost. Try to find listening socket through * source port, and then find md5 key through listening socket. * we are not loose security here: * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match. 
*/ sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev), &tcp_hashinfo, ip_hdr(skb)->daddr, ntohs(th->source), inet_iif(skb)); /* don't send rst if it can't find key */ if (!sk1) return; rcu_read_lock(); key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *) &ip_hdr(skb)->saddr, AF_INET); if (!key) goto release_sk1; genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) goto release_sk1; } else { key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *) &ip_hdr(skb)->saddr, AF_INET) : NULL; } if (key) { rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); /* Update length and the length the header thinks exists */ arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; rep.th.doff = arg.iov[0].iov_len / 4; tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1], key, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &rep.th); } #endif arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, /* XXX */ arg.iov[0].iov_len, IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; /* When socket is gone, all binding information is lost. * routing might fail in this case. using iif for oif to * make sure we can deliver it */ arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb); net = dev_net(skb_dst(skb)->dev); arg.tos = ip_hdr(skb)->tos; ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr, &arg, arg.iov[0].iov_len); TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); #ifdef CONFIG_TCP_MD5SIG release_sk1: if (sk1) { rcu_read_unlock(); sock_put(sk1); } #endif } /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states outside socket context is ugly, certainly. What can I do? 
*/ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts, int oif, struct tcp_md5sig_key *key, int reply_flags, u8 tos) { const struct tcphdr *th = tcp_hdr(skb); struct { struct tcphdr th; __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) #ifdef CONFIG_TCP_MD5SIG + (TCPOLEN_MD5SIG_ALIGNED >> 2) #endif ]; } rep; struct ip_reply_arg arg; struct net *net = dev_net(skb_dst(skb)->dev); memset(&rep.th, 0, sizeof(struct tcphdr)); memset(&arg, 0, sizeof(arg)); arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); if (ts) { rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); rep.opt[1] = htonl(tcp_time_stamp); rep.opt[2] = htonl(ts); arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED; } /* Swap the send and the receive. */ rep.th.dest = th->source; rep.th.source = th->dest; rep.th.doff = arg.iov[0].iov_len / 4; rep.th.seq = htonl(seq); rep.th.ack_seq = htonl(ack); rep.th.ack = 1; rep.th.window = htons(win); #ifdef CONFIG_TCP_MD5SIG if (key) { int offset = (ts) ? 
3 : 0; rep.opt[offset++] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; rep.th.doff = arg.iov[0].iov_len/4; tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset], key, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &rep.th); } #endif arg.flags = reply_flags; arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, /* XXX */ arg.iov[0].iov_len, IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; if (oif) arg.bound_dev_if = oif; arg.tos = tos; ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr, &arg, arg.iov[0].iov_len); TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); } static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) { struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0, tw->tw_tos ); inet_twsk_put(tw); } static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, struct request_sock *req) { tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent, 0, tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, AF_INET), inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, ip_hdr(skb)->tos); } /* * Send a SYN-ACK after having received a SYN. * This still operates on a request_sock only, not on a big * socket. */ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, struct request_values *rvp) { const struct inet_request_sock *ireq = inet_rsk(req); struct flowi4 fl4; int err = -1; struct sk_buff * skb; /* First, grab a route. 
*/ if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) return -1; skb = tcp_make_synack(sk, dst, req, rvp); if (skb) { __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, ireq->rmt_addr, ireq->opt); err = net_xmit_eval(err); } dst_release(dst); return err; } static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req, struct request_values *rvp) { TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); return tcp_v4_send_synack(sk, NULL, req, rvp); } /* * IPv4 request_sock destructor. */ static void tcp_v4_reqsk_destructor(struct request_sock *req) { kfree(inet_rsk(req)->opt); } /* * Return 1 if a syncookie should be sent */ int tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb, const char *proto) { const char *msg = "Dropping request"; int want_cookie = 0; struct listen_sock *lopt; #ifdef CONFIG_SYN_COOKIES if (sysctl_tcp_syncookies) { msg = "Sending cookies"; want_cookie = 1; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); } else #endif NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; if (!lopt->synflood_warned) { lopt->synflood_warned = 1; pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n", proto, ntohs(tcp_hdr(skb)->dest), msg); } return want_cookie; } EXPORT_SYMBOL(tcp_syn_flood_action); /* * Save and compile IPv4 options into the request_sock if needed. */ static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk, struct sk_buff *skb) { const struct ip_options *opt = &(IPCB(skb)->opt); struct ip_options_rcu *dopt = NULL; if (opt && opt->optlen) { int opt_size = sizeof(*dopt) + opt->optlen; dopt = kmalloc(opt_size, GFP_ATOMIC); if (dopt) { if (ip_options_echo(&dopt->opt, skb)) { kfree(dopt); dopt = NULL; } } } return dopt; } #ifdef CONFIG_TCP_MD5SIG /* * RFC2385 MD5 checksumming requires a mapping of * IP address->MD5 Key. 
* We need to maintain these in the sk structure. */ /* Find the Key structure for an address. */ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, const union tcp_md5_addr *addr, int family) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; struct hlist_node *pos; unsigned int size = sizeof(struct in_addr); struct tcp_md5sig_info *md5sig; /* caller either holds rcu_read_lock() or socket lock */ md5sig = rcu_dereference_check(tp->md5sig_info, sock_owned_by_user(sk) || lockdep_is_held(&sk->sk_lock.slock)); if (!md5sig) return NULL; #if IS_ENABLED(CONFIG_IPV6) if (family == AF_INET6) size = sizeof(struct in6_addr); #endif hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) { if (key->family != family) continue; if (!memcmp(&key->addr, addr, size)) return key; } return NULL; } EXPORT_SYMBOL(tcp_md5_do_lookup); struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, struct sock *addr_sk) { union tcp_md5_addr *addr; addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr; return tcp_md5_do_lookup(sk, addr, AF_INET); } EXPORT_SYMBOL(tcp_v4_md5_lookup); static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, struct request_sock *req) { union tcp_md5_addr *addr; addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr; return tcp_md5_do_lookup(sk, addr, AF_INET); } /* This can be called on a newly created socket, from other files */ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, int family, const u8 *newkey, u8 newkeylen, gfp_t gfp) { /* Add Key to the list */ struct tcp_md5sig_key *key; struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_info *md5sig; key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); if (key) { /* Pre-existing entry - just update that one. 
*/ memcpy(key->key, newkey, newkeylen); key->keylen = newkeylen; return 0; } md5sig = rcu_dereference_protected(tp->md5sig_info, sock_owned_by_user(sk)); if (!md5sig) { md5sig = kmalloc(sizeof(*md5sig), gfp); if (!md5sig) return -ENOMEM; sk_nocaps_add(sk, NETIF_F_GSO_MASK); INIT_HLIST_HEAD(&md5sig->head); rcu_assign_pointer(tp->md5sig_info, md5sig); } key = sock_kmalloc(sk, sizeof(*key), gfp); if (!key) return -ENOMEM; if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) { sock_kfree_s(sk, key, sizeof(*key)); return -ENOMEM; } memcpy(key->key, newkey, newkeylen); key->keylen = newkeylen; key->family = family; memcpy(&key->addr, addr, (family == AF_INET6) ? sizeof(struct in6_addr) : sizeof(struct in_addr)); hlist_add_head_rcu(&key->node, &md5sig->head); return 0; } EXPORT_SYMBOL(tcp_md5_do_add); int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; struct tcp_md5sig_info *md5sig; key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); if (!key) return -ENOENT; hlist_del_rcu(&key->node); atomic_sub(sizeof(*key), &sk->sk_omem_alloc); kfree_rcu(key, rcu); md5sig = rcu_dereference_protected(tp->md5sig_info, sock_owned_by_user(sk)); if (hlist_empty(&md5sig->head)) tcp_free_md5sig_pool(); return 0; } EXPORT_SYMBOL(tcp_md5_do_del); void tcp_clear_md5_list(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; struct hlist_node *pos, *n; struct tcp_md5sig_info *md5sig; md5sig = rcu_dereference_protected(tp->md5sig_info, 1); if (!hlist_empty(&md5sig->head)) tcp_free_md5sig_pool(); hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) { hlist_del_rcu(&key->node); atomic_sub(sizeof(*key), &sk->sk_omem_alloc); kfree_rcu(key, rcu); } } static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, int optlen) { struct tcp_md5sig cmd; struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; if (optlen < sizeof(cmd)) return 
-EINVAL; if (copy_from_user(&cmd, optval, sizeof(cmd))) return -EFAULT; if (sin->sin_family != AF_INET) return -EINVAL; if (!cmd.tcpm_key || !cmd.tcpm_keylen) return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, AF_INET); if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) return -EINVAL; return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); } static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, __be32 daddr, __be32 saddr, int nbytes) { struct tcp4_pseudohdr *bp; struct scatterlist sg; bp = &hp->md5_blk.ip4; /* * 1. the TCP pseudo-header (in the order: source IP address, * destination IP address, zero-padded protocol number, and * segment length) */ bp->saddr = saddr; bp->daddr = daddr; bp->pad = 0; bp->protocol = IPPROTO_TCP; bp->len = cpu_to_be16(nbytes); sg_init_one(&sg, bp, sizeof(*bp)); return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp)); } static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th) { struct tcp_md5sig_pool *hp; struct hash_desc *desc; hp = tcp_get_md5sig_pool(); if (!hp) goto clear_hash_noput; desc = &hp->md5_desc; if (crypto_hash_init(desc)) goto clear_hash; if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_header(hp, th)) goto clear_hash; if (tcp_md5_hash_key(hp, key)) goto clear_hash; if (crypto_hash_final(desc, md5_hash)) goto clear_hash; tcp_put_md5sig_pool(); return 0; clear_hash: tcp_put_md5sig_pool(); clear_hash_noput: memset(md5_hash, 0, 16); return 1; } int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, const struct sock *sk, const struct request_sock *req, const struct sk_buff *skb) { struct tcp_md5sig_pool *hp; struct hash_desc *desc; const struct tcphdr *th = tcp_hdr(skb); __be32 saddr, daddr; if (sk) { saddr = inet_sk(sk)->inet_saddr; daddr = inet_sk(sk)->inet_daddr; } else if (req) { 
saddr = inet_rsk(req)->loc_addr; daddr = inet_rsk(req)->rmt_addr; } else { const struct iphdr *iph = ip_hdr(skb); saddr = iph->saddr; daddr = iph->daddr; } hp = tcp_get_md5sig_pool(); if (!hp) goto clear_hash_noput; desc = &hp->md5_desc; if (crypto_hash_init(desc)) goto clear_hash; if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len)) goto clear_hash; if (tcp_md5_hash_header(hp, th)) goto clear_hash; if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_key(hp, key)) goto clear_hash; if (crypto_hash_final(desc, md5_hash)) goto clear_hash; tcp_put_md5sig_pool(); return 0; clear_hash: tcp_put_md5sig_pool(); clear_hash_noput: memset(md5_hash, 0, 16); return 1; } EXPORT_SYMBOL(tcp_v4_md5_hash_skb); static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) { /* * This gets called for each TCP segment that arrives * so we want to be efficient. * We have 3 drop cases: * o No MD5 hash and one expected. * o MD5 hash and we're not expecting one. * o MD5 hash and its wrong. */ const __u8 *hash_location = NULL; struct tcp_md5sig_key *hash_expected; const struct iphdr *iph = ip_hdr(skb); const struct tcphdr *th = tcp_hdr(skb); int genhash; unsigned char newhash[16]; hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr, AF_INET); hash_location = tcp_parse_md5sig_option(th); /* We've parsed the options - do we have a hash? */ if (!hash_expected && !hash_location) return 0; if (hash_expected && !hash_location) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); return 1; } if (!hash_expected && hash_location) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); return 1; } /* Okay, so this is hash_expected and hash_location - * so we need to calculate the checksum. 
*/ genhash = tcp_v4_md5_hash_skb(newhash, hash_expected, NULL, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { if (net_ratelimit()) { pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", &iph->saddr, ntohs(th->source), &iph->daddr, ntohs(th->dest), genhash ? " tcp_v4_calc_md5_hash failed" : ""); } return 1; } return 0; } #endif struct request_sock_ops tcp_request_sock_ops __read_mostly = { .family = PF_INET, .obj_size = sizeof(struct tcp_request_sock), .rtx_syn_ack = tcp_v4_rtx_synack, .send_ack = tcp_v4_reqsk_send_ack, .destructor = tcp_v4_reqsk_destructor, .send_reset = tcp_v4_send_reset, .syn_ack_timeout = tcp_syn_ack_timeout, }; #ifdef CONFIG_TCP_MD5SIG static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { .md5_lookup = tcp_v4_reqsk_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, }; #endif int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) { struct tcp_extend_values tmp_ext; struct tcp_options_received tmp_opt; const u8 *hash_location; struct request_sock *req; struct inet_request_sock *ireq; struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = NULL; __be32 saddr = ip_hdr(skb)->saddr; __be32 daddr = ip_hdr(skb)->daddr; __u32 isn = TCP_SKB_CB(skb)->when; int want_cookie = 0; /* Never answer to SYNs send to broadcast or multicast */ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto drop; /* TW buckets are converted to open requests without * limitations, they conserve resources and peer is * evidently real one. */ if (inet_csk_reqsk_queue_is_full(sk) && !isn) { want_cookie = tcp_syn_flood_action(sk, skb, "TCP"); if (!want_cookie) goto drop; } /* Accept backlog is full. If we have already queued enough * of warm entries in syn queue, drop request. It is better than * clogging syn queue with openreqs with exponentially increasing * timeout. 
*/ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; req = inet_reqsk_alloc(&tcp_request_sock_ops); if (!req) goto drop; #ifdef CONFIG_TCP_MD5SIG tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; #endif tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = TCP_MSS_DEFAULT; tmp_opt.user_mss = tp->rx_opt.user_mss; tcp_parse_options(skb, &tmp_opt, &hash_location, 0); if (tmp_opt.cookie_plus > 0 && tmp_opt.saw_tstamp && !tp->rx_opt.cookie_out_never && (sysctl_tcp_cookie_size > 0 || (tp->cookie_values != NULL && tp->cookie_values->cookie_desired > 0))) { u8 *c; u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS]; int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE; if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0) goto drop_and_release; /* Secret recipe starts with IP addresses */ *mess++ ^= (__force u32)daddr; *mess++ ^= (__force u32)saddr; /* plus variable length Initiator Cookie */ c = (u8 *)mess; while (l-- > 0) *c++ ^= *hash_location++; want_cookie = 0; /* not our kind of cookie */ tmp_ext.cookie_out_never = 0; /* false */ tmp_ext.cookie_plus = tmp_opt.cookie_plus; } else if (!tp->rx_opt.cookie_in_always) { /* redundant indications, but ensure initialization. 
*/ tmp_ext.cookie_out_never = 1; /* true */ tmp_ext.cookie_plus = 0; } else { goto drop_and_release; } tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always; if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; tcp_openreq_init(req, &tmp_opt, skb); ireq = inet_rsk(req); ireq->loc_addr = daddr; ireq->rmt_addr = saddr; ireq->no_srccheck = inet_sk(sk)->transparent; ireq->opt = tcp_v4_save_options(sk, skb); ireq->ir_mark = inet_request_mark(sk, skb); if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; if (!want_cookie || tmp_opt.tstamp_ok) TCP_ECN_create_request(req, tcp_hdr(skb)); if (want_cookie) { isn = cookie_v4_init_sequence(sk, skb, &req->mss); req->cookie_ts = tmp_opt.tstamp_ok; } else if (!isn) { struct inet_peer *peer = NULL; struct flowi4 fl4; /* VJ's idea. We save last timestamp seen * from the destination in peer table, when entering * state TIME-WAIT, and check against it before * accepting new connection request. * * If "isn" is not zero, this request hit alive * timewait bucket, so that all the necessary checks * are made in the function processing timewait state. */ if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle && (dst = inet_csk_route_req(sk, &fl4, req)) != NULL && fl4.daddr == saddr && (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) { inet_peer_refcheck(peer); if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && (s32)(peer->tcp_ts - req->ts_recent) > TCP_PAWS_WINDOW) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); goto drop_and_release; } } /* Kill the following clause, if you dislike this way. */ else if (!sysctl_tcp_syncookies && (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < (sysctl_max_syn_backlog >> 2)) && (!peer || !peer->tcp_ts_stamp) && (!dst || !dst_metric(dst, RTAX_RTT))) { /* Without syncookies last quarter of * backlog is filled with destinations, * proven to be alive. 
* It means that we continue to communicate * to destinations, already remembered * to the moment of synflood. */ LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"), &saddr, ntohs(tcp_hdr(skb)->source)); goto drop_and_release; } isn = tcp_v4_init_sequence(skb); } tcp_rsk(req)->snt_isn = isn; tcp_rsk(req)->snt_synack = tcp_time_stamp; if (tcp_v4_send_synack(sk, dst, req, (struct request_values *)&tmp_ext) || want_cookie) goto drop_and_free; inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); return 0; drop_and_release: dst_release(dst); drop_and_free: reqsk_free(req); drop: return 0; } EXPORT_SYMBOL(tcp_v4_conn_request); /* * The three way handshake has completed - we got a valid synack - * now create the new socket. */ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) { struct inet_request_sock *ireq; struct inet_sock *newinet; struct tcp_sock *newtp; struct sock *newsk; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; #endif struct ip_options_rcu *inet_opt; if (sk_acceptq_is_full(sk)) goto exit_overflow; newsk = tcp_create_openreq_child(sk, req, skb); if (!newsk) goto exit_nonewsk; newsk->sk_gso_type = SKB_GSO_TCPV4; newtp = tcp_sk(newsk); newinet = inet_sk(newsk); ireq = inet_rsk(req); newinet->inet_daddr = ireq->rmt_addr; newinet->inet_rcv_saddr = ireq->loc_addr; newinet->inet_saddr = ireq->loc_addr; inet_opt = ireq->opt; rcu_assign_pointer(newinet->inet_opt, inet_opt); ireq->opt = NULL; newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; newinet->rcv_tos = ip_hdr(skb)->tos; inet_csk(newsk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; newinet->inet_id = newtp->write_seq ^ jiffies; if (!dst) { dst = inet_csk_route_child_sock(sk, newsk, req); if (!dst) goto put_and_exit; } else { /* syncookie case : see end of cookie_v4_check() */ } sk_setup_caps(newsk, dst); tcp_mtup_init(newsk); tcp_sync_mss(newsk, 
dst_mtu(dst)); newtp->advmss = dst_metric_advmss(dst); if (tcp_sk(sk)->rx_opt.user_mss && tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; tcp_initialize_rcv_mss(newsk); if (tcp_rsk(req)->snt_synack) tcp_valid_rtt_meas(newsk, tcp_time_stamp - tcp_rsk(req)->snt_synack); newtp->total_retrans = req->retrans; #ifdef CONFIG_TCP_MD5SIG /* Copy over the MD5 key from the original socket */ key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, AF_INET); if (key != NULL) { /* * We're using one, so create a matching key * on the newsk structure. If we fail to get * memory, then we end up not copying the key * across. Shucks. */ tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr, AF_INET, key->key, key->keylen, GFP_ATOMIC); sk_nocaps_add(newsk, NETIF_F_GSO_MASK); } #endif if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; __inet_hash_nolisten(newsk, NULL); return newsk; exit_overflow: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); exit_nonewsk: dst_release(dst); exit: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; put_and_exit: tcp_clear_xmit_timers(newsk); tcp_cleanup_congestion_control(newsk); bh_unlock_sock(newsk); sock_put(newsk); goto exit; } EXPORT_SYMBOL(tcp_v4_syn_recv_sock); static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) { struct tcphdr *th = tcp_hdr(skb); const struct iphdr *iph = ip_hdr(skb); struct sock *nsk; struct request_sock **prev; /* Find possible connection requests. 
*/ struct request_sock *req = inet_csk_search_req(sk, &prev, th->source, iph->saddr, iph->daddr); if (req) return tcp_check_req(sk, skb, req, prev); nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr, th->source, iph->daddr, th->dest, inet_iif(skb)); if (nsk) { if (nsk->sk_state != TCP_TIME_WAIT) { bh_lock_sock(nsk); return nsk; } inet_twsk_put(inet_twsk(nsk)); return NULL; } #ifdef CONFIG_SYN_COOKIES if (!th->syn) sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt)); #endif return sk; } static __sum16 tcp_v4_checksum_init(struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!tcp_v4_check(skb->len, iph->saddr, iph->daddr, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; return 0; } } skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len, IPPROTO_TCP, 0); if (skb->len <= 76) { return __skb_checksum_complete(skb); } return 0; } /* The socket must have it's spinlock held when we get * here. * * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held. 
*/
/*
 * tcp_v4_do_rcv - main receive work for a locked socket.
 * @sk:  socket (spinlock held by caller, see comment above)
 * @skb: segment to process; ownership is consumed (freed or queued)
 *
 * Dispatches on socket state: fast path for ESTABLISHED, new-connection
 * handling for LISTEN (via tcp_v4_hnd_req), generic state machine
 * otherwise.  Returns 0 in all cases; errors are signalled by sending a
 * RST and/or dropping the segment.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	/* Header/checksum sanity before the slow paths. */
	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		/* May return the listener itself, a fresh child socket,
		 * or NULL (segment handled/dropped internally).
		 */
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

/*
 * From tcp_input.c
 */
/* Protocol entry point for IPv4 TCP: validate headers, look up the owning
 * socket and hand the segment to tcp_v4_do_rcv() (directly, via prequeue,
 * or via the socket backlog).
 */
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
* Packet length and doff are validated by header prediction, * provided case of th->doff==0 is eliminated. * So, we defer the checks. */ if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb)) goto bad_packet; th = tcp_hdr(skb); iph = ip_hdr(skb); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff * 4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->when = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); TCP_SKB_CB(skb)->sacked = 0; sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); if (!sk) goto no_tcp_socket; process: if (sk->sk_state == TCP_TIME_WAIT) goto do_time_wait; if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); goto discard_and_relse; } if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; nf_reset(skb); if (sk_filter(sk, skb)) goto discard_and_relse; skb->dev = NULL; bh_lock_sock_nested(sk); ret = 0; if (!sock_owned_by_user(sk)) { #ifdef CONFIG_NET_DMA struct tcp_sock *tp = tcp_sk(sk); if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) tp->ucopy.dma_chan = net_dma_find_channel(); if (tp->ucopy.dma_chan) ret = tcp_v4_do_rcv(sk, skb); else #endif { if (!tcp_prequeue(sk, skb)) ret = tcp_v4_do_rcv(sk, skb); } } else if (unlikely(sk_add_backlog(sk, skb))) { bh_unlock_sock(sk); NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); goto discard_and_relse; } bh_unlock_sock(sk); sock_put(sk); return ret; no_tcp_socket: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { bad_packet: TCP_INC_STATS_BH(net, TCP_MIB_INERRS); } else { tcp_v4_send_reset(NULL, skb); } discard_it: /* Discard frame. 
*/ kfree_skb(skb); return 0; discard_and_relse: sock_put(sk); goto discard_it; do_time_wait: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { inet_twsk_put(inet_twsk(sk)); goto discard_it; } if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { TCP_INC_STATS_BH(net, TCP_MIB_INERRS); inet_twsk_put(inet_twsk(sk)); goto discard_it; } switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { case TCP_TW_SYN: { struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, iph->daddr, th->dest, inet_iif(skb)); if (sk2) { inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row); inet_twsk_put(inet_twsk(sk)); sk = sk2; goto process; } /* Fall through to ACK */ } case TCP_TW_ACK: tcp_v4_timewait_ack(sk, skb); break; case TCP_TW_RST: goto no_tcp_socket; case TCP_TW_SUCCESS:; } goto discard_it; } struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it) { struct rtable *rt = (struct rtable *) __sk_dst_get(sk); struct inet_sock *inet = inet_sk(sk); struct inet_peer *peer; if (!rt || inet->cork.fl.u.ip4.daddr != inet->inet_daddr) { peer = inet_getpeer_v4(inet->inet_daddr, 1); *release_it = true; } else { if (!rt->peer) rt_bind_peer(rt, inet->inet_daddr, 1); peer = rt->peer; *release_it = false; } return peer; } EXPORT_SYMBOL(tcp_v4_get_peer); void *tcp_v4_tw_get_peer(struct sock *sk) { const struct inet_timewait_sock *tw = inet_twsk(sk); return inet_getpeer_v4(tw->tw_daddr, 1); } EXPORT_SYMBOL(tcp_v4_tw_get_peer); static struct timewait_sock_ops tcp_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp_timewait_sock), .twsk_unique = tcp_twsk_unique, .twsk_destructor= tcp_twsk_destructor, .twsk_getpeer = tcp_v4_tw_get_peer, }; const struct inet_connection_sock_af_ops ipv4_specific = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .conn_request = tcp_v4_conn_request, .syn_recv_sock = tcp_v4_syn_recv_sock, .get_peer = tcp_v4_get_peer, .net_header_len = sizeof(struct iphdr), 
.setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, .addr2sockaddr = inet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in), .bind_conflict = inet_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, .compat_getsockopt = compat_ip_getsockopt, #endif }; EXPORT_SYMBOL(ipv4_specific); #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { .md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, .md5_parse = tcp_v4_parse_md5_keys, }; #endif /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. */ static int tcp_v4_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); skb_queue_head_init(&tp->out_of_order_queue); tcp_init_xmit_timers(sk); tcp_prequeue_init(tp); icsk->icsk_rto = TCP_TIMEOUT_INIT; tp->mdev = TCP_TIMEOUT_INIT; /* So many TCP implementations out there (incorrectly) count the * initial SYN frame in their delayed-ACK and congestion control * algorithms that we must have the following bandaid to talk * efficiently to them. -DaveM */ tp->snd_cwnd = TCP_INIT_CWND; /* See draft-stevens-tcpca-spec-01 for discussion of the * initialization of these values. */ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd_clamp = ~0; tp->mss_cache = TCP_MSS_DEFAULT; tp->reordering = sysctl_tcp_reordering; icsk->icsk_ca_ops = &tcp_init_congestion_ops; sk->sk_state = TCP_CLOSE; sk->sk_write_space = sk_stream_write_space; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); icsk->icsk_af_ops = &ipv4_specific; icsk->icsk_sync_mss = tcp_sync_mss; #ifdef CONFIG_TCP_MD5SIG tp->af_specific = &tcp_sock_ipv4_specific; #endif /* TCP Cookie Transactions */ if (sysctl_tcp_cookie_size > 0) { /* Default, cookies without s_data_payload. 
*/ tp->cookie_values = kzalloc(sizeof(*tp->cookie_values), sk->sk_allocation); if (tp->cookie_values != NULL) kref_init(&tp->cookie_values->kref); } /* Presumed zeroed, in order of appearance: * cookie_in_always, cookie_out_never, * s_data_constant, s_data_in, s_data_out */ sk->sk_sndbuf = sysctl_tcp_wmem[1]; sk->sk_rcvbuf = sysctl_tcp_rmem[1]; local_bh_disable(); sock_update_memcg(sk); sk_sockets_allocated_inc(sk); local_bh_enable(); return 0; } void tcp_v4_destroy_sock(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tcp_clear_xmit_timers(sk); tcp_cleanup_congestion_control(sk); /* Cleanup up the write buffer. */ tcp_write_queue_purge(sk); /* Cleans up our, hopefully empty, out_of_order_queue. */ __skb_queue_purge(&tp->out_of_order_queue); #ifdef CONFIG_TCP_MD5SIG /* Clean up the MD5 key list, if any */ if (tp->md5sig_info) { tcp_clear_md5_list(sk); kfree_rcu(tp->md5sig_info, rcu); tp->md5sig_info = NULL; } #endif #ifdef CONFIG_NET_DMA /* Cleans up our sk_async_wait_queue */ __skb_queue_purge(&sk->sk_async_wait_queue); #endif /* Clean prequeue, it must be empty really */ __skb_queue_purge(&tp->ucopy.prequeue); /* Clean up a referenced TCP bind bucket. */ if (inet_csk(sk)->icsk_bind_hash) inet_put_port(sk); /* * If sendmsg cached page exists, toss it. */ if (sk->sk_sndmsg_page) { __free_page(sk->sk_sndmsg_page); sk->sk_sndmsg_page = NULL; } /* TCP Cookie Transactions */ if (tp->cookie_values != NULL) { kref_put(&tp->cookie_values->kref, tcp_cookie_values_release); tp->cookie_values = NULL; } sk_sockets_allocated_dec(sk); sock_release_memcg(sk); } EXPORT_SYMBOL(tcp_v4_destroy_sock); #ifdef CONFIG_PROC_FS /* Proc filesystem TCP sock list dumping. */ static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head) { return hlist_nulls_empty(head) ? NULL : list_entry(head->first, struct inet_timewait_sock, tw_node); } static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw) { return !is_a_nulls(tw->tw_node.next) ? 
hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL; } /* * Get next listener socket follow cur. If cur is NULL, get first socket * starting from bucket given in st->bucket; when st->bucket is zero the * very first socket in the hash table is returned. */ static void *listening_get_next(struct seq_file *seq, void *cur) { struct inet_connection_sock *icsk; struct hlist_nulls_node *node; struct sock *sk = cur; struct inet_listen_hashbucket *ilb; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); if (!sk) { ilb = &tcp_hashinfo.listening_hash[st->bucket]; spin_lock_bh(&ilb->lock); sk = sk_nulls_head(&ilb->head); st->offset = 0; goto get_sk; } ilb = &tcp_hashinfo.listening_hash[st->bucket]; ++st->num; ++st->offset; if (st->state == TCP_SEQ_STATE_OPENREQ) { struct request_sock *req = cur; icsk = inet_csk(st->syn_wait_sk); req = req->dl_next; while (1) { while (req) { if (req->rsk_ops->family == st->family) { cur = req; goto out; } req = req->dl_next; } if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) break; get_req: req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket]; } sk = sk_nulls_next(st->syn_wait_sk); st->state = TCP_SEQ_STATE_LISTENING; read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); } else { icsk = inet_csk(sk); read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); if (reqsk_queue_len(&icsk->icsk_accept_queue)) goto start_req; read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); sk = sk_nulls_next(sk); } get_sk: sk_nulls_for_each_from(sk, node) { if (!net_eq(sock_net(sk), net)) continue; if (sk->sk_family == st->family) { cur = sk; goto out; } icsk = inet_csk(sk); read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); if (reqsk_queue_len(&icsk->icsk_accept_queue)) { start_req: st->uid = sock_i_uid(sk); st->syn_wait_sk = sk; st->state = TCP_SEQ_STATE_OPENREQ; st->sbucket = 0; goto get_req; } read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); } spin_unlock_bh(&ilb->lock); 
st->offset = 0; if (++st->bucket < INET_LHTABLE_SIZE) { ilb = &tcp_hashinfo.listening_hash[st->bucket]; spin_lock_bh(&ilb->lock); sk = sk_nulls_head(&ilb->head); goto get_sk; } cur = NULL; out: return cur; } static void *listening_get_idx(struct seq_file *seq, loff_t *pos) { struct tcp_iter_state *st = seq->private; void *rc; st->bucket = 0; st->offset = 0; rc = listening_get_next(seq, NULL); while (rc && *pos) { rc = listening_get_next(seq, rc); --*pos; } return rc; } static inline int empty_bucket(struct tcp_iter_state *st) { return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) && hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain); } /* * Get first established socket starting from bucket given in st->bucket. * If st->bucket is zero, the very first socket in the hash is returned. */ static void *established_get_first(struct seq_file *seq) { struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); void *rc = NULL; st->offset = 0; for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { struct sock *sk; struct hlist_nulls_node *node; struct inet_timewait_sock *tw; spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); /* Lockless fast path for the common case of empty buckets */ if (empty_bucket(st)) continue; spin_lock_bh(lock); sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { if (sk->sk_family != st->family || !net_eq(sock_net(sk), net)) { continue; } rc = sk; goto out; } st->state = TCP_SEQ_STATE_TIME_WAIT; inet_twsk_for_each(tw, node, &tcp_hashinfo.ehash[st->bucket].twchain) { if (tw->tw_family != st->family || !net_eq(twsk_net(tw), net)) { continue; } rc = tw; goto out; } spin_unlock_bh(lock); st->state = TCP_SEQ_STATE_ESTABLISHED; } out: return rc; } static void *established_get_next(struct seq_file *seq, void *cur) { struct sock *sk = cur; struct inet_timewait_sock *tw; struct hlist_nulls_node *node; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); 
++st->num; ++st->offset; if (st->state == TCP_SEQ_STATE_TIME_WAIT) { tw = cur; tw = tw_next(tw); get_tw: while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) { tw = tw_next(tw); } if (tw) { cur = tw; goto out; } spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); st->state = TCP_SEQ_STATE_ESTABLISHED; /* Look for next non empty bucket */ st->offset = 0; while (++st->bucket <= tcp_hashinfo.ehash_mask && empty_bucket(st)) ; if (st->bucket > tcp_hashinfo.ehash_mask) return NULL; spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain); } else sk = sk_nulls_next(sk); sk_nulls_for_each_from(sk, node) { if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) goto found; } st->state = TCP_SEQ_STATE_TIME_WAIT; tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain); goto get_tw; found: cur = sk; out: return cur; } static void *established_get_idx(struct seq_file *seq, loff_t pos) { struct tcp_iter_state *st = seq->private; void *rc; st->bucket = 0; rc = established_get_first(seq); while (rc && pos) { rc = established_get_next(seq, rc); --pos; } return rc; } static void *tcp_get_idx(struct seq_file *seq, loff_t pos) { void *rc; struct tcp_iter_state *st = seq->private; st->state = TCP_SEQ_STATE_LISTENING; rc = listening_get_idx(seq, &pos); if (!rc) { st->state = TCP_SEQ_STATE_ESTABLISHED; rc = established_get_idx(seq, pos); } return rc; } static void *tcp_seek_last_pos(struct seq_file *seq) { struct tcp_iter_state *st = seq->private; int offset = st->offset; int orig_num = st->num; void *rc = NULL; switch (st->state) { case TCP_SEQ_STATE_OPENREQ: case TCP_SEQ_STATE_LISTENING: if (st->bucket >= INET_LHTABLE_SIZE) break; st->state = TCP_SEQ_STATE_LISTENING; rc = listening_get_next(seq, NULL); while (offset-- && rc) rc = listening_get_next(seq, rc); if (rc) break; st->bucket = 0; /* Fallthrough */ case TCP_SEQ_STATE_ESTABLISHED: case TCP_SEQ_STATE_TIME_WAIT: st->state = 
TCP_SEQ_STATE_ESTABLISHED; if (st->bucket > tcp_hashinfo.ehash_mask) break; rc = established_get_first(seq); while (offset-- && rc) rc = established_get_next(seq, rc); } st->num = orig_num; return rc; } static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) { struct tcp_iter_state *st = seq->private; void *rc; if (*pos && *pos == st->last_pos) { rc = tcp_seek_last_pos(seq); if (rc) goto out; } st->state = TCP_SEQ_STATE_LISTENING; st->num = 0; st->bucket = 0; st->offset = 0; rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; out: st->last_pos = *pos; return rc; } static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct tcp_iter_state *st = seq->private; void *rc = NULL; if (v == SEQ_START_TOKEN) { rc = tcp_get_idx(seq, 0); goto out; } switch (st->state) { case TCP_SEQ_STATE_OPENREQ: case TCP_SEQ_STATE_LISTENING: rc = listening_get_next(seq, v); if (!rc) { st->state = TCP_SEQ_STATE_ESTABLISHED; st->bucket = 0; st->offset = 0; rc = established_get_first(seq); } break; case TCP_SEQ_STATE_ESTABLISHED: case TCP_SEQ_STATE_TIME_WAIT: rc = established_get_next(seq, v); break; } out: ++*pos; st->last_pos = *pos; return rc; } static void tcp_seq_stop(struct seq_file *seq, void *v) { struct tcp_iter_state *st = seq->private; switch (st->state) { case TCP_SEQ_STATE_OPENREQ: if (v) { struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk); read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); } case TCP_SEQ_STATE_LISTENING: if (v != SEQ_START_TOKEN) spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock); break; case TCP_SEQ_STATE_TIME_WAIT: case TCP_SEQ_STATE_ESTABLISHED: if (v) spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); break; } } int tcp_seq_open(struct inode *inode, struct file *file) { struct tcp_seq_afinfo *afinfo = PDE(inode)->data; struct tcp_iter_state *s; int err; err = seq_open_net(inode, file, &afinfo->seq_ops, sizeof(struct tcp_iter_state)); if (err < 0) return err; s = ((struct seq_file 
*)file->private_data)->private; s->family = afinfo->family; s->last_pos = 0; return 0; } EXPORT_SYMBOL(tcp_seq_open); int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo) { int rc = 0; struct proc_dir_entry *p; afinfo->seq_ops.start = tcp_seq_start; afinfo->seq_ops.next = tcp_seq_next; afinfo->seq_ops.stop = tcp_seq_stop; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, afinfo->seq_fops, afinfo); if (!p) rc = -ENOMEM; return rc; } EXPORT_SYMBOL(tcp_proc_register); void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo) { proc_net_remove(net, afinfo->name); } EXPORT_SYMBOL(tcp_proc_unregister); static void get_openreq4(const struct sock *sk, const struct request_sock *req, struct seq_file *f, int i, int uid, int *len) { const struct inet_request_sock *ireq = inet_rsk(req); int ttd = req->expires - jiffies; seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n", i, ireq->loc_addr, ntohs(inet_sk(sk)->inet_sport), ireq->rmt_addr, ntohs(ireq->rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. 
*/ 1, /* timers active (only the expire timer) */ jiffies_to_clock_t(ttd), req->retrans, uid, 0, /* non standard timer */ 0, /* open_requests have no inode */ atomic_read(&sk->sk_refcnt), req, len); } static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) { int timer_active; unsigned long timer_expires; const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); const struct inet_sock *inet = inet_sk(sk); __be32 dest = inet->inet_daddr; __be32 src = inet->inet_rcv_saddr; __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); int rx_queue; if (icsk->icsk_pending == ICSK_TIME_RETRANS) { timer_active = 1; timer_expires = icsk->icsk_timeout; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = icsk->icsk_timeout; } else if (timer_pending(&sk->sk_timer)) { timer_active = 2; timer_expires = sk->sk_timer.expires; } else { timer_active = 0; timer_expires = jiffies; } if (sk->sk_state == TCP_LISTEN) rx_queue = sk->sk_ack_backlog; else /* * because we dont lock socket, we might find a transient negative value */ rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n", i, src, srcp, dest, destp, sk->sk_state, tp->write_seq - tp->snd_una, rx_queue, timer_active, jiffies_to_clock_t(timer_expires - jiffies), icsk->icsk_retransmits, sock_i_uid(sk), icsk->icsk_probes_out, sock_i_ino(sk), atomic_read(&sk->sk_refcnt), sk, jiffies_to_clock_t(icsk->icsk_rto), jiffies_to_clock_t(icsk->icsk_ack.ato), (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, tp->snd_cwnd, tcp_in_initial_slowstart(tp) ? 
-1 : tp->snd_ssthresh, len); } static void get_timewait4_sock(const struct inet_timewait_sock *tw, struct seq_file *f, int i, int *len) { __be32 dest, src; __u16 destp, srcp; int ttd = tw->tw_ttd - jiffies; if (ttd < 0) ttd = 0; dest = tw->tw_daddr; src = tw->tw_rcv_saddr; destp = ntohs(tw->tw_dport); srcp = ntohs(tw->tw_sport); seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n", i, src, srcp, dest, destp, tw->tw_substate, 0, 0, 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, atomic_read(&tw->tw_refcnt), tw, len); } #define TMPSZ 150 static int tcp4_seq_show(struct seq_file *seq, void *v) { struct tcp_iter_state *st; int len; if (v == SEQ_START_TOKEN) { seq_printf(seq, "%-*s\n", TMPSZ - 1, " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode"); goto out; } st = seq->private; switch (st->state) { case TCP_SEQ_STATE_LISTENING: case TCP_SEQ_STATE_ESTABLISHED: get_tcp4_sock(v, seq, st->num, &len); break; case TCP_SEQ_STATE_OPENREQ: get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len); break; case TCP_SEQ_STATE_TIME_WAIT: get_timewait4_sock(v, seq, st->num, &len); break; } seq_printf(seq, "%*s\n", TMPSZ - 1 - len, ""); out: return 0; } static const struct file_operations tcp_afinfo_seq_fops = { .owner = THIS_MODULE, .open = tcp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net }; static struct tcp_seq_afinfo tcp4_seq_afinfo = { .name = "tcp", .family = AF_INET, .seq_fops = &tcp_afinfo_seq_fops, .seq_ops = { .show = tcp4_seq_show, }, }; static int __net_init tcp4_proc_init_net(struct net *net) { return tcp_proc_register(net, &tcp4_seq_afinfo); } static void __net_exit tcp4_proc_exit_net(struct net *net) { tcp_proc_unregister(net, &tcp4_seq_afinfo); } static struct pernet_operations tcp4_net_ops = { .init = tcp4_proc_init_net, .exit = tcp4_proc_exit_net, }; int __init tcp4_proc_init(void) { return register_pernet_subsys(&tcp4_net_ops); } void 
tcp4_proc_exit(void) { unregister_pernet_subsys(&tcp4_net_ops); } #endif /* CONFIG_PROC_FS */ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) { const struct iphdr *iph = skb_gro_network_header(skb); switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; break; } /* fall through */ case CHECKSUM_NONE: NAPI_GRO_CB(skb)->flush = 1; return NULL; } return tcp_gro_receive(head, skb); } int tcp4_gro_complete(struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); struct tcphdr *th = tcp_hdr(skb); th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), iph->saddr, iph->daddr, 0); skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; return tcp_gro_complete(skb); } struct proto tcp_prot = { .name = "TCP", .owner = THIS_MODULE, .close = tcp_close, .connect = tcp_v4_connect, .disconnect = tcp_disconnect, .accept = inet_csk_accept, .ioctl = tcp_ioctl, .init = tcp_v4_init_sock, .destroy = tcp_v4_destroy_sock, .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .sendpage = tcp_sendpage, .backlog_rcv = tcp_v4_do_rcv, .hash = inet_hash, .unhash = inet_unhash, .get_port = inet_csk_get_port, .enter_memory_pressure = tcp_enter_memory_pressure, .sockets_allocated = &tcp_sockets_allocated, .orphan_count = &tcp_orphan_count, .memory_allocated = &tcp_memory_allocated, .memory_pressure = &tcp_memory_pressure, .sysctl_wmem = sysctl_tcp_wmem, .sysctl_rmem = sysctl_tcp_rmem, .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .twsk_prot = &tcp_timewait_sock_ops, .rsk_prot = &tcp_request_sock_ops, .h.hashinfo = &tcp_hashinfo, .no_autobind = true, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, #endif #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM .init_cgroup = 
tcp_init_cgroup, .destroy_cgroup = tcp_destroy_cgroup, .proto_cgroup = tcp_proto_cgroup, #endif }; EXPORT_SYMBOL(tcp_prot); static int __net_init tcp_sk_init(struct net *net) { return inet_ctl_sock_create(&net->ipv4.tcp_sock, PF_INET, SOCK_RAW, IPPROTO_TCP, net); } static void __net_exit tcp_sk_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv4.tcp_sock); } static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) { inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET); } static struct pernet_operations __net_initdata tcp_sk_ops = { .init = tcp_sk_init, .exit = tcp_sk_exit, .exit_batch = tcp_sk_exit_batch, }; void __init tcp_v4_init(void) { inet_hashinfo_init(&tcp_hashinfo); if (register_pernet_subsys(&tcp_sk_ops)) panic("Failed to create the TCP control socket.\n"); }
gpl-2.0
Ateeq72/hTC_Pico_Kernel
drivers/rtc/rtc-rx8025.c
108
17101
/* * Driver for Epson's RTC module RX-8025 SA/NB * * Copyright (C) 2009 Wolfgang Grandegger <wg@grandegger.com> * * Copyright (C) 2005 by Digi International Inc. * All rights reserved. * * Modified by fengjh at rising.com.cn * <http://lists.lm-sensors.org/mailman/listinfo/lm-sensors> * 2006.11 * * Code cleanup by Sergei Poselenov, <sposelenov@emcraft.com> * Converted to new style by Wolfgang Grandegger <wg@grandegger.com> * Alarm and periodic interrupt added by Dmitry Rakhchev <rda@emcraft.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/bcd.h> #include <linux/i2c.h> #include <linux/list.h> #include <linux/rtc.h> /* Register definitions */ #define RX8025_REG_SEC 0x00 #define RX8025_REG_MIN 0x01 #define RX8025_REG_HOUR 0x02 #define RX8025_REG_WDAY 0x03 #define RX8025_REG_MDAY 0x04 #define RX8025_REG_MONTH 0x05 #define RX8025_REG_YEAR 0x06 #define RX8025_REG_DIGOFF 0x07 #define RX8025_REG_ALWMIN 0x08 #define RX8025_REG_ALWHOUR 0x09 #define RX8025_REG_ALWWDAY 0x0a #define RX8025_REG_ALDMIN 0x0b #define RX8025_REG_ALDHOUR 0x0c /* 0x0d is reserved */ #define RX8025_REG_CTRL1 0x0e #define RX8025_REG_CTRL2 0x0f #define RX8025_BIT_CTRL1_CT (7 << 0) /* 1 Hz periodic level irq */ #define RX8025_BIT_CTRL1_CT_1HZ 4 #define RX8025_BIT_CTRL1_TEST (1 << 3) #define RX8025_BIT_CTRL1_1224 (1 << 5) #define RX8025_BIT_CTRL1_DALE (1 << 6) #define RX8025_BIT_CTRL1_WALE (1 << 7) #define RX8025_BIT_CTRL2_DAFG (1 << 0) #define RX8025_BIT_CTRL2_WAFG (1 << 1) #define RX8025_BIT_CTRL2_CTFG (1 << 2) #define RX8025_BIT_CTRL2_PON (1 << 4) #define RX8025_BIT_CTRL2_XST (1 << 5) #define RX8025_BIT_CTRL2_VDET (1 << 6) /* Clock precision adjustment */ #define RX8025_ADJ_RESOLUTION 3050 /* in ppb */ #define RX8025_ADJ_DATA_MAX 62 #define 
RX8025_ADJ_DATA_MIN -62 static const struct i2c_device_id rx8025_id[] = { { "rx8025", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, rx8025_id); struct rx8025_data { struct i2c_client *client; struct rtc_device *rtc; struct work_struct work; u8 ctrl1; unsigned exiting:1; }; static int rx8025_read_reg(struct i2c_client *client, int number, u8 *value) { int ret = i2c_smbus_read_byte_data(client, (number << 4) | 0x08); if (ret < 0) { dev_err(&client->dev, "Unable to read register #%d\n", number); return ret; } *value = ret; return 0; } static int rx8025_read_regs(struct i2c_client *client, int number, u8 length, u8 *values) { int ret = i2c_smbus_read_i2c_block_data(client, (number << 4) | 0x08, length, values); if (ret != length) { dev_err(&client->dev, "Unable to read registers #%d..#%d\n", number, number + length - 1); return ret < 0 ? ret : -EIO; } return 0; } static int rx8025_write_reg(struct i2c_client *client, int number, u8 value) { int ret = i2c_smbus_write_byte_data(client, number << 4, value); if (ret) dev_err(&client->dev, "Unable to write register #%d\n", number); return ret; } static int rx8025_write_regs(struct i2c_client *client, int number, u8 length, u8 *values) { int ret = i2c_smbus_write_i2c_block_data(client, (number << 4) | 0x08, length, values); if (ret) dev_err(&client->dev, "Unable to write registers #%d..#%d\n", number, number + length - 1); return ret; } static irqreturn_t rx8025_irq(int irq, void *dev_id) { struct i2c_client *client = dev_id; struct rx8025_data *rx8025 = i2c_get_clientdata(client); disable_irq_nosync(irq); schedule_work(&rx8025->work); return IRQ_HANDLED; } static void rx8025_work(struct work_struct *work) { struct rx8025_data *rx8025 = container_of(work, struct rx8025_data, work); struct i2c_client *client = rx8025->client; struct mutex *lock = &rx8025->rtc->ops_lock; u8 status; mutex_lock(lock); if (rx8025_read_reg(client, RX8025_REG_CTRL2, &status)) goto out; if (!(status & RX8025_BIT_CTRL2_XST)) dev_warn(&client->dev, 
"Oscillation stop was detected," "you may have to readjust the clock\n"); if (status & RX8025_BIT_CTRL2_CTFG) { /* periodic */ status &= ~RX8025_BIT_CTRL2_CTFG; local_irq_disable(); rtc_update_irq(rx8025->rtc, 1, RTC_PF | RTC_IRQF); local_irq_enable(); } if (status & RX8025_BIT_CTRL2_DAFG) { /* alarm */ status &= RX8025_BIT_CTRL2_DAFG; if (rx8025_write_reg(client, RX8025_REG_CTRL1, rx8025->ctrl1 & ~RX8025_BIT_CTRL1_DALE)) goto out; local_irq_disable(); rtc_update_irq(rx8025->rtc, 1, RTC_AF | RTC_IRQF); local_irq_enable(); } /* acknowledge IRQ */ rx8025_write_reg(client, RX8025_REG_CTRL2, status | RX8025_BIT_CTRL2_XST); out: if (!rx8025->exiting) enable_irq(client->irq); mutex_unlock(lock); } static int rx8025_get_time(struct device *dev, struct rtc_time *dt) { struct rx8025_data *rx8025 = dev_get_drvdata(dev); u8 date[7]; int err; err = rx8025_read_regs(rx8025->client, RX8025_REG_SEC, 7, date); if (err) return err; dev_dbg(dev, "%s: read 0x%02x 0x%02x " "0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", __func__, date[0], date[1], date[2], date[3], date[4], date[5], date[6]); dt->tm_sec = bcd2bin(date[RX8025_REG_SEC] & 0x7f); dt->tm_min = bcd2bin(date[RX8025_REG_MIN] & 0x7f); if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224) dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x3f); else dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x1f) % 12 + (date[RX8025_REG_HOUR] & 0x20 ? 12 : 0); dt->tm_mday = bcd2bin(date[RX8025_REG_MDAY] & 0x3f); dt->tm_mon = bcd2bin(date[RX8025_REG_MONTH] & 0x1f) - 1; dt->tm_year = bcd2bin(date[RX8025_REG_YEAR]); if (dt->tm_year < 70) dt->tm_year += 100; dev_dbg(dev, "%s: date %ds %dm %dh %dmd %dm %dy\n", __func__, dt->tm_sec, dt->tm_min, dt->tm_hour, dt->tm_mday, dt->tm_mon, dt->tm_year); return rtc_valid_tm(dt); } static int rx8025_set_time(struct device *dev, struct rtc_time *dt) { struct rx8025_data *rx8025 = dev_get_drvdata(dev); u8 date[7]; /* * BUG: The HW assumes every year that is a multiple of 4 to be a leap * year. 
Next time this is wrong is 2100, which will not be a leap * year. */ /* * Here the read-only bits are written as "0". I'm not sure if that * is sound. */ date[RX8025_REG_SEC] = bin2bcd(dt->tm_sec); date[RX8025_REG_MIN] = bin2bcd(dt->tm_min); if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224) date[RX8025_REG_HOUR] = bin2bcd(dt->tm_hour); else date[RX8025_REG_HOUR] = (dt->tm_hour >= 12 ? 0x20 : 0) | bin2bcd((dt->tm_hour + 11) % 12 + 1); date[RX8025_REG_WDAY] = bin2bcd(dt->tm_wday); date[RX8025_REG_MDAY] = bin2bcd(dt->tm_mday); date[RX8025_REG_MONTH] = bin2bcd(dt->tm_mon + 1); date[RX8025_REG_YEAR] = bin2bcd(dt->tm_year % 100); dev_dbg(dev, "%s: write 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", __func__, date[0], date[1], date[2], date[3], date[4], date[5], date[6]); return rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date); } static int rx8025_init_client(struct i2c_client *client, int *need_reset) { struct rx8025_data *rx8025 = i2c_get_clientdata(client); u8 ctrl[2], ctrl2; int need_clear = 0; int err; err = rx8025_read_regs(rx8025->client, RX8025_REG_CTRL1, 2, ctrl); if (err) goto out; /* Keep test bit zero ! 
*/ rx8025->ctrl1 = ctrl[0] & ~RX8025_BIT_CTRL1_TEST; if (ctrl[1] & RX8025_BIT_CTRL2_PON) { dev_warn(&client->dev, "power-on reset was detected, " "you may have to readjust the clock\n"); *need_reset = 1; } if (ctrl[1] & RX8025_BIT_CTRL2_VDET) { dev_warn(&client->dev, "a power voltage drop was detected, " "you may have to readjust the clock\n"); *need_reset = 1; } if (!(ctrl[1] & RX8025_BIT_CTRL2_XST)) { dev_warn(&client->dev, "Oscillation stop was detected," "you may have to readjust the clock\n"); *need_reset = 1; } if (ctrl[1] & (RX8025_BIT_CTRL2_DAFG | RX8025_BIT_CTRL2_WAFG)) { dev_warn(&client->dev, "Alarm was detected\n"); need_clear = 1; } if (!(ctrl[1] & RX8025_BIT_CTRL2_CTFG)) need_clear = 1; if (*need_reset || need_clear) { ctrl2 = ctrl[0]; ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET | RX8025_BIT_CTRL2_CTFG | RX8025_BIT_CTRL2_WAFG | RX8025_BIT_CTRL2_DAFG); ctrl2 |= RX8025_BIT_CTRL2_XST; err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2); } out: return err; } /* Alarm support */ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t) { struct rx8025_data *rx8025 = dev_get_drvdata(dev); struct i2c_client *client = rx8025->client; u8 ctrl2, ald[2]; int err; if (client->irq <= 0) return -EINVAL; err = rx8025_read_regs(client, RX8025_REG_ALDMIN, 2, ald); if (err) return err; err = rx8025_read_reg(client, RX8025_REG_CTRL2, &ctrl2); if (err) return err; dev_dbg(dev, "%s: read alarm 0x%02x 0x%02x ctrl2 %02x\n", __func__, ald[0], ald[1], ctrl2); /* Hardware alarms precision is 1 minute! */ t->time.tm_sec = 0; t->time.tm_min = bcd2bin(ald[0] & 0x7f); if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224) t->time.tm_hour = bcd2bin(ald[1] & 0x3f); else t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12 + (ald[1] & 0x20 ? 
12 : 0); t->time.tm_wday = -1; t->time.tm_mday = -1; t->time.tm_mon = -1; t->time.tm_year = -1; dev_dbg(dev, "%s: date: %ds %dm %dh %dmd %dm %dy\n", __func__, t->time.tm_sec, t->time.tm_min, t->time.tm_hour, t->time.tm_mday, t->time.tm_mon, t->time.tm_year); t->enabled = !!(rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE); t->pending = (ctrl2 & RX8025_BIT_CTRL2_DAFG) && t->enabled; return err; } static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct i2c_client *client = to_i2c_client(dev); struct rx8025_data *rx8025 = dev_get_drvdata(dev); u8 ald[2]; int err; if (client->irq <= 0) return -EINVAL; /* Hardware alarm precision is 1 minute! */ ald[0] = bin2bcd(t->time.tm_min); if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224) ald[1] = bin2bcd(t->time.tm_hour); else ald[1] = (t->time.tm_hour >= 12 ? 0x20 : 0) | bin2bcd((t->time.tm_hour + 11) % 12 + 1); dev_dbg(dev, "%s: write 0x%02x 0x%02x\n", __func__, ald[0], ald[1]); if (rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE) { rx8025->ctrl1 &= ~RX8025_BIT_CTRL1_DALE; err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1, rx8025->ctrl1); if (err) return err; } err = rx8025_write_regs(rx8025->client, RX8025_REG_ALDMIN, 2, ald); if (err) return err; if (t->enabled) { rx8025->ctrl1 |= RX8025_BIT_CTRL1_DALE; err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1, rx8025->ctrl1); if (err) return err; } return 0; } static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct rx8025_data *rx8025 = dev_get_drvdata(dev); u8 ctrl1; int err; ctrl1 = rx8025->ctrl1; if (enabled) ctrl1 |= RX8025_BIT_CTRL1_DALE; else ctrl1 &= ~RX8025_BIT_CTRL1_DALE; if (ctrl1 != rx8025->ctrl1) { rx8025->ctrl1 = ctrl1; err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1, rx8025->ctrl1); if (err) return err; } return 0; } static int rx8025_irq_set_state(struct device *dev, int enabled) { struct i2c_client *client = to_i2c_client(dev); struct rx8025_data *rx8025 = i2c_get_clientdata(client); int ctrl1; int err; if (client->irq <= 
0) return -ENXIO; ctrl1 = rx8025->ctrl1 & ~RX8025_BIT_CTRL1_CT; if (enabled) ctrl1 |= RX8025_BIT_CTRL1_CT_1HZ; if (ctrl1 != rx8025->ctrl1) { rx8025->ctrl1 = ctrl1; err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1, rx8025->ctrl1); if (err) return err; } return 0; } static struct rtc_class_ops rx8025_rtc_ops = { .read_time = rx8025_get_time, .set_time = rx8025_set_time, .read_alarm = rx8025_read_alarm, .set_alarm = rx8025_set_alarm, .alarm_irq_enable = rx8025_alarm_irq_enable, .irq_set_state = rx8025_irq_set_state, }; /* * Clock precision adjustment support * * According to the RX8025 SA/NB application manual the frequency and * temperature characteristics can be approximated using the following * equation: * * df = a * (ut - t)**2 * * df: Frequency deviation in any temperature * a : Coefficient = (-35 +-5) * 10**-9 * ut: Ultimate temperature in degree = +25 +-5 degree * t : Any temperature in degree * * Note that the clock adjustment in ppb must be entered (which is * the negative value of the deviation). */ static int rx8025_get_clock_adjust(struct device *dev, int *adj) { struct i2c_client *client = to_i2c_client(dev); u8 digoff; int err; err = rx8025_read_reg(client, RX8025_REG_DIGOFF, &digoff); if (err) return err; *adj = digoff >= 64 ? 
digoff - 128 : digoff; if (*adj > 0) (*adj)--; *adj *= -RX8025_ADJ_RESOLUTION; return 0; } static int rx8025_set_clock_adjust(struct device *dev, int adj) { struct i2c_client *client = to_i2c_client(dev); u8 digoff; int err; adj /= -RX8025_ADJ_RESOLUTION; if (adj > RX8025_ADJ_DATA_MAX) adj = RX8025_ADJ_DATA_MAX; else if (adj < RX8025_ADJ_DATA_MIN) adj = RX8025_ADJ_DATA_MIN; else if (adj > 0) adj++; else if (adj < 0) adj += 128; digoff = adj; err = rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff); if (err) return err; dev_dbg(dev, "%s: write 0x%02x\n", __func__, digoff); return 0; } static ssize_t rx8025_sysfs_show_clock_adjust(struct device *dev, struct device_attribute *attr, char *buf) { int err, adj; err = rx8025_get_clock_adjust(dev, &adj); if (err) return err; return sprintf(buf, "%d\n", adj); } static ssize_t rx8025_sysfs_store_clock_adjust(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int adj, err; if (sscanf(buf, "%i", &adj) != 1) return -EINVAL; err = rx8025_set_clock_adjust(dev, adj); return err ? 
err : count; } static DEVICE_ATTR(clock_adjust_ppb, S_IRUGO | S_IWUSR, rx8025_sysfs_show_clock_adjust, rx8025_sysfs_store_clock_adjust); static int rx8025_sysfs_register(struct device *dev) { return device_create_file(dev, &dev_attr_clock_adjust_ppb); } static void rx8025_sysfs_unregister(struct device *dev) { device_remove_file(dev, &dev_attr_clock_adjust_ppb); } static int __devinit rx8025_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct rx8025_data *rx8025; int err, need_reset = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK)) { dev_err(&adapter->dev, "doesn't support required functionality\n"); err = -EIO; goto errout; } rx8025 = kzalloc(sizeof(*rx8025), GFP_KERNEL); if (!rx8025) { dev_err(&adapter->dev, "failed to alloc memory\n"); err = -ENOMEM; goto errout; } rx8025->client = client; i2c_set_clientdata(client, rx8025); INIT_WORK(&rx8025->work, rx8025_work); err = rx8025_init_client(client, &need_reset); if (err) goto errout_free; if (need_reset) { struct rtc_time tm; dev_info(&client->dev, "bad conditions detected, resetting date\n"); rtc_time_to_tm(0, &tm); /* 1970/1/1 */ rx8025_set_time(&client->dev, &tm); } rx8025->rtc = rtc_device_register(client->name, &client->dev, &rx8025_rtc_ops, THIS_MODULE); if (IS_ERR(rx8025->rtc)) { err = PTR_ERR(rx8025->rtc); dev_err(&client->dev, "unable to register the class device\n"); goto errout_free; } if (client->irq > 0) { dev_info(&client->dev, "IRQ %d supplied\n", client->irq); err = request_irq(client->irq, rx8025_irq, 0, "rx8025", client); if (err) { dev_err(&client->dev, "unable to request IRQ\n"); goto errout_reg; } } rx8025->rtc->irq_freq = 1; rx8025->rtc->max_user_freq = 1; err = rx8025_sysfs_register(&client->dev); if (err) goto errout_irq; return 0; errout_irq: if (client->irq > 0) free_irq(client->irq, client); errout_reg: rtc_device_unregister(rx8025->rtc); errout_free: 
kfree(rx8025); errout: dev_err(&adapter->dev, "probing for rx8025 failed\n"); return err; } static int __devexit rx8025_remove(struct i2c_client *client) { struct rx8025_data *rx8025 = i2c_get_clientdata(client); struct mutex *lock = &rx8025->rtc->ops_lock; if (client->irq > 0) { mutex_lock(lock); rx8025->exiting = 1; mutex_unlock(lock); free_irq(client->irq, client); cancel_work_sync(&rx8025->work); } rx8025_sysfs_unregister(&client->dev); rtc_device_unregister(rx8025->rtc); kfree(rx8025); return 0; } static struct i2c_driver rx8025_driver = { .driver = { .name = "rtc-rx8025", .owner = THIS_MODULE, }, .probe = rx8025_probe, .remove = __devexit_p(rx8025_remove), .id_table = rx8025_id, }; static int __init rx8025_init(void) { return i2c_add_driver(&rx8025_driver); } static void __exit rx8025_exit(void) { i2c_del_driver(&rx8025_driver); } MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); MODULE_DESCRIPTION("RX-8025 SA/NB RTC driver"); MODULE_LICENSE("GPL"); module_init(rx8025_init); module_exit(rx8025_exit);
gpl-2.0
idl3r/P8000-Kernel
drivers/misc/mediatek/imgsensor/src/mt6753/s5k2p8_mipi_raw/s5k2p8_otp.c
108
4868
#include <linux/videodev2.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/cdev.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <asm/atomic.h> #include <linux/slab.h> #include "kd_camera_hw.h" #include "kd_imgsensor.h" #include "kd_imgsensor_define.h" #include "kd_imgsensor_errcode.h" extern int iReadRegI2C(u8 *a_pSendData , u16 a_sizeSendData, u8 * a_pRecvData, u16 a_sizeRecvData, u16 i2cId); extern int iWriteRegI2C(u8 *a_pSendData , u16 a_sizeSendData, u16 i2cId); extern void kdSetI2CSpeed(u16 i2cSpeed); //extern int iBurstWriteReg_multi(u8 *pData, u32 bytes, u16 i2cId, u16 transfer_length); extern int iMultiReadReg(u16 a_u2Addr , u8 * a_puBuff , u16 i2cId, u8 number); #define USHORT unsigned short #define BYTE unsigned char #define Sleep(ms) mdelay(ms) #define EEPROM CAT24C512 #define EEPROM_READ_ID 0xA0 #define EEPROM_WRITE_ID 0xA1 #define I2C_SPEED 400 //CAT24C512 can support 1Mhz #define START_OFFSET 0 #define PAGE_NUM 512 #define EEPROM_PAGE_SIZE 128 //EEPROM size 512x128=65536bytes #define MAX_OFFSET 0xffff #define DATA_SIZE 4096 BYTE eeprom_data[DATA_SIZE]= {0}; static bool get_done = false; static int last_size = 0; static int last_offset = 0; bool byte_write_eeprom(kal_uint16 addr, BYTE data ) { char pu_send_cmd[3] = {(char)(addr >> 8), (char)(addr & 0xFF), (char)(data & 0xFF)}; if(addr > MAX_OFFSET) return false; kdSetI2CSpeed(I2C_SPEED); if(iWriteRegI2C(pu_send_cmd, 3, EEPROM_WRITE_ID)<0) { //printk("byte_write_eeprom fail, addr %x data %d\n",addr,data); return false; } Sleep(7); return true; } /******** Be noted that once your addr are not page-algned, some data may be covered */ bool page_write_eeprom(kal_uint16 addr, BYTE data[], kal_uint32 size) { char pu_send_cmd[EEPROM_PAGE_SIZE+2]; int i = 0; if( (addr+size) > MAX_OFFSET || size > EEPROM_PAGE_SIZE) return false; kdSetI2CSpeed(I2C_SPEED); pu_send_cmd[0] = (char)(addr >> 8); pu_send_cmd[1] = (char)(addr & 0xFF); for(i = 0; i< size; 
i++) { pu_send_cmd[i+2] = (char)(data[i] & 0xFF); } printk("before iBurstWriteReg_multi\n"); if(1)//iBurstWriteReg_multi(pu_send_cmd , size, EEPROM_WRITE_ID, size)<0) //only support in K2 now return false; Sleep(10); return true; } bool selective_read_eeprom(kal_uint16 addr, BYTE* data) { char pu_send_cmd[2] = {(char)(addr >> 8) , (char)(addr & 0xFF) }; if(addr > MAX_OFFSET) return false; kdSetI2CSpeed(I2C_SPEED); if(iReadRegI2C(pu_send_cmd, 2, (u8*)data, 1, EEPROM_READ_ID)<0) return false; return true; } bool sequential_read_eeprom(kal_uint16 addr, BYTE* data, kal_uint32 size) { char pu_send_cmd[2] = {(char)(addr >> 8) , (char)(addr & 0xFF) }; if( (addr+size) > MAX_OFFSET || size > EEPROM_PAGE_SIZE) return false; kdSetI2CSpeed(I2C_SPEED); if( iMultiReadReg(addr , (u8*)data , EEPROM_READ_ID, size) <0) return false; return true; } static bool _wrtie_eeprom(kal_uint16 addr, BYTE data[], kal_uint32 size ){ int i = 0; int offset = addr; for(i = 0; i < size; i++) { //printk("wrtie_eeprom 0x%0x %d\n",offset, data[i]); if(!byte_write_eeprom( offset, data[i])){ return false; } offset++; } get_done = false; return true; } static bool _read_eeprom(kal_uint16 addr, BYTE* data, kal_uint32 size ){ int i = 0; int offset = addr; for(i = 0; i < size; i++) { if(!selective_read_eeprom(offset, &data[i])){ return false; } //printk("read_eeprom 0x%0x %d\n",offset, data[i]); offset++; } get_done = true; last_size = size; last_offset = addr; return true; } bool read_eeprom( kal_uint16 addr, BYTE* data, kal_uint32 size){ if(!get_done || last_size != size || last_offset != addr) { if(!_read_eeprom(addr, eeprom_data, size)){ get_done = 0; last_size = 0; last_offset = 0; return false; } } memcpy(data, eeprom_data, size); return true; } bool wrtie_eeprom(kal_uint16 addr, BYTE data[],kal_uint32 size ){ return _wrtie_eeprom(addr, data, size); } bool wrtie_eeprom_fast(kal_uint16 addr, BYTE data[],kal_uint32 size ){ bool ret = false; int size_to_send = size; printk("wrtie_eeprom_fast\n"); if( 
(addr&0xff) == 0 ){//align page #if 0 if(size < EEPROM_PAGE_SIZE+1) { ret = page_write_eeprom(addr, data, size); } else #endif { printk("before page_write_eeprom\n"); for(; size_to_send > 0; size_to_send -= EEPROM_PAGE_SIZE) { ret = page_write_eeprom( addr, data, size_to_send > EEPROM_PAGE_SIZE ? EEPROM_PAGE_SIZE : size_to_send); if(!ret) { break; } data+=EEPROM_PAGE_SIZE; printk("after page_write_eeprom %d\n",size_to_send); } printk("after page_write_eeprom\n"); } } else { ret = _wrtie_eeprom(addr, data, size); } return ret; }
gpl-2.0
arn4v/yu_msm8916
drivers/mtd/ubi/fastmap.c
364
43258
/* * Copyright (c) 2012 Linutronix GmbH * Copyright (c) 2014, Linux Foundation. All rights reserved. * * Author: Richard Weinberger <richard@nod.at> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * */ #include <linux/crc32.h> #include "ubi.h" /** * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device. * @ubi: UBI device description object */ size_t ubi_calc_fm_size(struct ubi_device *ubi) { size_t size; size = sizeof(struct ubi_fm_hdr) + \ sizeof(struct ubi_fm_scan_pool) + \ sizeof(struct ubi_fm_scan_pool) + \ (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \ (sizeof(struct ubi_fm_eba) + \ (ubi->peb_count * sizeof(__be32)) + \ (ubi->peb_count * sizeof(__be32))) + \ sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES; return roundup(size, ubi->leb_size); } /** * new_fm_vhdr - allocate a new volume header for fastmap usage. * @ubi: UBI device description object * @vol_id: the VID of the new header * * Returns a new struct ubi_vid_hdr on success. * NULL indicates out of memory. */ static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id) { struct ubi_vid_hdr *new; new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!new) goto out; new->vol_type = UBI_VID_DYNAMIC; new->vol_id = cpu_to_be32(vol_id); /* UBI implementations without fastmap support have to delete the * fastmap. */ new->compat = UBI_COMPAT_DELETE; out: return new; } /** * add_aeb - create and add a attach erase block to a given list. 
* @ai: UBI attach info object * @list: the target list * @pnum: PEB number of the new attach erase block * @ec: erease counter of the new LEB * @last_erase_time: last erase time stamp (%UBI_UNKNOWN if it * is unknown) * @rc: read counter (%UBI_UNKNOWN if it is unknown) * @scrub: scrub this PEB after attaching * * Returns 0 on success, < 0 indicates an internal error. */ static int add_aeb(struct ubi_attach_info *ai, struct list_head *list, int pnum, int ec, unsigned long last_erase_time, int rc, int scrub) { struct ubi_ainf_peb *aeb; aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); if (!aeb) return -ENOMEM; aeb->pnum = pnum; aeb->ec = ec; aeb->rc = rc; aeb->last_erase_time = last_erase_time; aeb->lnum = -1; aeb->scrub = scrub; aeb->copy_flag = aeb->sqnum = 0; ai->ec_sum += aeb->ec; ai->ec_count++; if (ai->max_ec < aeb->ec) ai->max_ec = aeb->ec; if (ai->min_ec > aeb->ec) ai->min_ec = aeb->ec; ai->last_erase_time_sum += aeb->last_erase_time; ai->last_erase_time_count++; list_add_tail(&aeb->u.list, list); return 0; } /** * add_vol - create and add a new volume to ubi_attach_info. * @ai: ubi_attach_info object * @vol_id: VID of the new volume * @used_ebs: number of used EBS * @data_pad: data padding value of the new volume * @vol_type: volume type * @last_eb_bytes: number of bytes in the last LEB * * Returns the new struct ubi_ainf_volume on success. * NULL indicates an error. 
*/ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id, int used_ebs, int data_pad, u8 vol_type, int last_eb_bytes) { struct ubi_ainf_volume *av; struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; while (*p) { parent = *p; av = rb_entry(parent, struct ubi_ainf_volume, rb); if (vol_id > av->vol_id) p = &(*p)->rb_left; else if (vol_id > av->vol_id) p = &(*p)->rb_right; } av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL); if (!av) goto out; av->highest_lnum = av->leb_count = 0; av->vol_id = vol_id; av->used_ebs = used_ebs; av->data_pad = data_pad; av->last_data_size = last_eb_bytes; av->compat = 0; av->vol_type = vol_type; av->root = RB_ROOT; dbg_bld("found volume (ID %i)", vol_id); rb_link_node(&av->rb, parent, p); rb_insert_color(&av->rb, &ai->volumes); out: return av; } /** * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it * from it's original list. * @ai: ubi_attach_info object * @aeb: the to be assigned SEB * @av: target scan volume */ static void assign_aeb_to_av(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb, struct ubi_ainf_volume *av) { struct ubi_ainf_peb *tmp_aeb; struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; p = &av->root.rb_node; while (*p) { parent = *p; tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); if (aeb->lnum != tmp_aeb->lnum) { if (aeb->lnum < tmp_aeb->lnum) p = &(*p)->rb_left; else p = &(*p)->rb_right; continue; } else break; } list_del(&aeb->u.list); av->leb_count++; rb_link_node(&aeb->u.rb, parent, p); rb_insert_color(&aeb->u.rb, &av->root); } /** * update_vol - inserts or updates a LEB which was found a pool. * @ubi: the UBI device object * @ai: attach info object * @av: the volume this LEB belongs to * @new_vh: the volume header derived from new_aeb * @new_aeb: the AEB to be examined * * Returns 0 on success, < 0 indicates an internal error. 
*/ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai, struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh, struct ubi_ainf_peb *new_aeb) { struct rb_node **p = &av->root.rb_node, *parent = NULL; struct ubi_ainf_peb *aeb, *victim; int cmp_res; while (*p) { parent = *p; aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); if (be32_to_cpu(new_vh->lnum) != aeb->lnum) { if (be32_to_cpu(new_vh->lnum) < aeb->lnum) p = &(*p)->rb_left; else p = &(*p)->rb_right; continue; } /* This case can happen if the fastmap gets written * because of a volume change (creation, deletion, ..). * Then a PEB can be within the persistent EBA and the pool. */ if (aeb->pnum == new_aeb->pnum) { ubi_assert(aeb->lnum == new_aeb->lnum); kmem_cache_free(ai->aeb_slab_cache, new_aeb); return 0; } cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh); if (cmp_res < 0) return cmp_res; /* new_aeb is newer */ if (cmp_res & 1) { victim = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); if (!victim) return -ENOMEM; victim->ec = aeb->ec; victim->last_erase_time = aeb->last_erase_time; victim->rc = aeb->rc; victim->pnum = aeb->pnum; list_add_tail(&victim->u.list, &ai->erase); if (av->highest_lnum == be32_to_cpu(new_vh->lnum)) av->last_data_size = \ be32_to_cpu(new_vh->data_size); dbg_bld("vol %i: AEB %i's PEB %i is the newer", av->vol_id, aeb->lnum, new_aeb->pnum); aeb->ec = new_aeb->ec; aeb->last_erase_time = new_aeb->last_erase_time; aeb->rc = new_aeb->rc; aeb->pnum = new_aeb->pnum; aeb->copy_flag = new_vh->copy_flag; aeb->scrub = new_aeb->scrub; kmem_cache_free(ai->aeb_slab_cache, new_aeb); /* new_aeb is older */ } else { dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it", av->vol_id, aeb->lnum, new_aeb->pnum); list_add_tail(&new_aeb->u.list, &ai->erase); } return 0; } /* This LEB is new, last_erase_time's add it to the volume */ if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) { av->highest_lnum = be32_to_cpu(new_vh->lnum); av->last_data_size = 
be32_to_cpu(new_vh->data_size); } if (av->vol_type == UBI_STATIC_VOLUME) av->used_ebs = be32_to_cpu(new_vh->used_ebs); av->leb_count++; rb_link_node(&new_aeb->u.rb, parent, p); rb_insert_color(&new_aeb->u.rb, &av->root); return 0; } /** * process_pool_aeb - we found a non-empty PEB in a pool. * @ubi: UBI device object * @ai: attach info object * @new_vh: the volume header derived from new_aeb * @new_aeb: the AEB to be examined * * Returns 0 on success, < 0 indicates an internal error. */ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai, struct ubi_vid_hdr *new_vh, struct ubi_ainf_peb *new_aeb) { struct ubi_ainf_volume *av, *tmp_av = NULL; struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; int found = 0; if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID || be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) { kmem_cache_free(ai->aeb_slab_cache, new_aeb); return 0; } /* Find the volume this SEB belongs to */ while (*p) { parent = *p; tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb); if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id) p = &(*p)->rb_left; else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id) p = &(*p)->rb_right; else { found = 1; break; } } if (found) av = tmp_av; else { ubi_err(ubi->ubi_num, "orphaned volume in fastmap pool!"); return UBI_BAD_FASTMAP; } ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id); return update_vol(ubi, ai, av, new_vh, new_aeb); } /** * unmap_peb - unmap a PEB. * If fastmap detects a free PEB in the pool it has to check whether * this PEB has been unmapped after writing the fastmap. 
* * @ai: UBI attach info object * @pnum: The PEB to be unmapped */ static void unmap_peb(struct ubi_attach_info *ai, int pnum) { struct ubi_ainf_volume *av; struct rb_node *node, *node2; struct ubi_ainf_peb *aeb; for (node = rb_first(&ai->volumes); node; node = rb_next(node)) { av = rb_entry(node, struct ubi_ainf_volume, rb); for (node2 = rb_first(&av->root); node2; node2 = rb_next(node2)) { aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb); if (aeb->pnum == pnum) { rb_erase(&aeb->u.rb, &av->root); kmem_cache_free(ai->aeb_slab_cache, aeb); return; } } } } /** * scan_pool - scans a pool for changed (no longer empty PEBs). * @ubi: UBI device object * @ai: attach info object * @pebs: an array of all PEB numbers in the to be scanned pool * @pool_size: size of the pool (number of entries in @pebs) * @max_sqnum: pointer to the maximal sequence number * @eba_orphans: list of PEBs which need to be scanned * @free: list of PEBs which are most likely free (and go into @ai->free) * * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned. * < 0 indicates an internal error. 
*/ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, int *pebs, int pool_size, unsigned long long *max_sqnum, struct list_head *eba_orphans, struct list_head *free) { struct ubi_vid_hdr *vh; struct ubi_ec_hdr *ech; struct ubi_ainf_peb *new_aeb, *tmp_aeb; int i, pnum, err, found_orphan, ret = 0; ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ech) return -ENOMEM; vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!vh) { kfree(ech); return -ENOMEM; } dbg_bld("scanning fastmap pool: size = %i", pool_size); /* * Now scan all PEBs in the pool to find changes which have been made * after the creation of the fastmap */ for (i = 0; i < pool_size; i++) { int scrub = 0; int image_seq; pnum = be32_to_cpu(pebs[i]); if (ubi_io_is_bad(ubi, pnum)) { ubi_err(ubi->ubi_num, "bad PEB in fastmap pool!"); ret = UBI_BAD_FASTMAP; goto out; } err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); if (err && err != UBI_IO_BITFLIPS) { ubi_err(ubi->ubi_num, "unable to read EC header! PEB:%i err:%i", pnum, err); ret = err > 0 ? UBI_BAD_FASTMAP : err; goto out; } else if (ret == UBI_IO_BITFLIPS) scrub = 1; /* * Older UBI implementations have image_seq set to zero, so * we shouldn't fail if image_seq == 0. 
*/ image_seq = be32_to_cpu(ech->image_seq); if (image_seq && (image_seq != ubi->image_seq)) { ubi_err(ubi->ubi_num, "bad image seq: 0x%x, expected: 0x%x", be32_to_cpu(ech->image_seq), ubi->image_seq); ret = UBI_BAD_FASTMAP; goto out; } err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) { unsigned long long ec = be64_to_cpu(ech->ec); unsigned long long last_erase_time = be64_to_cpu(ech->last_erase_time); unmap_peb(ai, pnum); dbg_bld("Adding PEB to free: %i", pnum); if (err == UBI_IO_FF_BITFLIPS) add_aeb(ai, free, pnum, ec, last_erase_time, 0, 1); else add_aeb(ai, free, pnum, ec, last_erase_time, 0, 0); continue; } else if (err == 0 || err == UBI_IO_BITFLIPS) { dbg_bld("Found non empty PEB:%i in pool", pnum); if (err == UBI_IO_BITFLIPS) scrub = 1; found_orphan = 0; list_for_each_entry(tmp_aeb, eba_orphans, u.list) { if (tmp_aeb->pnum == pnum) { found_orphan = 1; break; } } if (found_orphan) { kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); list_del(&tmp_aeb->u.list); } new_aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); if (!new_aeb) { ret = -ENOMEM; goto out; } new_aeb->ec = be64_to_cpu(ech->ec); new_aeb->last_erase_time = be64_to_cpu(ech->last_erase_time); new_aeb->rc = UBI_DEF_RD_THRESHOLD; new_aeb->pnum = pnum; new_aeb->lnum = be32_to_cpu(vh->lnum); new_aeb->sqnum = be64_to_cpu(vh->sqnum); new_aeb->copy_flag = vh->copy_flag; new_aeb->scrub = scrub; if (*max_sqnum < new_aeb->sqnum) *max_sqnum = new_aeb->sqnum; err = process_pool_aeb(ubi, ai, vh, new_aeb); if (err) { ret = err > 0 ? UBI_BAD_FASTMAP : err; goto out; } } else { /* We are paranoid and fall back to scanning mode */ ubi_err(ubi->ubi_num, "fastmap pool PEBs contains damaged PEBs!"); ret = err > 0 ? UBI_BAD_FASTMAP : err; goto out; } } out: ubi_free_vid_hdr(ubi, vh); kfree(ech); return ret; } /** * count_fastmap_pebs - Counts the PEBs found by fastmap. 
* @ai: The UBI attach info object */ static int count_fastmap_pebs(struct ubi_attach_info *ai) { struct ubi_ainf_peb *aeb; struct ubi_ainf_volume *av; struct rb_node *rb1, *rb2; int n = 0; list_for_each_entry(aeb, &ai->erase, u.list) n++; list_for_each_entry(aeb, &ai->free, u.list) n++; ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) n++; return n; } /** * ubi_attach_fastmap - creates ubi_attach_info from a fastmap. * @ubi: UBI device object * @ai: UBI attach info object * @fm: the fastmap to be attached * * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable. * < 0 indicates an internal error. */ static int ubi_attach_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, struct ubi_fastmap_layout *fm) { struct list_head used, eba_orphans, free; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb; struct ubi_ec_hdr *ech; struct ubi_fm_sb *fmsb; struct ubi_fm_hdr *fmhdr; struct ubi_fm_scan_pool *fmpl1, *fmpl2; struct ubi_fm_ec *fmec; struct ubi_fm_volhdr *fmvhdr; struct ubi_fm_eba *fm_eba; int ret, i, j, pool_size, wl_pool_size; size_t fm_pos = 0, fm_size = ubi->fm_size; unsigned long long max_sqnum = 0; void *fm_raw = ubi->fm_buf; INIT_LIST_HEAD(&used); INIT_LIST_HEAD(&free); INIT_LIST_HEAD(&eba_orphans); INIT_LIST_HEAD(&ai->corr); INIT_LIST_HEAD(&ai->free); INIT_LIST_HEAD(&ai->erase); INIT_LIST_HEAD(&ai->alien); ai->volumes = RB_ROOT; ai->min_ec = UBI_MAX_ERASECOUNTER; ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab", sizeof(struct ubi_ainf_peb), 0, 0, NULL); if (!ai->aeb_slab_cache) { ret = -ENOMEM; goto fail; } fmsb = (struct ubi_fm_sb *)(fm_raw); ai->max_sqnum = fmsb->sqnum; fm_pos += sizeof(struct ubi_fm_sb); if (fm_pos >= fm_size) goto fail_bad; fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos); fm_pos += sizeof(*fmhdr); if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) { ubi_err(ubi->ubi_num, "bad fastmap header 
magic: 0x%x, expected: 0x%x", be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC); goto fail_bad; } fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); fm_pos += sizeof(*fmpl1); if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) { ubi_err(ubi->ubi_num, "bad fastmap pool magic: 0x%x, expected: 0x%x", be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC); goto fail_bad; } fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); fm_pos += sizeof(*fmpl2); if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) { ubi_err(ubi->ubi_num, "bad fastmap pool magic: 0x%x, expected: 0x%x", be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC); goto fail_bad; } pool_size = be16_to_cpu(fmpl1->size); wl_pool_size = be16_to_cpu(fmpl2->size); fm->max_pool_size = be16_to_cpu(fmpl1->max_size); fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size); if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) { ubi_err(ubi->ubi_num, "bad pool size: %i", pool_size); goto fail_bad; } if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) { ubi_err(ubi->ubi_num, "bad WL pool size: %i", wl_pool_size); goto fail_bad; } if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE || fm->max_pool_size < 0) { ubi_err(ubi->ubi_num, "bad maximal pool size: %i", fm->max_pool_size); goto fail_bad; } if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE || fm->max_wl_pool_size < 0) { ubi_err(ubi->ubi_num, "bad maximal WL pool size: %i", fm->max_wl_pool_size); goto fail_bad; } /* read EC values from free list */ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) { fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad; add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum), be32_to_cpu(fmec->ec), be64_to_cpu(fmec->last_erase_time), be32_to_cpu(fmec->rc), 0); } /* read EC values from used list */ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) { fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fm_pos += sizeof(*fmec); if 
(fm_pos >= fm_size) goto fail_bad; add_aeb(ai, &used, be32_to_cpu(fmec->pnum), be32_to_cpu(fmec->ec), be64_to_cpu(fmec->last_erase_time), be32_to_cpu(fmec->rc), 0); } /* read EC values from scrub list */ for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) { fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad; add_aeb(ai, &used, be32_to_cpu(fmec->pnum), be32_to_cpu(fmec->ec), be64_to_cpu(fmec->last_erase_time), be32_to_cpu(fmec->rc), 1); } /* read EC values from erase list */ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) { fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad; add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum), be32_to_cpu(fmec->ec), be64_to_cpu(fmec->last_erase_time), be32_to_cpu(fmec->rc), 1); } ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count); ai->mean_last_erase_time = div_u64(ai->last_erase_time_sum, ai->last_erase_time_count); ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count); /* Iterate over all volumes and read their EBA table */ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) { fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos); fm_pos += sizeof(*fmvhdr); if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) { ubi_err(ubi->ubi_num, "bad fastmap vol header magic: 0x%x, expected: 0x%x", be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC); goto fail_bad; } av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id), be32_to_cpu(fmvhdr->used_ebs), be32_to_cpu(fmvhdr->data_pad), fmvhdr->vol_type, be32_to_cpu(fmvhdr->last_eb_bytes)); if (!av) goto fail_bad; ai->vols_found++; if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id)) ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id); fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos); fm_pos += sizeof(*fm_eba); fm_pos += 2 * (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs)); if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fm_eba->magic) != 
UBI_FM_EBA_MAGIC) { ubi_err(ubi->ubi_num, "bad fastmap EBA header magic: 0x%x, " "expected: 0x%x", be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC); goto fail_bad; } for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) { int pnum = be32_to_cpu(fm_eba->peb_data[j].pnum); if ((int)be32_to_cpu(fm_eba->peb_data[j].pnum) < 0) continue; aeb = NULL; list_for_each_entry(tmp_aeb, &used, u.list) { if (tmp_aeb->pnum == pnum) { aeb = tmp_aeb; break; } } /* This can happen if a PEB is already in an EBA known * by this fastmap but the PEB itself is not in the used * list. * In this case the PEB can be within the fastmap pool * or while writing the fastmap it was in the protection * queue. */ if (!aeb) { aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); if (!aeb) { ret = -ENOMEM; goto fail; } aeb->lnum = j; aeb->pnum = be32_to_cpu(fm_eba->peb_data[j].pnum); aeb->ec = UBI_UNKNOWN; aeb->rc = be32_to_cpu(fm_eba->peb_data[j].rc); aeb->last_erase_time = UBI_UNKNOWN; aeb->scrub = aeb->copy_flag = aeb->sqnum = 0; list_add_tail(&aeb->u.list, &eba_orphans); continue; } aeb->lnum = j; if (av->highest_lnum <= aeb->lnum) av->highest_lnum = aeb->lnum; assign_aeb_to_av(ai, aeb, av); dbg_bld("inserting PEB:%i (LEB %i) to vol %i", aeb->pnum, aeb->lnum, av->vol_id); } ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ech) { ret = -ENOMEM; goto fail; } list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) { int err; if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) { ubi_err(ubi->ubi_num, "bad PEB in fastmap EBA orphan list"); ret = UBI_BAD_FASTMAP; kfree(ech); goto fail; } err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0); if (err && err != UBI_IO_BITFLIPS) { ubi_err(ubi->ubi_num, "unable to read EC header! PEB:%i err:%i", tmp_aeb->pnum, err); ret = err > 0 ? 
UBI_BAD_FASTMAP : err; kfree(ech); goto fail; } else if (err == UBI_IO_BITFLIPS) tmp_aeb->scrub = 1; tmp_aeb->ec = be64_to_cpu(ech->ec); tmp_aeb->last_erase_time = be64_to_cpu(ech->last_erase_time); tmp_aeb->rc = UBI_DEF_RD_THRESHOLD; assign_aeb_to_av(ai, tmp_aeb, av); } kfree(ech); } ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &eba_orphans, &free); if (ret) goto fail; ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &eba_orphans, &free); if (ret) goto fail; if (max_sqnum > ai->max_sqnum) ai->max_sqnum = max_sqnum; list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) list_move_tail(&tmp_aeb->u.list, &ai->free); ubi_assert(list_empty(&used)); ubi_assert(list_empty(&eba_orphans)); ubi_assert(list_empty(&free)); /* * If fastmap is leaking PEBs (must not happen), raise a * fat warning and fall back to scanning mode. * We do this here because in ubi_wl_init() it's too late * and we cannot fall back to scanning. */ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count - ai->bad_peb_count - fm->used_blocks)) goto fail_bad; return 0; fail_bad: ret = UBI_BAD_FASTMAP; fail: list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) { kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); list_del(&tmp_aeb->u.list); } list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) { kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); list_del(&tmp_aeb->u.list); } list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) { kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); list_del(&tmp_aeb->u.list); } return ret; } /** * ubi_scan_fastmap - scan the fastmap. * @ubi: UBI device object * @ai: UBI attach info to be filled * @fm_anchor: The fastmap starts at this PEB * * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found, * UBI_BAD_FASTMAP if one was found but is not usable. * < 0 indicates an internal error. 
*/ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, int fm_anchor) { struct ubi_fm_sb *fmsb, *fmsb2; struct ubi_vid_hdr *vh; struct ubi_ec_hdr *ech; struct ubi_fastmap_layout *fm; int i, used_blocks, pnum, ret = 0; size_t fm_size; __be32 crc, tmp_crc; unsigned long long sqnum = 0; mutex_lock(&ubi->fm_mutex); memset(ubi->fm_buf, 0, ubi->fm_size); fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL); if (!fmsb) { ret = -ENOMEM; goto out; } fm = kzalloc(sizeof(*fm), GFP_KERNEL); if (!fm) { ret = -ENOMEM; kfree(fmsb); goto out; } ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb)); if (ret && ret != UBI_IO_BITFLIPS) goto free_fm_sb; else if (ret == UBI_IO_BITFLIPS) fm->to_be_tortured[0] = 1; if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) { ubi_err(ubi->ubi_num, "bad super block magic: 0x%x, expected: 0x%x", be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } if (fmsb->version != UBI_FM_FMT_VERSION) { ubi_err(ubi->ubi_num, "bad fastmap version: %i, expected: %i", fmsb->version, UBI_FM_FMT_VERSION); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } used_blocks = be32_to_cpu(fmsb->used_blocks); if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) { ubi_err(ubi->ubi_num, "number of fastmap blocks is invalid: %i", used_blocks); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } fm_size = ubi->leb_size * used_blocks; if (fm_size != ubi->fm_size) { ubi_err(ubi->ubi_num, "bad fastmap size: %zi, expected: %zi", fm_size, ubi->fm_size); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ech) { ret = -ENOMEM; goto free_fm_sb; } vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!vh) { ret = -ENOMEM; goto free_hdr; } for (i = 0; i < used_blocks; i++) { int image_seq; pnum = be32_to_cpu(fmsb->block_loc[i]); if (ubi_io_is_bad(ubi, pnum)) { ret = UBI_BAD_FASTMAP; goto free_hdr; } ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); if (ret && ret != UBI_IO_BITFLIPS) { ubi_err(ubi->ubi_num, "unable to 
read fastmap block# %i EC (PEB: %i)", i, pnum); if (ret > 0) ret = UBI_BAD_FASTMAP; goto free_hdr; } else if (ret == UBI_IO_BITFLIPS) fm->to_be_tortured[i] = 1; image_seq = be32_to_cpu(ech->image_seq); if (!ubi->image_seq) ubi->image_seq = image_seq; /* * Older UBI implementations have image_seq set to zero, so * we shouldn't fail if image_seq == 0. */ if (image_seq && (image_seq != ubi->image_seq)) { ubi_err(ubi->ubi_num, "wrong image seq:%d instead of %d", be32_to_cpu(ech->image_seq), ubi->image_seq); ret = UBI_BAD_FASTMAP; goto free_hdr; } ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); if (ret && ret != UBI_IO_BITFLIPS) { ubi_err(ubi->ubi_num, "unable to read fastmap block# %i (PEB: %i)", i, pnum); goto free_hdr; } if (i == 0) { if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) { ubi_err(ubi->ubi_num, "bad fastmap anchor vol_id: 0x%x," " expected: 0x%x", be32_to_cpu(vh->vol_id), UBI_FM_SB_VOLUME_ID); ret = UBI_BAD_FASTMAP; goto free_hdr; } } else { if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) { ubi_err(ubi->ubi_num, "bad fastmap data vol_id: 0x%x," " expected: 0x%x", be32_to_cpu(vh->vol_id), UBI_FM_DATA_VOLUME_ID); ret = UBI_BAD_FASTMAP; goto free_hdr; } } if (sqnum < be64_to_cpu(vh->sqnum)) sqnum = be64_to_cpu(vh->sqnum); ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum, ubi->leb_start, ubi->leb_size); if (ret && ret != UBI_IO_BITFLIPS) { ubi_err(ubi->ubi_num, "unable to read fastmap block# %i (PEB: %i, " "err: %i)", i, pnum, ret); goto free_hdr; } } kfree(fmsb); fmsb = NULL; fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf); tmp_crc = be32_to_cpu(fmsb2->data_crc); fmsb2->data_crc = 0; crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size); if (crc != tmp_crc) { ubi_err(ubi->ubi_num, "fastmap data CRC is invalid"); ubi_err(ubi->ubi_num, "CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc); ret = UBI_BAD_FASTMAP; goto free_hdr; } fmsb2->sqnum = sqnum; fm->used_blocks = used_blocks; ret = ubi_attach_fastmap(ubi, ai, fm); if (ret) { if (ret > 0) ret = 
UBI_BAD_FASTMAP; goto free_hdr; } for (i = 0; i < used_blocks; i++) { struct ubi_wl_entry *e; e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) { while (i--) kfree(fm->e[i]); ret = -ENOMEM; goto free_hdr; } e->pnum = be32_to_cpu(fmsb2->block_loc[i]); e->ec = be32_to_cpu(fmsb2->block_ec[i]); e->last_erase_time = be64_to_cpu(fmsb2->block_let[i]); e->rc = be32_to_cpu(fmsb2->block_rc[i]); fm->e[i] = e; } ubi->fm = fm; ubi->fm_pool.max_size = ubi->fm->max_pool_size; ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size; ubi_msg(ubi->ubi_num, "attached by fastmap"); ubi_msg(ubi->ubi_num, "fastmap pool size: %d", ubi->fm_pool.max_size); ubi_msg(ubi->ubi_num, "fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); ubi->fm_disabled = 0; ubi_free_vid_hdr(ubi, vh); kfree(ech); out: mutex_unlock(&ubi->fm_mutex); if (ret == UBI_BAD_FASTMAP) ubi_err(ubi->ubi_num, "Attach by fastmap failed, doing a full scan!"); return ret; free_hdr: ubi_free_vid_hdr(ubi, vh); kfree(ech); free_fm_sb: kfree(fmsb); kfree(fm); goto out; } /** * ubi_write_fastmap - writes a fastmap. * @ubi: UBI device object * @new_fm: the to be written fastmap * * Returns 0 on success, < 0 indicates an internal error. 
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct rb_node *node;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;

	/* The fastmap image is serialized into ubi->fm_buf at fm_pos. */
	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/*
	 * Both locks are held while the in-memory state (volumes, WL trees,
	 * pools, work list) is serialized, so the snapshot is consistent.
	 * NOTE(review): lock order here is volumes_lock then wl_lock —
	 * presumably the file-wide convention; confirm against other callers.
	 */
	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	/* Serialize the two PEB pools (normal and WL pool). */
	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++)
		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++)
		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);

	/*
	 * One ubi_fm_ec record (pnum, EC, last erase time, read counter) per
	 * PEB in the free tree.
	 */
	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);
		fec->last_erase_time = cpu_to_be64(wl_e->last_erase_time);
		fec->rc = cpu_to_be32(wl_e->rc);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	/* Same record format for the used tree. */
	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);
		fec->last_erase_time = cpu_to_be64(wl_e->last_erase_time);
		fec->rc = cpu_to_be32(wl_e->rc);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	/* ...and for the scrub tree. */
	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);
		fec->last_erase_time = cpu_to_be64(wl_e->last_erase_time);
		fec->rc = cpu_to_be32(wl_e->rc);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	/* PEBs with a pending erase work are recorded as "erase" entries. */
	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			fec->ec = cpu_to_be32(wl_e->ec);
			fec->last_erase_time = cpu_to_be64(wl_e->last_erase_time);
			fec->rc = cpu_to_be32(wl_e->rc);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	/* Per-volume header followed by its EBA (LEB -> PEB) table. */
	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		/*
		 * Each EBA slot stores two __be32 values (pnum and read
		 * counter), hence the factor of 2.
		 */
		fm_pos += sizeof(*feba) + 2 * (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++) {
			feba->peb_data[j].pnum = cpu_to_be32(vol->eba_tbl[j]);
			/* Unmapped LEBs get UBI_UNKNOWN as read counter. */
			feba->peb_data[j].rc = cpu_to_be32(UBI_UNKNOWN);
			if (vol->eba_tbl[j] >= 0 &&
			    ubi->lookuptbl[vol->eba_tbl[j]])
				feba->peb_data[j].rc = cpu_to_be32(
					ubi->lookuptbl[vol->eba_tbl[j]]->rc);
		}

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	/* Snapshot complete — now write the image to flash. */
	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi->ubi_num, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
		fmsb->block_let[i] = cpu_to_be64(new_fm->e[i]->last_erase_time);
		fmsb->block_rc[i] = cpu_to_be32(new_fm->e[i]->rc);
	}

	/* CRC is computed with the data_crc field itself zeroed. */
	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	/* Write VID headers for the data blocks (block 0 got avhdr above). */
	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi->ubi_num, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
			new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
		if (ret) {
			ubi_err(ubi->ubi_num, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;
	struct timeval tv;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	/* The old EC header is needed to carry the erase counter forward. */
	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	/* On success ubi_io_sync_erase() returns the number of erasures. */
	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	do_gettimeofday(&tv);
	/* The last erase time resolution is in days */
	ec_hdr->last_erase_time = cpu_to_be64(tv.tv_sec / NUM_SEC_IN_DAY);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 * @fm: the fastmap to be destroyed
 *
 * Returns 0 on success, < 0 indicates an internal error.
*/ static int invalidate_fastmap(struct ubi_device *ubi, struct ubi_fastmap_layout *fm) { int ret; struct ubi_vid_hdr *vh; struct timeval tv; ret = erase_block(ubi, fm->e[0]->pnum); if (ret < 0) return ret; fm->e[0]->ec = ret; do_gettimeofday(&tv); /* The last erase time resolution is in days */ fm->e[0]->last_erase_time = tv.tv_sec / NUM_SEC_IN_DAY; fm->e[0]->rc = 0; vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); if (!vh) return -ENOMEM; /* deleting the current fastmap SB is not enough, an old SB may exist, * so create a (corrupted) SB such that fastmap will find it and fall * back to scanning mode in any case */ vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh); return ret; } /** * ubi_update_fastmap - will be called by UBI if a volume changes or * a fastmap pool becomes full. * @ubi: UBI device object * * Returns 0 on success, < 0 indicates an internal error. */ int ubi_update_fastmap(struct ubi_device *ubi) { int ret, i; struct ubi_fastmap_layout *new_fm, *old_fm; struct ubi_wl_entry *tmp_e; struct timeval tv; do_gettimeofday(&tv); mutex_lock(&ubi->fm_mutex); ubi_refill_pools(ubi); if (ubi->ro_mode || ubi->fm_disabled) { mutex_unlock(&ubi->fm_mutex); return 0; } ret = ubi_ensure_anchor_pebs(ubi); if (ret) { mutex_unlock(&ubi->fm_mutex); return ret; } new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); if (!new_fm) { mutex_unlock(&ubi->fm_mutex); return -ENOMEM; } new_fm->used_blocks = ubi->fm_size / ubi->leb_size; for (i = 0; i < new_fm->used_blocks; i++) { new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!new_fm->e[i]) { while (i--) kfree(new_fm->e[i]); kfree(new_fm); mutex_unlock(&ubi->fm_mutex); return -ENOMEM; } } old_fm = ubi->fm; ubi->fm = NULL; if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) { ubi_err(ubi->ubi_num, "fastmap too large"); ret = -ENOSPC; goto err; } for (i = 1; i < new_fm->used_blocks; i++) { spin_lock(&ubi->wl_lock); tmp_e = ubi_wl_get_fm_peb(ubi, 0); spin_unlock(&ubi->wl_lock); if 
(!tmp_e && !old_fm) { int j; ubi_err(ubi->ubi_num, "could not get any free erase block"); for (j = 1; j < i; j++) ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0); ret = -ENOSPC; goto err; } else if (!tmp_e && old_fm) { ret = erase_block(ubi, old_fm->e[i]->pnum); if (ret < 0) { int j; for (j = 1; j < i; j++) ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0); ubi_err(ubi->ubi_num, "could not erase old fastmap PEB"); goto err; } new_fm->e[i]->pnum = old_fm->e[i]->pnum; new_fm->e[i]->ec = old_fm->e[i]->ec = ret; /* The last erase time resolution is in days */ new_fm->e[i]->last_erase_time = tv.tv_sec / NUM_SEC_IN_DAY; old_fm->e[i]->last_erase_time = tv.tv_sec / NUM_SEC_IN_DAY; new_fm->e[i]->rc = old_fm->e[i]->rc = 0; } else { new_fm->e[i]->pnum = tmp_e->pnum; new_fm->e[i]->ec = tmp_e->ec; new_fm->e[i]->rc = tmp_e->rc; new_fm->e[i]->last_erase_time = tmp_e->last_erase_time; if (old_fm) ubi_wl_put_fm_peb(ubi, old_fm->e[i], i, old_fm->to_be_tortured[i]); } } spin_lock(&ubi->wl_lock); tmp_e = ubi_wl_get_fm_peb(ubi, 1); spin_unlock(&ubi->wl_lock); if (old_fm) { /* no fresh anchor PEB was found, reuse the old one */ if (!tmp_e) { ret = erase_block(ubi, old_fm->e[0]->pnum); if (ret < 0) { int i; ubi_err(ubi->ubi_num, "could not erase old anchor PEB"); for (i = 1; i < new_fm->used_blocks; i++) ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0); goto err; } new_fm->e[0]->pnum = old_fm->e[0]->pnum; new_fm->e[0]->ec = old_fm->e[0]->ec = ret; /* The last erase time resolution is in days */ new_fm->e[0]->last_erase_time = tv.tv_sec / NUM_SEC_IN_DAY; old_fm->e[0]->last_erase_time = tv.tv_sec / NUM_SEC_IN_DAY; new_fm->e[0]->rc = old_fm->e[0]->rc = 0; } else { /* we've got a new anchor PEB, return the old one */ ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0, old_fm->to_be_tortured[0]); new_fm->e[0]->pnum = tmp_e->pnum; new_fm->e[0]->ec = tmp_e->ec; new_fm->e[0]->last_erase_time = tmp_e->last_erase_time; new_fm->e[0]->rc = tmp_e->rc; } } else { if (!tmp_e) { int i; ubi_err(ubi->ubi_num, "could not find any 
anchor PEB"); for (i = 1; i < new_fm->used_blocks; i++) ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0); ret = -ENOSPC; goto err; } new_fm->e[0]->pnum = tmp_e->pnum; new_fm->e[0]->ec = tmp_e->ec; new_fm->e[0]->last_erase_time = tmp_e->last_erase_time; new_fm->e[0]->rc = tmp_e->rc; } down_write(&ubi->work_sem); down_write(&ubi->fm_sem); ret = ubi_write_fastmap(ubi, new_fm); up_write(&ubi->fm_sem); up_write(&ubi->work_sem); if (ret) goto err; out_unlock: mutex_unlock(&ubi->fm_mutex); kfree(old_fm); return ret; err: kfree(new_fm); ubi_warn(ubi->ubi_num, "Unable to write new fastmap, err=%i", ret); ret = 0; if (old_fm) { ret = invalidate_fastmap(ubi, old_fm); if (ret < 0) ubi_err(ubi->ubi_num, "Unable to invalidiate current fastmap!"); else if (ret) ret = 0; } goto out_unlock; }
gpl-2.0
zhuyingtao/linux
drivers/staging/gdm724x/gdm_usb.c
620
23273
/* * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/usb.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/usb/cdc.h> #include <linux/wait.h> #include <linux/if_ether.h> #include <linux/pm_runtime.h> #include "gdm_usb.h" #include "gdm_lte.h" #include "hci.h" #include "hci_packet.h" #include "gdm_endian.h" #define USB_DEVICE_CDC_DATA(vid, pid) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_CLASS | \ USB_DEVICE_ID_MATCH_INT_SUBCLASS,\ .idVendor = vid,\ .idProduct = pid,\ .bInterfaceClass = USB_CLASS_COMM,\ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET #define USB_DEVICE_MASS_DATA(vid, pid) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_INFO,\ .idVendor = vid,\ .idProduct = pid,\ .bInterfaceSubClass = USB_SC_SCSI, \ .bInterfaceClass = USB_CLASS_MASS_STORAGE,\ .bInterfaceProtocol = USB_PR_BULK static const struct usb_device_id id_table[] = { { USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */ { USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */ { } }; MODULE_DEVICE_TABLE(usb, id_table); static struct workqueue_struct *usb_tx_wq; static struct workqueue_struct *usb_rx_wq; static void do_tx(struct work_struct *work); static void do_rx(struct work_struct *work); static int gdm_usb_recv(void *priv_dev, int (*cb)(void *cb_data, void *data, int len, int context), void *cb_data, int context); static int 
request_mac_address(struct lte_udev *udev)
{
	u8 buf[16] = {0,};
	struct hci_packet *hci = (struct hci_packet *)buf;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret = -1;

	/* Build an HCI LTE_GET_INFORMATION request asking for MAC_ADDRESS. */
	hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	/* 5 bytes: 2 (cmd_evt) + 2 (len) + 1 (data), on bulk-out EP 2. */
	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
			   &actual, 1000);

	/*
	 * NOTE(review): the flag is set even when usb_bulk_msg() failed —
	 * presumably harmless because the reply simply never arrives, but
	 * worth confirming.
	 */
	udev->request_mac_addr = 1;

	return ret;
}

/*
 * Allocate a TX descriptor with an URB and a transfer buffer of at least
 * @len bytes. Returns NULL on allocation failure.
 */
static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t = NULL;
	int ret = 0;

	t = kzalloc(sizeof(struct usb_tx), GFP_ATOMIC);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	/* Avoid a 512-byte-multiple transfer (would need a ZLP). */
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (t) {
			/* usb_free_urb()/kfree() accept NULL. */
			usb_free_urb(t->urb);
			kfree(t->buf);
			kfree(t);
		}
		return NULL;
	}

	return t;
}

/* Allocate an SDU descriptor plus its SDU_BUF_SIZE payload buffer. */
static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *t_sdu;

	t_sdu = kzalloc(sizeof(struct usb_tx_sdu), GFP_KERNEL);
	if (!t_sdu)
		return NULL;

	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
	if (!t_sdu->buf) {
		kfree(t_sdu);
		return NULL;
	}

	return t_sdu;
}

/* Release a TX descriptor and everything it owns. NULL is a no-op. */
static void free_tx_struct(struct usb_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

/* Release an SDU descriptor and its buffer. NULL is a no-op. */
static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (t_sdu) {
		kfree(t_sdu->buf);
		kfree(t_sdu);
	}
}

/*
 * Pop an SDU descriptor from the free list, or NULL if empty.
 * *no_spc is set when this took the last free entry.
 * NOTE(review): unlike get_rx_struct() below this takes no lock —
 * presumably all callers already hold tx->lock; confirm.
 */
static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ?
		1 : 0;

	return t_sdu;
}

/* Return an SDU descriptor to the free list (caller handles locking). */
static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}

/* Allocate an RX descriptor with an URB and an RX_BUF_SIZE buffer. */
static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = NULL;
	int ret = 0;

	r = kmalloc(sizeof(struct usb_rx), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:

	if (ret < 0) {
		if (r) {
			usb_free_urb(r->urb);
			kfree(r->buf);
			kfree(r);
		}
		return NULL;
	}

	return r;
}

/* Release an RX descriptor and everything it owns. NULL is a no-op. */
static void free_rx_struct(struct usb_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

/*
 * Pop an RX descriptor from the free list under rx_lock, or NULL if empty.
 * *no_spc is set when this took the last free entry.
 */
static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);
	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ?
1 : 0; spin_unlock_irqrestore(&rx->rx_lock, flags); return r; } static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r) { unsigned long flags; spin_lock_irqsave(&rx->rx_lock, flags); list_add_tail(&r->free_list, &rx->free_list); rx->avail_count++; spin_unlock_irqrestore(&rx->rx_lock, flags); } static void release_usb(struct lte_udev *udev) { struct rx_cxt *rx = &udev->rx; struct tx_cxt *tx = &udev->tx; struct usb_tx *t, *t_next; struct usb_rx *r, *r_next; struct usb_tx_sdu *t_sdu, *t_sdu_next; unsigned long flags; spin_lock_irqsave(&tx->lock, flags); list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) { list_del(&t_sdu->list); free_tx_sdu_struct(t_sdu); } list_for_each_entry_safe(t, t_next, &tx->hci_list, list) { list_del(&t->list); free_tx_struct(t); } list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) { list_del(&t_sdu->list); free_tx_sdu_struct(t_sdu); } spin_unlock_irqrestore(&tx->lock, flags); spin_lock_irqsave(&rx->submit_lock, flags); list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) { spin_unlock_irqrestore(&rx->submit_lock, flags); usb_kill_urb(r->urb); spin_lock_irqsave(&rx->submit_lock, flags); } spin_unlock_irqrestore(&rx->submit_lock, flags); spin_lock_irqsave(&rx->rx_lock, flags); list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) { list_del(&r->free_list); free_rx_struct(r); } spin_unlock_irqrestore(&rx->rx_lock, flags); spin_lock_irqsave(&rx->to_host_lock, flags); list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) { if (r->index == (void *)udev) { list_del(&r->to_host_list); free_rx_struct(r); } } spin_unlock_irqrestore(&rx->to_host_lock, flags); } static int init_usb(struct lte_udev *udev) { int ret = 0; int i; struct tx_cxt *tx = &udev->tx; struct rx_cxt *rx = &udev->rx; struct usb_tx_sdu *t_sdu = NULL; struct usb_rx *r = NULL; udev->send_complete = 1; udev->tx_stop = 0; udev->request_mac_addr = 0; udev->usb_state = PM_NORMAL; INIT_LIST_HEAD(&tx->sdu_list); 
INIT_LIST_HEAD(&tx->hci_list); INIT_LIST_HEAD(&tx->free_list); INIT_LIST_HEAD(&rx->rx_submit_list); INIT_LIST_HEAD(&rx->free_list); INIT_LIST_HEAD(&rx->to_host_list); spin_lock_init(&tx->lock); spin_lock_init(&rx->rx_lock); spin_lock_init(&rx->submit_lock); spin_lock_init(&rx->to_host_lock); tx->avail_count = 0; rx->avail_count = 0; udev->rx_cb = NULL; for (i = 0; i < MAX_NUM_SDU_BUF; i++) { t_sdu = alloc_tx_sdu_struct(); if (t_sdu == NULL) { ret = -ENOMEM; goto fail; } list_add(&t_sdu->list, &tx->free_list); tx->avail_count++; } for (i = 0; i < MAX_RX_SUBMIT_COUNT*2; i++) { r = alloc_rx_struct(); if (r == NULL) { ret = -ENOMEM; goto fail; } list_add(&r->free_list, &rx->free_list); rx->avail_count++; } INIT_DELAYED_WORK(&udev->work_tx, do_tx); INIT_DELAYED_WORK(&udev->work_rx, do_rx); return 0; fail: release_usb(udev); return ret; } static int set_mac_address(u8 *data, void *arg) { struct phy_dev *phy_dev = (struct phy_dev *)arg; struct lte_udev *udev = phy_dev->priv_dev; struct tlv *tlv = (struct tlv *)data; u8 mac_address[ETH_ALEN] = {0, }; if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) { memcpy(mac_address, tlv->data, tlv->len); if (register_lte_device(phy_dev, &udev->intf->dev, mac_address) < 0) pr_err("register lte device failed\n"); udev->request_mac_addr = 0; return 1; } return 0; } static void do_rx(struct work_struct *work) { struct lte_udev *udev = container_of(work, struct lte_udev, work_rx.work); struct rx_cxt *rx = &udev->rx; struct usb_rx *r; struct hci_packet *hci; struct phy_dev *phy_dev; u16 cmd_evt; int ret; unsigned long flags; while (1) { spin_lock_irqsave(&rx->to_host_lock, flags); if (list_empty(&rx->to_host_list)) { spin_unlock_irqrestore(&rx->to_host_lock, flags); break; } r = list_entry(rx->to_host_list.next, struct usb_rx, to_host_list); list_del(&r->to_host_list); spin_unlock_irqrestore(&rx->to_host_lock, flags); phy_dev = (struct phy_dev *)r->cb_data; udev = (struct lte_udev *)phy_dev->priv_dev; hci = (struct hci_packet 
*)r->buf; cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt); switch (cmd_evt) { case LTE_GET_INFORMATION_RESULT: if (set_mac_address(hci->data, r->cb_data) == 0) { ret = r->callback(r->cb_data, r->buf, r->urb->actual_length, KERNEL_THREAD); } break; default: if (r->callback) { ret = r->callback(r->cb_data, r->buf, r->urb->actual_length, KERNEL_THREAD); if (ret == -EAGAIN) pr_err("failed to send received data\n"); } break; } put_rx_struct(rx, r); gdm_usb_recv(udev, r->callback, r->cb_data, USB_COMPLETE); } } static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx) { unsigned long flags; struct usb_rx *r_remove, *r_remove_next; spin_lock_irqsave(&rx->submit_lock, flags); list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list) { if (r == r_remove) { list_del(&r->rx_submit_list); break; } } spin_unlock_irqrestore(&rx->submit_lock, flags); } static void gdm_usb_rcv_complete(struct urb *urb) { struct usb_rx *r = urb->context; struct rx_cxt *rx = r->rx; unsigned long flags; struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx); struct usb_device *usbdev = udev->usbdev; remove_rx_submit_list(r, rx); if (!urb->status && r->callback) { spin_lock_irqsave(&rx->to_host_lock, flags); list_add_tail(&r->to_host_list, &rx->to_host_list); queue_work(usb_rx_wq, &udev->work_rx.work); spin_unlock_irqrestore(&rx->to_host_lock, flags); } else { if (urb->status && udev->usb_state == PM_NORMAL) dev_err(&urb->dev->dev, "%s: urb status error %d\n", __func__, urb->status); put_rx_struct(rx, r); } usb_mark_last_busy(usbdev); } static int gdm_usb_recv(void *priv_dev, int (*cb)(void *cb_data, void *data, int len, int context), void *cb_data, int context) { struct lte_udev *udev = priv_dev; struct usb_device *usbdev = udev->usbdev; struct rx_cxt *rx = &udev->rx; struct usb_rx *r; int no_spc; int ret; unsigned long flags; if (!udev->usbdev) { pr_err("invalid device\n"); return -ENODEV; } r = get_rx_struct(rx, &no_spc); if (!r) { 
pr_err("Out of Memory\n"); return -ENOMEM; } udev->rx_cb = cb; r->callback = cb; r->cb_data = cb_data; r->index = (void *)udev; r->rx = rx; usb_fill_bulk_urb(r->urb, usbdev, usb_rcvbulkpipe(usbdev, 0x83), r->buf, RX_BUF_SIZE, gdm_usb_rcv_complete, r); spin_lock_irqsave(&rx->submit_lock, flags); list_add_tail(&r->rx_submit_list, &rx->rx_submit_list); spin_unlock_irqrestore(&rx->submit_lock, flags); if (context == KERNEL_THREAD) ret = usb_submit_urb(r->urb, GFP_KERNEL); else ret = usb_submit_urb(r->urb, GFP_ATOMIC); if (ret) { spin_lock_irqsave(&rx->submit_lock, flags); list_del(&r->rx_submit_list); spin_unlock_irqrestore(&rx->submit_lock, flags); pr_err("usb_submit_urb failed (%p)\n", r); put_rx_struct(rx, r); } return ret; } static void gdm_usb_send_complete(struct urb *urb) { struct usb_tx *t = urb->context; struct tx_cxt *tx = t->tx; struct lte_udev *udev = container_of(tx, struct lte_udev, tx); unsigned long flags; if (urb->status == -ECONNRESET) { dev_info(&urb->dev->dev, "CONNRESET\n"); return; } if (t->callback) t->callback(t->cb_data); free_tx_struct(t); spin_lock_irqsave(&tx->lock, flags); udev->send_complete = 1; queue_work(usb_tx_wq, &udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); } static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len) { int ret = 0; if (!(len%512)) len++; usb_fill_bulk_urb(t->urb, usbdev, usb_sndbulkpipe(usbdev, 2), t->buf, len, gdm_usb_send_complete, t); ret = usb_submit_urb(t->urb, GFP_ATOMIC); if (ret) dev_err(&usbdev->dev, "usb_submit_urb failed: %d\n", ret); usb_mark_last_busy(usbdev); return ret; } static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf) { struct tx_cxt *tx = &udev->tx; struct usb_tx_sdu *t_sdu = NULL; struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf; u16 send_len = 0; u16 num_packet = 0; unsigned long flags; multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU); while (num_packet < MAX_PACKET_IN_MULTI_SDU) { spin_lock_irqsave(&tx->lock, 
flags); if (list_empty(&tx->sdu_list)) { spin_unlock_irqrestore(&tx->lock, flags); break; } t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list); if (send_len + t_sdu->len > MAX_SDU_SIZE) { spin_unlock_irqrestore(&tx->lock, flags); break; } list_del(&t_sdu->list); spin_unlock_irqrestore(&tx->lock, flags); memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len); send_len += (t_sdu->len + 3) & 0xfffc; num_packet++; if (tx->avail_count > 10) t_sdu->callback(t_sdu->cb_data); spin_lock_irqsave(&tx->lock, flags); put_tx_struct(tx, t_sdu); spin_unlock_irqrestore(&tx->lock, flags); } multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len); multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet); return send_len + offsetof(struct multi_sdu, data); } static void do_tx(struct work_struct *work) { struct lte_udev *udev = container_of(work, struct lte_udev, work_tx.work); struct usb_device *usbdev = udev->usbdev; struct tx_cxt *tx = &udev->tx; struct usb_tx *t = NULL; int is_send = 0; u32 len = 0; unsigned long flags; if (!usb_autopm_get_interface(udev->intf)) usb_autopm_put_interface(udev->intf); if (udev->usb_state == PM_SUSPEND) return; spin_lock_irqsave(&tx->lock, flags); if (!udev->send_complete) { spin_unlock_irqrestore(&tx->lock, flags); return; } udev->send_complete = 0; if (!list_empty(&tx->hci_list)) { t = list_entry(tx->hci_list.next, struct usb_tx, list); list_del(&t->list); len = t->len; t->is_sdu = 0; is_send = 1; } else if (!list_empty(&tx->sdu_list)) { if (udev->tx_stop) { udev->send_complete = 1; spin_unlock_irqrestore(&tx->lock, flags); return; } t = alloc_tx_struct(TX_BUF_SIZE); if (t == NULL) { spin_unlock_irqrestore(&tx->lock, flags); return; } t->callback = NULL; t->tx = tx; t->is_sdu = 1; is_send = 1; } if (!is_send) { udev->send_complete = 1; spin_unlock_irqrestore(&tx->lock, flags); return; } spin_unlock_irqrestore(&tx->lock, flags); if (t->is_sdu) len = packet_aggregation(udev, t->buf); if (send_tx_packet(usbdev, t, len)) { 
pr_err("send_tx_packet failed\n"); t->callback = NULL; gdm_usb_send_complete(t->urb); } } #define SDU_PARAM_LEN 12 static int gdm_usb_sdu_send(void *priv_dev, void *data, int len, unsigned int dftEpsId, unsigned int epsId, void (*cb)(void *data), void *cb_data, int dev_idx, int nic_type) { struct lte_udev *udev = priv_dev; struct tx_cxt *tx = &udev->tx; struct usb_tx_sdu *t_sdu; struct sdu *sdu = NULL; unsigned long flags; int no_spc = 0; u16 send_len; if (!udev->usbdev) { pr_err("sdu send - invalid device\n"); return TX_NO_DEV; } spin_lock_irqsave(&tx->lock, flags); t_sdu = get_tx_sdu_struct(tx, &no_spc); spin_unlock_irqrestore(&tx->lock, flags); if (t_sdu == NULL) { pr_err("sdu send - free list empty\n"); return TX_NO_SPC; } sdu = (struct sdu *)t_sdu->buf; sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU); if (nic_type == NIC_TYPE_ARP) { send_len = len + SDU_PARAM_LEN; memcpy(sdu->data, data, len); } else { send_len = len - ETH_HLEN; send_len += SDU_PARAM_LEN; memcpy(sdu->data, data+ETH_HLEN, len-ETH_HLEN); } sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len); sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId); sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId); sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type); t_sdu->len = send_len + HCI_HEADER_SIZE; t_sdu->callback = cb; t_sdu->cb_data = cb_data; spin_lock_irqsave(&tx->lock, flags); list_add_tail(&t_sdu->list, &tx->sdu_list); queue_work(usb_tx_wq, &udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); if (no_spc) return TX_NO_BUFFER; return 0; } static int gdm_usb_hci_send(void *priv_dev, void *data, int len, void (*cb)(void *data), void *cb_data) { struct lte_udev *udev = priv_dev; struct tx_cxt *tx = &udev->tx; struct usb_tx *t; unsigned long flags; if (!udev->usbdev) { pr_err("hci send - invalid device\n"); return -ENODEV; } t = alloc_tx_struct(len); if (t == NULL) { pr_err("hci_send - out of memory\n"); return -ENOMEM; } memcpy(t->buf, data, len); t->callback = cb; 
t->cb_data = cb_data; t->len = len; t->tx = tx; t->is_sdu = 0; spin_lock_irqsave(&tx->lock, flags); list_add_tail(&t->list, &tx->hci_list); queue_work(usb_tx_wq, &udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); return 0; } static struct gdm_endian *gdm_usb_get_endian(void *priv_dev) { struct lte_udev *udev = priv_dev; return &udev->gdm_ed; } static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret = 0; struct phy_dev *phy_dev = NULL; struct lte_udev *udev = NULL; u16 idVendor, idProduct; int bInterfaceNumber; struct usb_device *usbdev = interface_to_usbdev(intf); bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber; idVendor = __le16_to_cpu(usbdev->descriptor.idVendor); idProduct = __le16_to_cpu(usbdev->descriptor.idProduct); pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct); if (bInterfaceNumber > NETWORK_INTERFACE) { pr_info("not a network device\n"); return -ENODEV; } phy_dev = kzalloc(sizeof(struct phy_dev), GFP_KERNEL); if (!phy_dev) return -ENOMEM; udev = kzalloc(sizeof(struct lte_udev), GFP_KERNEL); if (!udev) { ret = -ENOMEM; goto err_udev; } phy_dev->priv_dev = (void *)udev; phy_dev->send_hci_func = gdm_usb_hci_send; phy_dev->send_sdu_func = gdm_usb_sdu_send; phy_dev->rcv_func = gdm_usb_recv; phy_dev->get_endian = gdm_usb_get_endian; udev->usbdev = usbdev; ret = init_usb(udev); if (ret < 0) { dev_err(intf->usb_dev, "init_usb func failed\n"); goto err_init_usb; } udev->intf = intf; intf->needs_remote_wakeup = 1; usb_enable_autosuspend(usbdev); pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER); /* List up hosts with big endians, otherwise, * defaults to little endian */ if (idProduct == PID_GDM7243) gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG); else gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE); ret = request_mac_address(udev); if (ret < 0) { dev_err(intf->usb_dev, "request Mac address failed\n"); goto err_mac_address; } start_rx_proc(phy_dev); 
usb_get_dev(usbdev); usb_set_intfdata(intf, phy_dev); return 0; err_mac_address: release_usb(udev); err_init_usb: kfree(udev); err_udev: kfree(phy_dev); return ret; } static void gdm_usb_disconnect(struct usb_interface *intf) { struct phy_dev *phy_dev; struct lte_udev *udev; u16 idVendor, idProduct; struct usb_device *usbdev; usbdev = interface_to_usbdev(intf); idVendor = __le16_to_cpu(usbdev->descriptor.idVendor); idProduct = __le16_to_cpu(usbdev->descriptor.idProduct); phy_dev = usb_get_intfdata(intf); udev = phy_dev->priv_dev; unregister_lte_device(phy_dev); release_usb(udev); kfree(udev); udev = NULL; kfree(phy_dev); phy_dev = NULL; usb_put_dev(usbdev); } static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg) { struct phy_dev *phy_dev; struct lte_udev *udev; struct rx_cxt *rx; struct usb_rx *r; struct usb_rx *r_next; unsigned long flags; phy_dev = usb_get_intfdata(intf); udev = phy_dev->priv_dev; rx = &udev->rx; if (udev->usb_state != PM_NORMAL) { dev_err(intf->usb_dev, "usb suspend - invalid state\n"); return -1; } udev->usb_state = PM_SUSPEND; spin_lock_irqsave(&rx->submit_lock, flags); list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) { spin_unlock_irqrestore(&rx->submit_lock, flags); usb_kill_urb(r->urb); spin_lock_irqsave(&rx->submit_lock, flags); } spin_unlock_irqrestore(&rx->submit_lock, flags); return 0; } static int gdm_usb_resume(struct usb_interface *intf) { struct phy_dev *phy_dev; struct lte_udev *udev; struct tx_cxt *tx; struct rx_cxt *rx; unsigned long flags; int issue_count; int i; phy_dev = usb_get_intfdata(intf); udev = phy_dev->priv_dev; rx = &udev->rx; if (udev->usb_state != PM_SUSPEND) { dev_err(intf->usb_dev, "usb resume - invalid state\n"); return -1; } udev->usb_state = PM_NORMAL; spin_lock_irqsave(&rx->rx_lock, flags); issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT; spin_unlock_irqrestore(&rx->rx_lock, flags); if (issue_count >= 0) { for (i = 0; i < issue_count; i++) 
gdm_usb_recv(phy_dev->priv_dev, udev->rx_cb, phy_dev, USB_COMPLETE); } tx = &udev->tx; spin_lock_irqsave(&tx->lock, flags); queue_work(usb_tx_wq, &udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); return 0; } static struct usb_driver gdm_usb_lte_driver = { .name = "gdm_lte", .probe = gdm_usb_probe, .disconnect = gdm_usb_disconnect, .id_table = id_table, .supports_autosuspend = 1, .suspend = gdm_usb_suspend, .resume = gdm_usb_resume, .reset_resume = gdm_usb_resume, }; static int __init gdm_usb_lte_init(void) { if (gdm_lte_event_init() < 0) { pr_err("error creating event\n"); return -1; } usb_tx_wq = create_workqueue("usb_tx_wq"); if (usb_tx_wq == NULL) return -1; usb_rx_wq = create_workqueue("usb_rx_wq"); if (usb_rx_wq == NULL) return -1; return usb_register(&gdm_usb_lte_driver); } static void __exit gdm_usb_lte_exit(void) { gdm_lte_event_exit(); usb_deregister(&gdm_usb_lte_driver); if (usb_tx_wq) { flush_workqueue(usb_tx_wq); destroy_workqueue(usb_tx_wq); } if (usb_rx_wq) { flush_workqueue(usb_rx_wq); destroy_workqueue(usb_rx_wq); } } module_init(gdm_usb_lte_init); module_exit(gdm_usb_lte_exit); MODULE_VERSION(DRIVER_VERSION); MODULE_DESCRIPTION("GCT LTE USB Device Driver"); MODULE_LICENSE("GPL");
gpl-2.0
jmztaylor/android_kernel_htc_express
net/ipv4/netfilter/nf_nat_proto_sctp.c
1388
2624
/*
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <net/sctp/checksum.h>

#include <net/netfilter/nf_nat_protocol.h>

/* Rotating start point for NAT source-port selection, shared across calls. */
static u_int16_t nf_sctp_port_rover;

/*
 * Pick a unique SCTP port for the NATed tuple.  Delegates to the generic
 * port allocator, passing the module-local rover so successive allocations
 * start from where the last one left off.
 */
static bool
sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype,
		  const struct nf_conn *ct)
{
	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
					 &nf_sctp_port_rover);
}

/*
 * Rewrite the SCTP header of @skb for NAT and recompute its checksum.
 *
 * @iphdroff: offset of the IP header within skb->data.
 * @tuple:    the post-NAT tuple supplying the replacement port.
 * @maniptype: IP_NAT_MANIP_SRC rewrites the source port, otherwise the
 *             destination port is rewritten.
 *
 * Returns false if the packet could not be made writable, true otherwise.
 */
static bool
sctp_manip_pkt(struct sk_buff *skb,
	       unsigned int iphdroff,
	       const struct nf_conntrack_tuple *tuple,
	       enum nf_nat_manip_type maniptype)
{
	const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
	struct sk_buff *frag;
	sctp_sctphdr_t *hdr;
	unsigned int hdroff = iphdroff + iph->ihl*4;
	__be32 oldip, newip;
	__be32 crc32;

	if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
		return false;

	/* skb_make_writable() may have reallocated the head; refetch. */
	iph = (struct iphdr *)(skb->data + iphdroff);
	hdr = (struct sctphdr *)(skb->data + hdroff);

	if (maniptype == IP_NAT_MANIP_SRC) {
		/* Get rid of src ip and src pt */
		oldip = iph->saddr;
		newip = tuple->src.u3.ip;
		hdr->source = tuple->src.u.sctp.port;
	} else {
		/* Get rid of dst ip and dst pt */
		oldip = iph->daddr;
		newip = tuple->dst.u3.ip;
		hdr->dest = tuple->dst.u.sctp.port;
	}

	/*
	 * Recompute the SCTP CRC32 over the linear head (from the SCTP
	 * header onward) and then over each skb on the frag list.
	 */
	crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
	skb_walk_frags(skb, frag)
		crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
					  crc32);
	crc32 = sctp_end_cksum(crc32);
	hdr->checksum = crc32;

	return true;
}

/* NAT protocol operations for IPPROTO_SCTP; generic helpers cover in_range
 * and (when netlink support is built) the nlattr conversions. */
static const struct nf_nat_protocol nf_nat_protocol_sctp = {
	.protonum		= IPPROTO_SCTP,
	.me			= THIS_MODULE,
	.manip_pkt		= sctp_manip_pkt,
	.in_range		= nf_nat_proto_in_range,
	.unique_tuple		= sctp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
	.range_to_nlattr	= nf_nat_proto_range_to_nlattr,
	.nlattr_to_range	= nf_nat_proto_nlattr_to_range,
#endif
};

/* Register the SCTP NAT helper with the NAT core on module load. */
static int __init nf_nat_proto_sctp_init(void)
{
	return nf_nat_protocol_register(&nf_nat_protocol_sctp);
}

static void __exit nf_nat_proto_sctp_exit(void)
{
	nf_nat_protocol_unregister(&nf_nat_protocol_sctp);
}

module_init(nf_nat_proto_sctp_init);
module_exit(nf_nat_proto_sctp_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCTP NAT protocol helper");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
gpl-2.0
wareash/linux-xylon
tools/perf/ui/tui/util.c
1388
5261
#include "../../util/util.h"
#include <signal.h>
#include <stdbool.h>
#include <string.h>
#include <sys/ttydefaults.h>
#include "../../util/cache.h"
#include "../../util/debug.h"
#include "../browser.h"
#include "../keysyms.h"
#include "../helpline.h"
#include "../ui.h"
#include "../util.h"
#include "../libslang.h"

/*
 * ui_browser write callback for a browser whose entries are a char *[]:
 * paints one menu row, highlighting the currently selected entry.
 */
static void ui_browser__argv_write(struct ui_browser *browser,
				   void *entry, int row)
{
	char **arg = entry;
	bool current_entry = ui_browser__is_current_entry(browser, row);

	ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
						       HE_COLORSET_NORMAL);
	slsmg_write_nstring(*arg, browser->width);
}

/*
 * Run an interactive popup menu.  Returns the selected entry index on
 * ENTER/right-arrow, or -1 on ESC/left-arrow/'q'/Ctrl-C or if the
 * browser could not be shown.
 */
static int popup_menu__run(struct ui_browser *menu)
{
	int key;

	if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
		return -1;

	while (1) {
		key = ui_browser__run(menu, 0);

		switch (key) {
		case K_RIGHT:
		case K_ENTER:
			key = menu->index;
			break;
		case K_LEFT:
		case K_ESC:
		case 'q':
		case CTRL('c'):
			key = -1;
			break;
		default:
			/* ignore any other key and keep polling */
			continue;
		}

		break;
	}

	ui_browser__hide(menu);
	return key;
}

/*
 * Present @argv as a popup menu with @argc entries; see popup_menu__run()
 * for the return-value contract.
 */
int ui__popup_menu(int argc, char * const argv[])
{
	struct ui_browser menu = {
		.entries    = (void *)argv,
		.refresh    = ui_browser__argv_refresh,
		.seek	    = ui_browser__argv_seek,
		.write	    = ui_browser__argv_write,
		.nr_entries = argc,
	};

	return popup_menu__run(&menu);
}

/*
 * Draw a boxed prompt containing @text and read a line of input into
 * @input.  Returns the key that terminated input (K_ENTER, K_ESC or
 * K_TIMER after @delay_secs of inactivity).
 *
 * NOTE(review): input is accumulated in a static 50-byte buffer, so the
 * result is capped at 49 characters and the function is not reentrant —
 * presumably acceptable for a single-threaded TUI; confirm with callers.
 * @input must be able to hold at least sizeof(buf) bytes.
 */
int ui_browser__input_window(const char *title, const char *text, char *input,
			     const char *exit_msg, int delay_secs)
{
	int x, y, len, key;
	int max_len = 60, nr_lines = 0;
	static char buf[50];
	const char *t;

	/* Measure @text: longest line sets the width, count the lines. */
	t = text;
	while (1) {
		const char *sep = strchr(t, '\n');

		if (sep == NULL)
			sep = strchr(t, '\0');
		len = sep - t;
		if (max_len < len)
			max_len = len;
		++nr_lines;
		if (*sep == '\0')
			break;
		t = sep + 1;
	}

	pthread_mutex_lock(&ui__lock);

	/* 2 columns of border, 8 extra rows for box/input/exit message. */
	max_len += 2;
	nr_lines += 8;
	y = SLtt_Screen_Rows / 2 - nr_lines / 2;
	x = SLtt_Screen_Cols / 2 - max_len / 2;

	SLsmg_set_color(0);
	SLsmg_draw_box(y, x++, nr_lines, max_len);
	if (title) {
		SLsmg_gotorc(y, x + 1);
		SLsmg_write_string((char *)title);
	}
	SLsmg_gotorc(++y, x);
	nr_lines -= 7;
	max_len -= 2;
	SLsmg_write_wrapped_string((unsigned char *)text, y, x,
				   nr_lines, max_len, 1);
	y += nr_lines;
	/* Clear five rows below the text for the input box area. */
	len = 5;
	while (len--) {
		SLsmg_gotorc(y + len - 1, x);
		SLsmg_write_nstring((char *)" ", max_len);
	}
	SLsmg_draw_box(y++, x + 1, 3, max_len - 2);

	SLsmg_gotorc(y + 3, x);
	SLsmg_write_nstring((char *)exit_msg, max_len);
	SLsmg_refresh();

	pthread_mutex_unlock(&ui__lock);

	/* Echoing input loop; lock is only held while drawing. */
	x += 2;
	len = 0;
	key = ui__getch(delay_secs);
	while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
		pthread_mutex_lock(&ui__lock);

		if (key == K_BKSPC) {
			if (len == 0) {
				pthread_mutex_unlock(&ui__lock);
				goto next_key;
			}
			SLsmg_gotorc(y, x + --len);
			SLsmg_write_char(' ');
		} else {
			buf[len] = key;
			SLsmg_gotorc(y, x + len++);
			SLsmg_write_char(key);
		}
		SLsmg_refresh();

		pthread_mutex_unlock(&ui__lock);

		/* XXX more graceful overflow handling needed */
		if (len == sizeof(buf) - 1) {
			ui_helpline__push("maximum size of symbol name reached!");
			key = K_ENTER;
			break;
		}
next_key:
		key = ui__getch(delay_secs);
	}

	buf[len] = '\0';
	strncpy(input, buf, len+1);	/* len+1 includes the terminator */
	return key;
}

/*
 * Draw a boxed message window with @text and @exit_msg, then wait for a
 * single keypress (or a K_TIMER timeout) and return it.
 */
int ui__question_window(const char *title, const char *text,
			const char *exit_msg, int delay_secs)
{
	int x, y;
	int max_len = 0, nr_lines = 0;
	const char *t;

	/* Same text-measuring pass as ui_browser__input_window(). */
	t = text;
	while (1) {
		const char *sep = strchr(t, '\n');
		int len;

		if (sep == NULL)
			sep = strchr(t, '\0');
		len = sep - t;
		if (max_len < len)
			max_len = len;
		++nr_lines;
		if (*sep == '\0')
			break;
		t = sep + 1;
	}

	pthread_mutex_lock(&ui__lock);

	max_len += 2;
	nr_lines += 4;
	/* NOTE(review): comma operator joins these two assignments;
	 * behavior is the same as two statements. */
	y = SLtt_Screen_Rows / 2 - nr_lines / 2,
	x = SLtt_Screen_Cols / 2 - max_len / 2;

	SLsmg_set_color(0);
	SLsmg_draw_box(y, x++, nr_lines, max_len);
	if (title) {
		SLsmg_gotorc(y, x + 1);
		SLsmg_write_string((char *)title);
	}
	SLsmg_gotorc(++y, x);
	nr_lines -= 2;
	max_len -= 2;
	SLsmg_write_wrapped_string((unsigned char *)text, y, x,
				   nr_lines, max_len, 1);
	SLsmg_gotorc(y + nr_lines - 2, x);
	SLsmg_write_nstring((char *)" ", max_len);
	SLsmg_gotorc(y + nr_lines - 1, x);
	SLsmg_write_nstring((char *)exit_msg, max_len);
	SLsmg_refresh();

	pthread_mutex_unlock(&ui__lock);

	return ui__getch(delay_secs);
}

/* Convenience wrapper: show @text in a "Help" window. */
int ui__help_window(const char *text)
{
	return ui__question_window("Help", text, "Press any key...", 0);
}

/* Yes/no prompt; returns the terminating key from ui__question_window(). */
int ui__dialog_yesno(const char *msg)
{
	return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
}

/*
 * Format @format/@args and display it under @title.  Falls back to
 * stderr (returning K_ESC) if vasprintf() fails.
 */
static int __ui__warning(const char *title, const char *format, va_list args)
{
	char *s;

	if (vasprintf(&s, format, args) > 0) {
		int key;

		key = ui__question_window(title, s, "Press any key...", 0);
		free(s);
		return key;
	}

	fprintf(stderr, "%s\n", title);
	vfprintf(stderr, format, args);
	return K_ESC;
}

static int perf_tui__error(const char *format, va_list args)
{
	return __ui__warning("Error:", format, args);
}

static int perf_tui__warning(const char *format, va_list args)
{
	return __ui__warning("Warning:", format, args);
}

/* Error-reporting ops installed when perf runs with the TUI frontend. */
struct perf_error_ops perf_tui_eops = {
	.error		= perf_tui__error,
	.warning	= perf_tui__warning,
};
gpl-2.0
KunYi/linux_samx6i
drivers/staging/vt6656/wcmd.c
1388
4425
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * File: wcmd.c
 *
 * Purpose: Handles the management command interface functions
 *
 * Author: Lyndon Chen
 *
 * Date: May 8, 2003
 *
 * Functions:
 *	vnt_cmd_complete - Command Complete function
 *	vnt_schedule_command - Push Command and wait Command Scheduler to do
 *	vnt_cmd_timer_wait- Call back timer
 *
 * Revision History:
 *
 */

#include "device.h"
#include "mac.h"
#include "wcmd.h"
#include "power.h"
#include "usbpipe.h"
#include "rxtx.h"
#include "rf.h"

/* (Re)arm the command work item to run after @msecs milliseconds. */
static void vnt_cmd_timer_wait(struct vnt_private *priv, unsigned long msecs)
{
	schedule_delayed_work(&priv->run_command_work, msecs_to_jiffies(msecs));
}

/*
 * Finish the current command and dequeue the next one, if any.
 *
 * If the queue is empty the scheduler goes idle (cmd_running = false);
 * otherwise the next command is mapped to its *_START state and the work
 * item is rescheduled immediately.  Always returns true.
 */
static int vnt_cmd_complete(struct vnt_private *priv)
{
	priv->command_state = WLAN_CMD_IDLE;

	if (priv->free_cmd_queue == CMD_Q_SIZE) {
		/* Command Queue Empty */
		priv->cmd_running = false;
		return true;
	}

	priv->command = priv->cmd_queue[priv->cmd_dequeue_idx];

	ADD_ONE_WITH_WRAP_AROUND(priv->cmd_dequeue_idx, CMD_Q_SIZE);
	priv->free_cmd_queue++;
	priv->cmd_running = true;

	switch (priv->command) {
	case WLAN_CMD_INIT_MAC80211:
		priv->command_state = WLAN_CMD_INIT_MAC80211_START;
		break;

	case WLAN_CMD_TBTT_WAKEUP:
		priv->command_state = WLAN_CMD_TBTT_WAKEUP_START;
		break;

	case WLAN_CMD_BECON_SEND:
		priv->command_state = WLAN_CMD_BECON_SEND_START;
		break;

	case WLAN_CMD_SETPOWER:
		priv->command_state = WLAN_CMD_SETPOWER_START;
		break;

	case WLAN_CMD_CHANGE_ANTENNA:
		priv->command_state = WLAN_CMD_CHANGE_ANTENNA_START;
		break;

	default:
		break;
	}

	vnt_cmd_timer_wait(priv, 0);

	return true;
}

/*
 * Delayed-work handler: execute the action for the current command state,
 * then chain to vnt_cmd_complete() to pick up the next queued command.
 * Bails out early if the device is gone or no command is running.
 */
void vnt_run_command(struct work_struct *work)
{
	struct vnt_private *priv =
		container_of(work, struct vnt_private, run_command_work.work);

	if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
		return;

	if (priv->cmd_running != true)
		return;

	switch (priv->command_state) {
	case WLAN_CMD_INIT_MAC80211_START:
		if (priv->mac_hw)
			break;	/* mac80211 already started */

		dev_info(&priv->usb->dev, "Starting mac80211\n");

		if (vnt_init(priv)) {
			/* If fail all ends TODO retry */
			dev_err(&priv->usb->dev, "failed to start\n");
			ieee80211_free_hw(priv->hw);
			return;
		}

		break;

	case WLAN_CMD_TBTT_WAKEUP_START:
		vnt_next_tbtt_wakeup(priv);
		break;

	case WLAN_CMD_BECON_SEND_START:
		if (!priv->vif)
			break;

		vnt_beacon_make(priv, priv->vif);

		/* enable automatic beacon transmission */
		vnt_mac_reg_bits_on(priv, MAC_REG_TCR, TCR_AUTOBCNTX);

		break;

	case WLAN_CMD_SETPOWER_START:

		vnt_rf_setpower(priv, priv->current_rate,
				priv->hw->conf.chandef.chan->hw_value);

		break;

	case WLAN_CMD_CHANGE_ANTENNA_START:
		dev_dbg(&priv->usb->dev, "Change from Antenna%d to",
							priv->rx_antenna_sel);

		/* Toggle the RX antenna; tx_rx_ant_inv swaps which physical
		 * antenna maps to each selector value. */
		if (priv->rx_antenna_sel == 0) {
			priv->rx_antenna_sel = 1;
			if (priv->tx_rx_ant_inv == true)
				vnt_set_antenna_mode(priv, ANT_RXA);
			else
				vnt_set_antenna_mode(priv, ANT_RXB);
		} else {
			priv->rx_antenna_sel = 0;

			if (priv->tx_rx_ant_inv == true)
				vnt_set_antenna_mode(priv, ANT_RXB);
			else
				vnt_set_antenna_mode(priv, ANT_RXA);
		}
		break;

	default:
		break;
	}

	vnt_cmd_complete(priv);
}

/*
 * Enqueue @command for the command scheduler.  Returns false if the queue
 * is full, true otherwise.  If the scheduler is idle it is kicked
 * immediately via vnt_cmd_complete().
 */
int vnt_schedule_command(struct vnt_private *priv, enum vnt_cmd command)
{
	if (priv->free_cmd_queue == 0)
		return false;

	priv->cmd_queue[priv->cmd_enqueue_idx] = command;

	ADD_ONE_WITH_WRAP_AROUND(priv->cmd_enqueue_idx, CMD_Q_SIZE);
	priv->free_cmd_queue--;

	if (priv->cmd_running == false)
		vnt_cmd_complete(priv);

	return true;
}

/* Reset the command queue and state machine to the empty/idle state. */
void vnt_reset_command_timer(struct vnt_private *priv)
{
	priv->free_cmd_queue = CMD_Q_SIZE;
	priv->cmd_dequeue_idx = 0;
	priv->cmd_enqueue_idx = 0;
	priv->command_state = WLAN_CMD_IDLE;
	priv->cmd_running = false;
}
gpl-2.0
Ezekeel/Nexus-S-reference-kernel
drivers/watchdog/softdog.c
1644
7370
/*
 *	SoftDog	0.07:	A Software Watchdog Device
 *
 *	(c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 *							All Rights Reserved.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 *	warranty for any of this software. This material is provided
 *	"AS-IS" and at no charge.
 *
 *	(c) Copyright 1995    Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Software only watchdog driver. Unlike its big brother the WDT501P
 *	driver this won't always recover a failed machine.
 *
 *  03/96: Angelo Haritsis <ah@doc.ic.ac.uk> :
 *	Modularised.
 *	Added soft_margin; use upon insmod to change the timer delay.
 *	NB: uses same minor as wdt (WATCHDOG_MINOR); we could use separate
 *	    minors.
 *
 *  19980911 Alan Cox
 *	Made SMP safe for 2.3.x
 *
 *  20011127 Joel Becker (jlbec@evilplan.org>
 *	Added soft_noboot; Allows testing the softdog trigger without
 *	requiring a recompile.
 *	Added WDIOC_GETTIMEOUT and WDIOC_SETTIMOUT.
 *
 *  20020530 Joel Becker <joel.becker@oracle.com>
 *	Added Matt Domsch's nowayout module option.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>

#define PFX "SoftDog: "

#define TIMER_MARGIN	60		/* Default is 60 seconds */
static int soft_margin = TIMER_MARGIN;	/* in seconds */
module_param(soft_margin, int, 0);
MODULE_PARM_DESC(soft_margin,
	"Watchdog soft_margin in seconds. (0 < soft_margin < 65536, default="
					__MODULE_STRING(TIMER_MARGIN) ")");

static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
		"Watchdog cannot be stopped once started (default="
				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

#ifdef ONLY_TESTING
static int soft_noboot = 1;
#else
static int soft_noboot = 0;
#endif  /* ONLY_TESTING */

module_param(soft_noboot, int, 0);
MODULE_PARM_DESC(soft_noboot,
	"Softdog action, set to 1 to ignore reboots, 0 to reboot "
					"(default depends on ONLY_TESTING)");

/*
 *	Our timer
 */

static void watchdog_fire(unsigned long);

static struct timer_list watchdog_ticktock =
		TIMER_INITIALIZER(watchdog_fire, 0, 0);
/* driver_open bit 0: /dev/watchdog is open (single-open exclusion).
 * orphan_timer bit 0: timer left running after an unexpected close;
 * the module reference is then owned by the orphan timer. */
static unsigned long driver_open, orphan_timer;
static char expect_close;	/* set to 42 when a 'V' was written */

/*
 *	If the timer expires..
 */

static void watchdog_fire(unsigned long data)
{
	/* An orphaned timer firing releases the module reference it held. */
	if (test_and_clear_bit(0, &orphan_timer))
		module_put(THIS_MODULE);

	if (soft_noboot)
		printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n");
	else {
		printk(KERN_CRIT PFX "Initiating system reboot.\n");
		emergency_restart();
		printk(KERN_CRIT PFX "Reboot didn't ?????\n");
	}
}

/*
 *	Softdog operations
 */

/* Push the expiry soft_margin seconds into the future. */
static int softdog_keepalive(void)
{
	mod_timer(&watchdog_ticktock, jiffies+(soft_margin*HZ));
	return 0;
}

static int softdog_stop(void)
{
	del_timer(&watchdog_ticktock);
	return 0;
}

/* Validate and set the timeout; valid range is 1..65535 seconds. */
static int softdog_set_heartbeat(int t)
{
	if ((t < 0x0001) || (t > 0xFFFF))
		return -EINVAL;

	soft_margin = t;
	return 0;
}

/*
 *	/dev/watchdog handling
 */

static int softdog_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &driver_open))
		return -EBUSY;
	/* If we adopt an orphaned timer it already holds our reference;
	 * otherwise take a fresh one so unload is blocked while armed. */
	if (!test_and_clear_bit(0, &orphan_timer))
		__module_get(THIS_MODULE);
	/*
	 *	Activate timer
	 */
	softdog_keepalive();
	return nonseekable_open(inode, file);
}

static int softdog_release(struct inode *inode, struct file *file)
{
	/*
	 *	Shut off the timer.
	 *	Lock it in if it's a module and we set nowayout
	 */
	if (expect_close == 42) {
		/* Magic-close seen: a clean stop is allowed. */
		softdog_stop();
		module_put(THIS_MODULE);
	} else {
		printk(KERN_CRIT PFX
			"Unexpected close, not stopping watchdog!\n");
		/* Keep ticking as an orphan; the reference moves to it. */
		set_bit(0, &orphan_timer);
		softdog_keepalive();
	}
	clear_bit(0, &driver_open);
	expect_close = 0;
	return 0;
}

static ssize_t softdog_write(struct file *file, const char __user *data,
						size_t len, loff_t *ppos)
{
	/*
	 *	Refresh the timer.
	 */
	if (len) {
		if (!nowayout) {
			size_t i;

			/* In case it was set long ago */
			expect_close = 0;

			/* Scan for the magic 'V' that permits a clean close. */
			for (i = 0; i != len; i++) {
				char c;

				if (get_user(c, data + i))
					return -EFAULT;
				if (c == 'V')
					expect_close = 42;
			}
		}
		softdog_keepalive();
	}
	return len;
}

static long softdog_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int new_margin;
	static const struct watchdog_info ident = {
		.options =		WDIOF_SETTIMEOUT |
					WDIOF_KEEPALIVEPING |
					WDIOF_MAGICCLOSE,
		.firmware_version =	0,
		.identity =		"Software Watchdog",
	};
	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_KEEPALIVE:
		softdog_keepalive();
		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_margin, p))
			return -EFAULT;
		if (softdog_set_heartbeat(new_margin))
			return -EINVAL;
		softdog_keepalive();
		/* Fall */
	case WDIOC_GETTIMEOUT:
		return put_user(soft_margin, p);
	default:
		return -ENOTTY;
	}
}

/*
 *	Notifier for system down
 */

static int softdog_notify_sys(struct notifier_block *this, unsigned long code,
	void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		/* Turn the WDT off */
		softdog_stop();
	return NOTIFY_DONE;
}

/*
 *	Kernel Interfaces
 */

static const struct file_operations softdog_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= softdog_write,
	.unlocked_ioctl	= softdog_ioctl,
	.open		= softdog_open,
	.release	= softdog_release,
};

static struct miscdevice softdog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &softdog_fops,
};

static struct notifier_block softdog_notifier = {
	.notifier_call	= softdog_notify_sys,
};

static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 "
	"initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n";

static int __init watchdog_init(void)
{
	int ret;

	/* Check that the soft_margin value is within it's range;
	   if not reset to the default */
	if (softdog_set_heartbeat(soft_margin)) {
		softdog_set_heartbeat(TIMER_MARGIN);
		printk(KERN_INFO PFX
			"soft_margin must be 0 < soft_margin < 65536, using %d\n",
			TIMER_MARGIN);
	}

	ret = register_reboot_notifier(&softdog_notifier);
	if (ret) {
		printk(KERN_ERR PFX
			"cannot register reboot notifier (err=%d)\n", ret);
		return ret;
	}

	ret = misc_register(&softdog_miscdev);
	if (ret) {
		printk(KERN_ERR PFX
			"cannot register miscdev on minor=%d (err=%d)\n",
				WATCHDOG_MINOR, ret);
		unregister_reboot_notifier(&softdog_notifier);
		return ret;
	}

	printk(banner, soft_noboot, soft_margin, nowayout);

	return 0;
}

static void __exit watchdog_exit(void)
{
	misc_deregister(&softdog_miscdev);
	unregister_reboot_notifier(&softdog_notifier);
}

module_init(watchdog_init);
module_exit(watchdog_exit);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("Software Watchdog Device Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
gchild320/shamu-old
drivers/staging/vt6656/dpc.c
2156
51776
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: dpc.c * * Purpose: handle dpc rx functions * * Author: Lyndon Chen * * Date: May 20, 2003 * * Functions: * device_receive_frame - Rcv 802.11 frame function * s_bAPModeRxCtl- AP Rcv frame filer Ctl. * s_bAPModeRxData- AP Rcv data frame handle * s_bHandleRxEncryption- Rcv decrypted data via on-fly * s_bHostWepRxEncryption- Rcv encrypted data via host * s_byGetRateIdx- get rate index * s_vGetDASA- get data offset * s_vProcessRxMACHeader- Rcv 802.11 and translate to 802.3 * * Revision History: * */ #include "device.h" #include "rxtx.h" #include "tether.h" #include "card.h" #include "bssdb.h" #include "mac.h" #include "baseband.h" #include "michael.h" #include "tkip.h" #include "tcrc.h" #include "wctl.h" #include "hostap.h" #include "rf.h" #include "iowpa.h" #include "aes_ccmp.h" #include "datarate.h" #include "usbpipe.h" //static int msglevel =MSG_LEVEL_DEBUG; static int msglevel =MSG_LEVEL_INFO; const u8 acbyRxRate[MAX_RATE] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108}; static u8 s_byGetRateIdx(u8 byRate); static void s_vGetDASA( u8 * pbyRxBufferAddr, unsigned int *pcbHeaderSize, struct ethhdr *psEthHeader ); static void s_vProcessRxMACHeader(struct vnt_private *pDevice, u8 
*pbyRxBufferAddr, u32 cbPacketSize, int bIsWEP, int bExtIV, u32 *pcbHeadSize); static int s_bAPModeRxCtl(struct vnt_private *pDevice, u8 *pbyFrame, s32 iSANodeIndex); static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb, u32 FrameSize, u32 cbHeaderOffset, s32 iSANodeIndex, s32 iDANodeIndex); static int s_bHandleRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame, u32 FrameSize, u8 *pbyRsr, u8 *pbyNewRsr, PSKeyItem *pKeyOut, s32 *pbExtIV, u16 *pwRxTSC15_0, u32 *pdwRxTSC47_16); static int s_bHostWepRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame, u32 FrameSize, u8 *pbyRsr, int bOnFly, PSKeyItem pKey, u8 *pbyNewRsr, s32 *pbExtIV, u16 *pwRxTSC15_0, u32 *pdwRxTSC47_16); /*+ * * Description: * Translate Rcv 802.11 header to 802.3 header with Rx buffer * * Parameters: * In: * pDevice * dwRxBufferAddr - Address of Rcv Buffer * cbPacketSize - Rcv Packet size * bIsWEP - If Rcv with WEP * Out: * pcbHeaderSize - 802.11 header size * * Return Value: None * -*/ static void s_vProcessRxMACHeader(struct vnt_private *pDevice, u8 *pbyRxBufferAddr, u32 cbPacketSize, int bIsWEP, int bExtIV, u32 *pcbHeadSize) { u8 *pbyRxBuffer; u32 cbHeaderSize = 0; u16 *pwType; struct ieee80211_hdr *pMACHeader; int ii; pMACHeader = (struct ieee80211_hdr *) (pbyRxBufferAddr + cbHeaderSize); s_vGetDASA((u8 *)pMACHeader, &cbHeaderSize, &pDevice->sRxEthHeader); if (bIsWEP) { if (bExtIV) { // strip IV&ExtIV , add 8 byte cbHeaderSize += (WLAN_HDR_ADDR3_LEN + 8); } else { // strip IV , add 4 byte cbHeaderSize += (WLAN_HDR_ADDR3_LEN + 4); } } else { cbHeaderSize += WLAN_HDR_ADDR3_LEN; }; pbyRxBuffer = (u8 *) (pbyRxBufferAddr + cbHeaderSize); if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_Bridgetunnel[0])) { cbHeaderSize += 6; } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) { cbHeaderSize += 6; pwType = (u16 *) (pbyRxBufferAddr + cbHeaderSize); if ((*pwType == cpu_to_be16(ETH_P_IPX)) || (*pwType == cpu_to_le16(0xF380))) { cbHeaderSize -= 8; 
pwType = (u16 *) (pbyRxBufferAddr + cbHeaderSize); if (bIsWEP) { if (bExtIV) { *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 8); // 8 is IV&ExtIV } else { *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 4); // 4 is IV } } else { *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN); } } } else { cbHeaderSize -= 2; pwType = (u16 *) (pbyRxBufferAddr + cbHeaderSize); if (bIsWEP) { if (bExtIV) { *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 8); // 8 is IV&ExtIV } else { *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 4); // 4 is IV } } else { *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN); } } cbHeaderSize -= (ETH_ALEN * 2); pbyRxBuffer = (u8 *) (pbyRxBufferAddr + cbHeaderSize); for (ii = 0; ii < ETH_ALEN; ii++) *pbyRxBuffer++ = pDevice->sRxEthHeader.h_dest[ii]; for (ii = 0; ii < ETH_ALEN; ii++) *pbyRxBuffer++ = pDevice->sRxEthHeader.h_source[ii]; *pcbHeadSize = cbHeaderSize; } static u8 s_byGetRateIdx(u8 byRate) { u8 byRateIdx; for (byRateIdx = 0; byRateIdx <MAX_RATE ; byRateIdx++) { if (acbyRxRate[byRateIdx%MAX_RATE] == byRate) return byRateIdx; } return 0; } static void s_vGetDASA ( u8 * pbyRxBufferAddr, unsigned int *pcbHeaderSize, struct ethhdr *psEthHeader ) { unsigned int cbHeaderSize = 0; struct ieee80211_hdr *pMACHeader; int ii; pMACHeader = (struct ieee80211_hdr *) (pbyRxBufferAddr + cbHeaderSize); if ((pMACHeader->frame_control & FC_TODS) == 0) { if (pMACHeader->frame_control & FC_FROMDS) { for (ii = 0; ii < ETH_ALEN; ii++) { psEthHeader->h_dest[ii] = pMACHeader->addr1[ii]; psEthHeader->h_source[ii] = pMACHeader->addr3[ii]; } } else { /* IBSS mode */ for (ii = 0; ii < ETH_ALEN; ii++) { psEthHeader->h_dest[ii] = pMACHeader->addr1[ii]; psEthHeader->h_source[ii] = pMACHeader->addr2[ii]; } } } else { /* Is AP mode.. 
		 */
		if (pMACHeader->frame_control & FC_FROMDS) {
			/* WDS (ToDS+FromDS): SA is addr4.
			 * NOTE(review): cbHeaderSize += 6 sits INSIDE the
			 * per-octet loop, so it accumulates 36, not 6 — kept
			 * byte-identical; confirm against the 4-address frame
			 * layout before "fixing". */
			for (ii = 0; ii < ETH_ALEN; ii++) {
				psEthHeader->h_dest[ii] = pMACHeader->addr3[ii];
				psEthHeader->h_source[ii] = pMACHeader->addr4[ii];
				cbHeaderSize += 6;
			}
		} else {
			for (ii = 0; ii < ETH_ALEN; ii++) {
				psEthHeader->h_dest[ii] = pMACHeader->addr3[ii];
				psEthHeader->h_source[ii] = pMACHeader->addr2[ii];
			}
		}
	};

	*pcbHeaderSize = cbHeaderSize;
}

/*
 * RXbBulkInProcessData - process one completed USB bulk-in receive buffer.
 *
 * Validates the hardware length/PLCP fields, locates the status trailer
 * (RSR/NewRSR/RSSI/SQ/TSF) behind the padded PLCP payload, decrypts
 * protected frames (on-chip or host WEP/TKIP/CCMP), defragments unicast
 * fragments, dispatches management frames to the management work item (or
 * hostapd), enforces the software Michael MIC and TKIP/CCMP replay counter,
 * and finally translates the data frame to 802.3 and hands it to netif_rx.
 *
 * Returns true when the skb was consumed/indicated, false when the frame
 * was dropped (caller keeps ownership of the RCB).
 */
int RXbBulkInProcessData(struct vnt_private *pDevice, PRCB pRCB,
	unsigned long BytesToIndicate)
{
	struct net_device_stats *pStats = &pDevice->stats;
	struct sk_buff *skb;
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct vnt_rx_mgmt *pRxPacket = &pMgmt->sRxPacket;
	struct ieee80211_hdr *p802_11Header;
	u8 *pbyRsr, *pbyNewRsr, *pbyRSSI, *pbyFrame;
	u64 *pqwTSFTime;
	u32 bDeFragRx = false;
	u32 cbHeaderOffset, cbIVOffset;
	u32 FrameSize;
	u16 wEtherType = 0;
	s32 iSANodeIndex = -1, iDANodeIndex = -1;
	int ii;
	u8 *pbyRxSts, *pbyRxRate, *pbySQ, *pby3SQ;
	u32 cbHeaderSize;
	PSKeyItem pKey = NULL;
	u16 wRxTSC15_0 = 0;
	u32 dwRxTSC47_16 = 0;
	SKeyItem STempKey;
	/* signed long ldBm = 0; */
	int bIsWEP = false;
	int bExtIV = false;
	u32 dwWbkStatus;
	PRCB pRCBIndicate = pRCB;
	u8 *pbyDAddress;
	u16 *pwPLCP_Length;
	u8 abyVaildRate[MAX_RATE]
		= {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
	u16 wPLCPwithPadding;
	struct ieee80211_hdr *pMACHeader;
	int bRxeapol_key = false;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---------- RXbBulkInProcessData---\n");

	skb = pRCB->skb;

	/* [31:16]RcvByteCount ( not include 4-byte Status ) */
	dwWbkStatus = *((u32 *)(skb->data));
	FrameSize = dwWbkStatus >> 16;
	FrameSize += 4;

	/* USB transfer length must exactly match the hardware byte count. */
	if (BytesToIndicate != FrameSize) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"------- WRONG Length 1\n");
		return false;
	}

	if ((BytesToIndicate > 2372) || (BytesToIndicate <= 40)) {
		/* Frame Size error drop this packet. */
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---------- WRONG Length 2\n");
		return false;
	}

	pbyDAddress = (u8 *)(skb->data);
	pbyRxSts = pbyDAddress+4;
	pbyRxRate = pbyDAddress+5;

	/* real Frame Size = USBFrameSize -4WbkStatus - 4RxStatus - 8TSF
	 *                   - 4RSR - 4SQ3 - ?Padding
	 * if SQ3 the range is 24~27, if no SQ3 the range is 20~23
	 * real Frame size in PLCPLength field. */
	pwPLCP_Length = (u16 *) (pbyDAddress + 6);
	/* Fix hardware bug => PLCP_Length error */
	if ( ((BytesToIndicate - (*pwPLCP_Length)) > 27) ||
	     ((BytesToIndicate - (*pwPLCP_Length)) < 24) ||
	     (BytesToIndicate < (*pwPLCP_Length)) ) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Wrong PLCP Length %x\n",
			(int) *pwPLCP_Length);
		ASSERT(0);
		return false;
	}

	/* Reject frames whose raw rate byte is not a valid 802.11b/g rate. */
	for ( ii=RATE_1M;ii<MAX_RATE;ii++) {
		if ( *pbyRxRate == abyVaildRate[ii] ) {
			break;
		}
	}
	if ( ii==MAX_RATE ) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Wrong RxRate %x\n",
			(int) *pbyRxRate);
		return false;
	}

	/* PLCP payload is padded to a 4-byte boundary before the trailer. */
	wPLCPwithPadding = ( (*pwPLCP_Length / 4) +
			     ( (*pwPLCP_Length % 4) ? 1:0 ) ) *4;

	pqwTSFTime = (u64 *)(pbyDAddress + 8 + wPLCPwithPadding);
	/* 11g buffers carry three SQ bytes; 11b only one. */
	if(pDevice->byBBType == BB_TYPE_11G) {
		pby3SQ = pbyDAddress + 8 + wPLCPwithPadding + 12;
		pbySQ = pby3SQ;
	} else {
		pbySQ = pbyDAddress + 8 + wPLCPwithPadding + 8;
		pby3SQ = pbySQ;
	}
	pbyNewRsr = pbyDAddress + 8 + wPLCPwithPadding + 9;
	pbyRSSI = pbyDAddress + 8 + wPLCPwithPadding + 10;
	pbyRsr = pbyDAddress + 8 + wPLCPwithPadding + 11;

	FrameSize = *pwPLCP_Length;

	/* 802.11 frame starts after the 8-byte receive buffer header. */
	pbyFrame = pbyDAddress + 8;

	/* update receive statistic counter */
	STAvUpdateRDStatCounter(&pDevice->scStatistic,
				*pbyRsr,
				*pbyNewRsr,
				*pbyRxSts,
				*pbyRxRate,
				pbyFrame,
				FrameSize);

	pMACHeader = (struct ieee80211_hdr *) pbyFrame;

	/* mike add: to judge if current AP is activated? */
	if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
	    (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
		if (pMgmt->sNodeDBTable[0].bActive) {
			if (!compare_ether_addr(pMgmt->abyCurrBSSID, pMACHeader->addr2)) {
				if (pMgmt->sNodeDBTable[0].uInActiveCount != 0)
					pMgmt->sNodeDBTable[0].uInActiveCount = 0;
			}
		}
	}

	/* Unicast: drop duplicates and frames not addressed to us. */
	if (!is_multicast_ether_addr(pMACHeader->addr1)) {
		if (WCTLbIsDuplicate(&(pDevice->sDupRxCache),
				     (struct ieee80211_hdr *) pbyFrame)) {
			pDevice->s802_11Counter.FrameDuplicateCount++;
			return false;
		}

		if (compare_ether_addr(pDevice->abyCurrentNetAddr,
				       pMACHeader->addr1)) {
			return false;
		}
	}

	/* Use for TKIP MIC */
	s_vGetDASA(pbyFrame, &cbHeaderSize, &pDevice->sRxEthHeader);

	/* Drop our own transmissions echoed back to us. */
	if (!compare_ether_addr((u8 *)&(pDevice->sRxEthHeader.h_source[0]),
				pDevice->abyCurrentNetAddr))
		return false;

	if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
	    (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
		if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) {
			p802_11Header = (struct ieee80211_hdr *) (pbyFrame);
			/* get SA NodeIndex */
			if (BSSbIsSTAInNodeDB(pDevice,
					      (u8 *)(p802_11Header->addr2),
					      &iSANodeIndex)) {
				pMgmt->sNodeDBTable[iSANodeIndex].ulLastRxJiffer = jiffies;
				pMgmt->sNodeDBTable[iSANodeIndex].uInActiveCount = 0;
			}
		}
	}

	if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
		if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex) == true) {
			return false;
		}
	}

	if (IS_FC_WEP(pbyFrame)) {
		bool bRxDecryOK = false;

		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"rx WEP pkt\n");
		bIsWEP = true;
		if ((pDevice->bEnableHostWEP) && (iSANodeIndex >= 0)) {
			/* Host-side WEP: build a temporary key from the
			 * per-station node DB entry. */
			pKey = &STempKey;
			pKey->byCipherSuite = pMgmt->sNodeDBTable[iSANodeIndex].byCipherSuite;
			pKey->dwKeyIndex = pMgmt->sNodeDBTable[iSANodeIndex].dwKeyIndex;
			pKey->uKeyLength = pMgmt->sNodeDBTable[iSANodeIndex].uWepKeyLength;
			pKey->dwTSC47_16 = pMgmt->sNodeDBTable[iSANodeIndex].dwTSC47_16;
			pKey->wTSC15_0 = pMgmt->sNodeDBTable[iSANodeIndex].wTSC15_0;
			memcpy(pKey->abyKey,
			       &pMgmt->sNodeDBTable[iSANodeIndex].abyWepKey[0],
			       pKey->uKeyLength);

			bRxDecryOK = s_bHostWepRxEncryption(pDevice,
							    pbyFrame,
							    FrameSize,
							    pbyRsr,
							    pMgmt->sNodeDBTable[iSANodeIndex].bOnFly,
							    pKey,
							    pbyNewRsr,
							    &bExtIV,
							    &wRxTSC15_0,
							    &dwRxTSC47_16);
		} else {
			bRxDecryOK = s_bHandleRxEncryption(pDevice,
							   pbyFrame,
							   FrameSize,
							   pbyRsr,
							   pbyNewRsr,
							   &pKey,
							   &bExtIV,
							   &wRxTSC15_0,
							   &dwRxTSC47_16);
		}

		if (bRxDecryOK) {
			if ((*pbyNewRsr & NEWRSR_DECRYPTOK) == 0) {
				/* Decryption ran but the ICV check failed:
				 * bump the matching 802.11 error counter. */
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ICV Fail\n");
				if ( (pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
				     (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
				     (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) ||
				     (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
				     (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {

					if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
						pDevice->s802_11Counter.TKIPICVErrors++;
					} else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP)) {
						pDevice->s802_11Counter.CCMPDecryptErrors++;
					} else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_WEP)) {
						/* pDevice->s802_11Counter.WEPICVErrorCount.QuadPart++; */
					}
				}
				return false;
			}
		} else {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"WEP Func Fail\n");
			return false;
		}
		/* Trim the per-cipher integrity/ICV trailer from the frame. */
		if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP))
			FrameSize -= 8; /* Message Integrity Code */
		else
			FrameSize -= 4; /* 4 is ICV */
	}

	/*
	 * RX OK
	 */
	/* remove the FCS/CRC length */
	FrameSize -= ETH_FCS_LEN;

	if ( !(*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI)) && /* unicast address */
	     (IS_FRAGMENT_PKT((pbyFrame)))
	     ) {
		/* defragment */
		bDeFragRx = WCTLbHandleFragment(pDevice,
				(struct ieee80211_hdr *) (pbyFrame),
				FrameSize, bIsWEP, bExtIV);
		pDevice->s802_11Counter.ReceivedFragmentCount++;
		if (bDeFragRx) {
			/* defrag complete */
			/* TODO skb, pbyFrame */
			skb = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].skb;
			FrameSize = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength;
			pbyFrame = skb->data + 8;
		} else {
			/* Fragment buffered; wait for the rest. */
			return false;
		}
	}

	/*
	 * Management & Control frame Handle
	 */
	if ((IS_TYPE_DATA((pbyFrame))) == false) {
		/* Handle Control & Manage Frame */

		if (IS_TYPE_MGMT((pbyFrame))) {
			u8 * pbyData1;
			u8 * pbyData2;

			pRxPacket = &(pRCB->sMngPacket);
			pRxPacket->p80211Header = (PUWLAN_80211HDR)(pbyFrame);
			pRxPacket->cbMPDULen = FrameSize;
			pRxPacket->uRSSI = *pbyRSSI;
			pRxPacket->bySQ = *pbySQ;
			pRxPacket->qwLocalTSF = cpu_to_le64(*pqwTSFTime);
			if (bIsWEP) {
				/* strip IV: shuffle payload 4 bytes left. */
				pbyData1 = WLAN_HDR_A3_DATA_PTR(pbyFrame);
				pbyData2 = WLAN_HDR_A3_DATA_PTR(pbyFrame) + 4;
				for (ii = 0; ii < (FrameSize - 4); ii++) {
					*pbyData1 = *pbyData2;
					pbyData1++;
					pbyData2++;
				}
			}

			pRxPacket->byRxRate = s_byGetRateIdx(*pbyRxRate);

			if ( *pbyRxSts == 0 ) {
				/* Discard beacon packet which channel is 0 */
				if ( (WLAN_GET_FC_FSTYPE((pRxPacket->p80211Header->sA3.wFrameCtl)) == WLAN_FSTYPE_BEACON) ||
				     (WLAN_GET_FC_FSTYPE((pRxPacket->p80211Header->sA3.wFrameCtl)) == WLAN_FSTYPE_PROBERESP) ) {
					return false;
				}
			}
			pRxPacket->byRxChannel = (*pbyRxSts) >> 2;

			/* hostap Deamon handle 802.11 management */
			if (pDevice->bEnableHostapd) {
				skb->dev = pDevice->apdev;
				/* skb->data += 4; */
				/* skb->tail += 4; */
				skb->data += 8;
				skb->tail += 8;
				skb_put(skb, FrameSize);
				skb_reset_mac_header(skb);
				skb->pkt_type = PACKET_OTHERHOST;
				skb->protocol = htons(ETH_P_802_2);
				memset(skb->cb, 0, sizeof(skb->cb));
				netif_rx(skb);
				return true;
			}

			/*
			 * Insert the RCB in the Recv Mng list
			 */
			EnqueueRCB(pDevice->FirstRecvMngList,
				   pDevice->LastRecvMngList, pRCBIndicate);
			pDevice->NumRecvMngList++;
			/* Extra reference: the mng work item frees it. */
			if ( bDeFragRx == false) {
				pRCB->Ref++;
			}
			if (pDevice->bIsRxMngWorkItemQueued == false) {
				pDevice->bIsRxMngWorkItemQueued = true;
				tasklet_schedule(&pDevice->RxMngWorkItem);
			}
		} else {
			/* Control Frame */
		};
		return false;
	} else {
		if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
			/* In AP mode, hw only check addr1(BSSID or RA) if equal to local MAC. */
			if ( !(*pbyRsr & RSR_BSSIDOK)) {
				if (bDeFragRx) {
					if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
						DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc more frag bufs\n",
							pDevice->dev->name);
					}
				}
				return false;
			}
		} else {
			/* discard DATA packet while not associate || BSSID error */
			if ((pDevice->bLinkPass == false) ||
			    !(*pbyRsr & RSR_BSSIDOK)) {
				if (bDeFragRx) {
					if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
						DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc more frag bufs\n",
							pDevice->dev->name);
					}
				}
				return false;
			}
			/* mike add:station mode check eapol-key challenge---> */
			{
				u8 Protocol_Version;    /* 802.1x Authentication */
				u8 Packet_Type;         /* 802.1x Authentication */
				u8 Descriptor_type;
				u16 Key_info;
				if (bIsWEP)
					cbIVOffset = 8;
				else
					cbIVOffset = 0;
				/* Peek at the LLC/SNAP EtherType inside the
				 * still-untranslated 802.11 payload. */
				wEtherType = (skb->data[cbIVOffset + 8 + 24 + 6] << 8) |
					skb->data[cbIVOffset + 8 + 24 + 6 + 1];
				Protocol_Version = skb->data[cbIVOffset + 8 + 24 + 6 + 1 +1];
				Packet_Type = skb->data[cbIVOffset + 8 + 24 + 6 + 1 +1+1];
				if (wEtherType == ETH_P_PAE) {
					/* Protocol Type in LLC-Header */
					if(((Protocol_Version==1) ||(Protocol_Version==2)) &&
						(Packet_Type==3)) {
							/* 802.1x OR eapol-key challenge frame receive */
							bRxeapol_key = true;
							Descriptor_type = skb->data[cbIVOffset + 8 + 24 + 6 + 1 +1+1+1+2];
							Key_info = (skb->data[cbIVOffset + 8 + 24 + 6 + 1 +1+1+1+2+1]<<8) |skb->data[cbIVOffset + 8 + 24 + 6 + 1 +1+1+1+2+2] ;
							if(Descriptor_type==2) {
								/* RSN */
								/* printk("WPA2_Rx_eapol-key_info<-----:%x\n",Key_info); */
							} else if(Descriptor_type==254) {
								/* printk("WPA_Rx_eapol-key_info<-----:%x\n",Key_info); */
							}
					}
				}
			}
			/* mike add:station mode check eapol-key challenge<--- */
		}
	}

	/* Data frame Handle */

	if (pDevice->bEnablePSMode) {
		if (IS_FC_MOREDATA((pbyFrame))) {
			if (*pbyRsr & RSR_ADDROK) {
				/* PSbSendPSPOLL((PSDevice)pDevice); */
			}
		} else {
			if (pMgmt->bInTIMWake == true) {
				pMgmt->bInTIMWake = false;
			}
		}
	}

	/* Now it only supports 802.11g Infrastructure Mode, and support rate must up to 54 Mbps */
	if (pDevice->bDiversityEnable && (FrameSize>50) &&
	    (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
	    (pDevice->bLinkPass == true)) {
		BBvAntennaDiversity(pDevice, s_byGetRateIdx(*pbyRxRate), 0);
	}

	/* ++++++++ For BaseBand Algorithm +++++++++++++++ */
	pDevice->uCurrRSSI = *pbyRSSI;
	pDevice->byCurrSQ = *pbySQ;

	/* todo */
	/*
	if ((*pbyRSSI != 0) && (pMgmt->pCurrBSS!=NULL)) {
		RFvRSSITodBm(pDevice, *pbyRSSI, &ldBm);
		// Monitor if RSSI is too strong.
		pMgmt->pCurrBSS->byRSSIStatCnt++;
		pMgmt->pCurrBSS->byRSSIStatCnt %= RSSI_STAT_COUNT;
		pMgmt->pCurrBSS->ldBmAverage[pMgmt->pCurrBSS->byRSSIStatCnt] = ldBm;
		for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
			if (pMgmt->pCurrBSS->ldBmAverage[ii] != 0) {
				pMgmt->pCurrBSS->ldBmMAX = max(pMgmt->pCurrBSS->ldBmAverage[ii], ldBm);
			}
		}
	}
	*/
	/* ----------------------------------------------- */

	if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->bEnable8021x == true)){
		u8 abyMacHdr[24];

		/* Only 802.1x packet incoming allowed */
		if (bIsWEP)
			cbIVOffset = 8;
		else
			cbIVOffset = 0;
		wEtherType = (skb->data[cbIVOffset + 8 + 24 + 6] << 8) |
			skb->data[cbIVOffset + 8 + 24 + 6 + 1];

		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wEtherType = %04x \n", wEtherType);
		if (wEtherType == ETH_P_PAE) {
			/* EAPOL frames go to the hostapd interface. */
			skb->dev = pDevice->apdev;

			if (bIsWEP == true) {
				/* strip IV header(8) */
				memcpy(&abyMacHdr[0], (skb->data + 8), 24);
				memcpy((skb->data + 8 + cbIVOffset), &abyMacHdr[0], 24);
			}
			skb->data += (cbIVOffset + 8);
			skb->tail += (cbIVOffset + 8);
			skb_put(skb, FrameSize);
			skb_reset_mac_header(skb);
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			memset(skb->cb, 0, sizeof(skb->cb));
			netif_rx(skb);
			return true;
		}
		/* check if 802.1x authorized */
		if (!(pMgmt->sNodeDBTable[iSANodeIndex].dwFlags & WLAN_STA_AUTHORIZED))
			return false;
	}

	if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
		if (bIsWEP) {
			FrameSize -= 8; /* MIC */
		}
	}

	/* -------------------------------------------------------------- */
	/* Soft MIC */
	if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
		if (bIsWEP) {
			u32 * pdwMIC_L;
			u32 * pdwMIC_R;
			u32 dwMIC_Priority;
			u32 dwMICKey0 = 0, dwMICKey1 = 0;
			u32 dwLocalMIC_L = 0;
			u32 dwLocalMIC_R = 0;

			/* Select the RX Michael key half: AP always uses the
			 * "remote" half; STA picks by auth mode / key index. */
			if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
				dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[24]));
				dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[28]));
			} else {
				if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
					dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[16]));
					dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[20]));
				} else if ((pKey->dwKeyIndex & BIT28) == 0) {
					dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[16]));
					dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[20]));
				} else {
					dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[24]));
					dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[28]));
				}
			}

			MIC_vInit(dwMICKey0, dwMICKey1);
			MIC_vAppend((u8 *)&(pDevice->sRxEthHeader.h_dest[0]), 12);
			dwMIC_Priority = 0;
			MIC_vAppend((u8 *)&dwMIC_Priority, 4);
			/* 4 is Rcv buffer header, 24 is MAC Header, and 8 is IV and Ext IV. */
			MIC_vAppend((u8 *)(skb->data + 8 + WLAN_HDR_ADDR3_LEN + 8),
				    FrameSize - WLAN_HDR_ADDR3_LEN - 8);
			MIC_vGetMIC(&dwLocalMIC_L, &dwLocalMIC_R);
			MIC_vUnInit();

			pdwMIC_L = (u32 *)(skb->data + 8 + FrameSize);
			pdwMIC_R = (u32 *)(skb->data + 8 + FrameSize + 4);

			if ((cpu_to_le32(*pdwMIC_L) != dwLocalMIC_L) ||
			    (cpu_to_le32(*pdwMIC_R) != dwLocalMIC_R) ||
			    (pDevice->bRxMICFail == true)) {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC comparison is fail!\n");
				pDevice->bRxMICFail = false;
				/* pDevice->s802_11Counter.TKIPLocalMICFailures.QuadPart++; */
				pDevice->s802_11Counter.TKIPLocalMICFailures++;
				if (bDeFragRx) {
					if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
						DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc more frag bufs\n",
							pDevice->dev->name);
					}
				}
				/* send event to wpa_supplicant */
				/* if(pDevice->bWPASuppWextEnabled == true) */
				{
					union iwreq_data wrqu;
					struct iw_michaelmicfailure ev;
					int keyidx = pbyFrame[cbHeaderSize+3] >> 6; /* top two-bits */
					memset(&ev, 0, sizeof(ev));
					ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
					if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
					    (pMgmt->eCurrState == WMAC_STATE_ASSOC) &&
					    (*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI)) == 0) {
						ev.flags |= IW_MICFAILURE_PAIRWISE;
					} else {
						ev.flags |= IW_MICFAILURE_GROUP;
					}

					ev.src_addr.sa_family = ARPHRD_ETHER;
					memcpy(ev.src_addr.sa_data, pMACHeader->addr2, ETH_ALEN);
					memset(&wrqu, 0, sizeof(wrqu));
					wrqu.data.length = sizeof(ev);
					PRINT_K("wireless_send_event--->IWEVMICHAELMICFAILURE\n");
					wireless_send_event(pDevice->dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
				}
				return false;
			}
		}
	}
	/* ---end of SOFT MIC--------------------------------------------- */

	/* ++++++++++ Reply Counter Check +++++++++++++ */
	if ((pKey != NULL) && ((pKey->byCipherSuite == KEY_CTL_TKIP) ||
			      (pKey->byCipherSuite == KEY_CTL_CCMP))) {
		if (bIsWEP) {
			u16 wLocalTSC15_0 = 0;
			u32 dwLocalTSC47_16 = 0;
			unsigned long long RSC = 0;
			/* endian issues */
			RSC = *((unsigned long long *) &(pKey->KeyRSC));
			wLocalTSC15_0 = (u16) RSC;
			dwLocalTSC47_16 = (u32) (RSC>>16);

			/* Record the received TSC as the new key RSC. */
			RSC = dwRxTSC47_16;
			RSC <<= 16;
			RSC += wRxTSC15_0;
			memcpy(&(pKey->KeyRSC), &RSC, sizeof(u64));

			if (pDevice->vnt_mgmt.eCurrMode == WMAC_MODE_ESS_STA &&
				pDevice->vnt_mgmt.eCurrState == WMAC_STATE_ASSOC) {
				/* check RSC */
				if ( (wRxTSC15_0 < wLocalTSC15_0) &&
				     (dwRxTSC47_16 <= dwLocalTSC47_16) &&
				     !((dwRxTSC47_16 == 0) && (dwLocalTSC47_16 == 0xFFFFFFFF))) {
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TSC is illegal~~!\n ");
					if (pKey->byCipherSuite == KEY_CTL_TKIP)
						/* pDevice->s802_11Counter.TKIPReplays.QuadPart++; */
						pDevice->s802_11Counter.TKIPReplays++;
					else
						/* pDevice->s802_11Counter.CCMPReplays.QuadPart++; */
						pDevice->s802_11Counter.CCMPReplays++;

					if (bDeFragRx) {
						if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
							DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc more frag bufs\n",
								pDevice->dev->name);
						}
					}
					return false;
				}
			}
		}
	} /* ----- End of Reply Counter Check -------------------------- */

	/* Translate the 802.11 header into 802.3 form in place. */
	s_vProcessRxMACHeader(pDevice, (u8 *)(skb->data+8), FrameSize, bIsWEP, bExtIV,
			      &cbHeaderOffset);
	FrameSize -= cbHeaderOffset;
	cbHeaderOffset += 8; /* 8 is Rcv buffer header */

	/* Null data, framesize = 12 */
	if (FrameSize < 12)
		return false;

	if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
		/* AP mode: relay/queue handling may consume the skb. */
		if (s_bAPModeRxData(pDevice,
				    skb,
				    FrameSize,
				    cbHeaderOffset,
				    iSANodeIndex,
				    iDANodeIndex
				    ) == false) {
			if (bDeFragRx) {
				if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
					DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc more frag bufs\n",
						pDevice->dev->name);
				}
			}
			return false;
		}
	}

	/* Indicate the translated 802.3 frame to the network stack. */
	skb->data += cbHeaderOffset;
	skb->tail += cbHeaderOffset;
	skb_put(skb, FrameSize);
	skb->protocol=eth_type_trans(skb, skb->dev);
	skb->ip_summed=CHECKSUM_NONE;
	pStats->rx_bytes +=skb->len;
	pStats->rx_packets++;
	netif_rx(skb);

	if (bDeFragRx) {
		/* The indicated skb came from the defrag buffer; replace it. */
		if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
			DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc more frag bufs\n",
				pDevice->dev->name);
		}
		return false;
	}

	return true;
}

/*
 * s_bAPModeRxCtl - AP-mode receive filter / power-save control.
 *
 * For data and PS-Poll frames, enforces 802.11 frame-class rules for the
 * sending station (deauth on class-2 from a non-authenticated STA, disassoc
 * on class-3 from a non-associated STA) and tracks the station's power-save
 * state, scheduling WLAN_CMD_RX_PSPOLL work when buffered PS frames must be
 * flushed.
 *
 * Returns true when the frame was fully handled here and the caller must
 * drop it; false when normal receive processing should continue.
 */
static int s_bAPModeRxCtl(struct vnt_private *pDevice, u8 *pbyFrame,
	s32 iSANodeIndex)
{
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct ieee80211_hdr *p802_11Header;
	CMD_STATUS Status;

	if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) {
		p802_11Header = (struct ieee80211_hdr *) (pbyFrame);
		if (!IS_TYPE_MGMT(pbyFrame)) {
			/* Data & PS-Poll packet */
			/* check frame class */
			if (iSANodeIndex > 0) {
				/* frame class 3 fliter & checking */
				if (pMgmt->sNodeDBTable[iSANodeIndex].eNodeState < NODE_AUTH) {
					/* send deauth notification */
					/* reason = (6) class 2 received from nonauth sta */
					vMgrDeAuthenBeginSta(pDevice,
							     pMgmt,
							     (u8 *)(p802_11Header->addr2),
							     (WLAN_MGMT_REASON_CLASS2_NONAUTH),
							     &Status
							     );
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: send vMgrDeAuthenBeginSta 1\n");
					return true;
				}
				if (pMgmt->sNodeDBTable[iSANodeIndex].eNodeState < NODE_ASSOC) {
					/* send deassoc notification */
					/* reason = (7) class 3 received from nonassoc sta */
					vMgrDisassocBeginSta(pDevice,
							     pMgmt,
							     (u8 *)(p802_11Header->addr2),
							     (WLAN_MGMT_REASON_CLASS3_NONASSOC),
							     &Status
							     );
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: send vMgrDisassocBeginSta 2\n");
					return true;
				}

				if (pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable) {
					/* delcare received ps-poll event */
					if (IS_CTL_PSPOLL(pbyFrame)) {
						pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
						bScheduleCommand((void *) pDevice,
								 WLAN_CMD_RX_PSPOLL,
								 NULL);
						DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 1\n");
					} else {
						/* check Data PS state */
						/* if PW bit off, send out all PS bufferring packets. */
						if (!IS_FC_POWERMGT(pbyFrame)) {
							pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = false;
							pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
							bScheduleCommand((void *) pDevice,
									 WLAN_CMD_RX_PSPOLL,
									 NULL);
							DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 2\n");
						}
					}
				} else {
					if (IS_FC_POWERMGT(pbyFrame)) {
						pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = true;
						/* Once if STA in PS state, enable multicast bufferring */
						pMgmt->sNodeDBTable[0].bPSEnable = true;
					} else {
						/* clear all pending PS frame. */
						if (pMgmt->sNodeDBTable[iSANodeIndex].wEnQueueCnt > 0) {
							pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = false;
							pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
							bScheduleCommand((void *) pDevice,
									 WLAN_CMD_RX_PSPOLL,
									 NULL);
							DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 3\n");
						}
					}
				}
			} else {
				/* Unknown sender: deauth it. */
				vMgrDeAuthenBeginSta(pDevice,
						     pMgmt,
						     (u8 *)(p802_11Header->addr2),
						     (WLAN_MGMT_REASON_CLASS2_NONAUTH),
						     &Status
						     );
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: send vMgrDeAuthenBeginSta 3\n");
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "BSSID:%pM\n",
					p802_11Header->addr3);
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR2:%pM\n",
					p802_11Header->addr2);
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ADDR1:%pM\n",
					p802_11Header->addr1);
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: frame_control= %x\n",
					p802_11Header->frame_control);
				return true;
			}
		}
	}
	return false;
}

/*
 * s_bHandleRxEncryption - decrypt a received frame using the driver's key
 * table ("on-fly" hardware decrypt assisted by software for old revisions).
 *
 * Selects pairwise vs. group key from the IV key index and auth mode,
 * extracts the TSC (replay counter) for TKIP/CCMP, and for VT3253 A1 or
 * software-WEP keys performs the RC4 decrypt + CRC32 ICV check in software.
 *
 * Out: *pKeyOut (key used, or NULL), *pbExtIV, *pwRxTSC15_0,
 *      *pdwRxTSC47_16. Returns false when no usable key was found.
 */
static int s_bHandleRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame,
	u32 FrameSize, u8 *pbyRsr, u8 *pbyNewRsr, PSKeyItem *pKeyOut,
	s32 *pbExtIV, u16 *pwRxTSC15_0, u32
	*pdwRxTSC47_16)
{
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	u32 PayloadLen = FrameSize;
	u8 *pbyIV;
	u8 byKeyIdx;
	PSKeyItem pKey = NULL;
	u8 byDecMode = KEY_CTL_WEP;

	*pwRxTSC15_0 = 0;
	*pdwRxTSC47_16 = 0;

	pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
	if ( WLAN_GET_FC_TODS(*(u16 *)pbyFrame) &&
	     WLAN_GET_FC_FROMDS(*(u16 *)pbyFrame) ) {
		pbyIV += 6; /* 6 is 802.11 address4 */
		PayloadLen -= 6;
	}
	/* Key index lives in the top two bits of IV byte 3. */
	byKeyIdx = (*(pbyIV+3) & 0xc0);
	byKeyIdx >>= 6;
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\nKeyIdx: %d\n", byKeyIdx);

	if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
	    (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
	    (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) ||
	    (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
	    (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
		if (((*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI)) == 0) &&
		    (pMgmt->byCSSPK != KEY_CTL_NONE)) {
			/* unicast pkt use pairwise key */
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"unicast pkt\n");
			if (KeybGetKey(&(pDevice->sKey), pDevice->abyBSSID, 0xFFFFFFFF,
				       &pKey) == true) {
				if (pMgmt->byCSSPK == KEY_CTL_TKIP)
					byDecMode = KEY_CTL_TKIP;
				else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
					byDecMode = KEY_CTL_CCMP;
			}
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"unicast pkt: %d, %p\n",
				byDecMode, pKey);
		} else {
			/* use group key */
			KeybGetKey(&(pDevice->sKey), pDevice->abyBSSID, byKeyIdx, &pKey);
			if (pMgmt->byCSSGK == KEY_CTL_TKIP)
				byDecMode = KEY_CTL_TKIP;
			else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
				byDecMode = KEY_CTL_CCMP;
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"group pkt: %d, %d, %p\n",
				byKeyIdx, byDecMode, pKey);
		}
	}
	/* our WEP only support Default Key */
	if (pKey == NULL) {
		/* use default group key */
		KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, byKeyIdx, &pKey);
		if (pMgmt->byCSSGK == KEY_CTL_TKIP)
			byDecMode = KEY_CTL_TKIP;
		else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
			byDecMode = KEY_CTL_CCMP;
	}
	*pKeyOut = pKey;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"AES:%d %d %d\n",
		pMgmt->byCSSPK, pMgmt->byCSSGK, byDecMode);

	if (pKey == NULL) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey == NULL\n");
		if (byDecMode == KEY_CTL_WEP) {
			/* pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++; */
		} else if (pDevice->bLinkPass == true) {
			/* pDevice->s802_11Counter.DecryptFailureCount.QuadPart++; */
		}
		return false;
	}
	if (byDecMode != pKey->byCipherSuite) {
		/* Negotiated cipher and key cipher disagree: undecryptable. */
		if (byDecMode == KEY_CTL_WEP) {
			/* pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++; */
		} else if (pDevice->bLinkPass == true) {
			/* pDevice->s802_11Counter.DecryptFailureCount.QuadPart++; */
		}
		*pKeyOut = NULL;
		return false;
	}
	if (byDecMode == KEY_CTL_WEP) {
		/* handle WEP */
		if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
		    (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true)) {
			/* Software WEP */
			/* 1. 3253A */
			/* 2. WEP 256 */
			PayloadLen -= (WLAN_HDR_ADDR3_LEN + 4 + 4); /* 24 is 802.11 header,4 is IV, 4 is crc */
			memcpy(pDevice->abyPRNG, pbyIV, 3);
			memcpy(pDevice->abyPRNG + 3, pKey->abyKey, pKey->uKeyLength);
			rc4_init(&pDevice->SBox, pDevice->abyPRNG, pKey->uKeyLength + 3);
			rc4_encrypt(&pDevice->SBox, pbyIV+4, pbyIV+4, PayloadLen);

			if (ETHbIsBufferCrc32Ok(pbyIV+4, PayloadLen)) {
				*pbyNewRsr |= NEWRSR_DECRYPTOK;
			}
		}
	} else if ((byDecMode == KEY_CTL_TKIP) ||
		   (byDecMode == KEY_CTL_CCMP)) {
		/* TKIP/AES */
		PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); /* 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc */
		*pdwRxTSC47_16 = cpu_to_le32(*(u32 *)(pbyIV + 4));
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
		if (byDecMode == KEY_CTL_TKIP) {
			/* TKIP: TSC0 is IV byte 0, TSC1 is IV byte 2. */
			*pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
		} else {
			*pwRxTSC15_0 = cpu_to_le16(*(u16 *)pbyIV);
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TSC0_15: %x\n", *pwRxTSC15_0);

		if ((byDecMode == KEY_CTL_TKIP) &&
		    (pDevice->byLocalID <= REV_ID_VT3253_A1)) {
			/* Software TKIP */
			/* 1. 3253 A */
			struct ieee80211_hdr *pMACHeader = (struct ieee80211_hdr *) (pbyFrame);
			TKIPvMixKey(pKey->abyKey, pMACHeader->addr2,
				    *pwRxTSC15_0, *pdwRxTSC47_16, pDevice->abyPRNG);
			rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN);
			rc4_encrypt(&pDevice->SBox, pbyIV+8, pbyIV+8, PayloadLen);
			if (ETHbIsBufferCrc32Ok(pbyIV+8, PayloadLen)) {
				*pbyNewRsr |= NEWRSR_DECRYPTOK;
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ICV OK!\n");
			} else {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ICV FAIL!!!\n");
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"PayloadLen = %d\n",
					PayloadLen);
			}
		}
	} /* end of TKIP/AES */

	if ((*(pbyIV+3) & 0x20) != 0)
		*pbExtIV = true;
	return true;
}

/*
 * s_bHostWepRxEncryption - decrypt a received frame using a host-managed
 * per-station WEP/TKIP/CCMP key (AP "host WEP" mode).
 *
 * Unlike s_bHandleRxEncryption(), the key is supplied by the caller (built
 * from the node database); bOnFly indicates whether the hardware already
 * decrypted the frame, otherwise the cipher is run in software here.
 *
 * Out: *pbExtIV, *pwRxTSC15_0, *pdwRxTSC47_16. Returns false when the
 * negotiated cipher does not match the key's cipher suite.
 */
static int s_bHostWepRxEncryption(struct vnt_private *pDevice, u8 *pbyFrame,
	u32 FrameSize, u8 *pbyRsr, int bOnFly, PSKeyItem pKey, u8 *pbyNewRsr,
	s32 *pbExtIV, u16 *pwRxTSC15_0, u32 *pdwRxTSC47_16)
{
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct ieee80211_hdr *pMACHeader;
	u32 PayloadLen = FrameSize;
	u8 *pbyIV;
	u8 byKeyIdx;
	u8 byDecMode = KEY_CTL_WEP;

	*pwRxTSC15_0 = 0;
	*pdwRxTSC47_16 = 0;

	pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
	if ( WLAN_GET_FC_TODS(*(u16 *)pbyFrame) &&
	     WLAN_GET_FC_FROMDS(*(u16 *)pbyFrame) ) {
		pbyIV += 6; /* 6 is 802.11 address4 */
		PayloadLen -= 6;
	}
	/* Key index from the top two bits of IV byte 3. */
	byKeyIdx = (*(pbyIV+3) & 0xc0);
	byKeyIdx >>= 6;
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\nKeyIdx: %d\n", byKeyIdx);

	if (pMgmt->byCSSGK == KEY_CTL_TKIP)
		byDecMode = KEY_CTL_TKIP;
	else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
		byDecMode = KEY_CTL_CCMP;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"AES:%d %d %d\n",
		pMgmt->byCSSPK, pMgmt->byCSSGK, byDecMode);

	if (byDecMode != pKey->byCipherSuite) {
		if (byDecMode == KEY_CTL_WEP) {
			/* pDevice->s802_11Counter.WEPUndecryptableCount.QuadPart++; */
		} else if (pDevice->bLinkPass == true) {
			/* pDevice->s802_11Counter.DecryptFailureCount.QuadPart++; */
		}
		return false;
	}

	if (byDecMode == KEY_CTL_WEP) {
		/* handle WEP */
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"byDecMode == KEY_CTL_WEP\n");
		if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
		    (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true) ||
		    (bOnFly == false)) {
			/* Software WEP */
			/* 1. 3253A */
			/* 2. WEP 256 */
			/* 3. NotOnFly */
			PayloadLen -= (WLAN_HDR_ADDR3_LEN + 4 + 4); /* 24 is 802.11 header,4 is IV, 4 is crc */
			memcpy(pDevice->abyPRNG, pbyIV, 3);
			memcpy(pDevice->abyPRNG + 3, pKey->abyKey, pKey->uKeyLength);
			rc4_init(&pDevice->SBox, pDevice->abyPRNG, pKey->uKeyLength + 3);
			rc4_encrypt(&pDevice->SBox, pbyIV+4, pbyIV+4, PayloadLen);

			if (ETHbIsBufferCrc32Ok(pbyIV+4, PayloadLen)) {
				*pbyNewRsr |= NEWRSR_DECRYPTOK;
			}
		}
	} else if ((byDecMode == KEY_CTL_TKIP) ||
		   (byDecMode == KEY_CTL_CCMP)) {
		/* TKIP/AES */
		PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); /* 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc */
		*pdwRxTSC47_16 = cpu_to_le32(*(u32 *)(pbyIV + 4));
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);

		if (byDecMode == KEY_CTL_TKIP) {
			/* TKIP: TSC0 is IV byte 0, TSC1 is IV byte 2. */
			*pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
		} else {
			*pwRxTSC15_0 = cpu_to_le16(*(u16 *)pbyIV);
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TSC0_15: %x\n", *pwRxTSC15_0);

		if (byDecMode == KEY_CTL_TKIP) {
			if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
			    (bOnFly == false)) {
				/* Software TKIP */
				/* 1. 3253 A */
				/* 2. NotOnFly */
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"soft KEY_CTL_TKIP \n");
				pMACHeader = (struct ieee80211_hdr *) (pbyFrame);
				TKIPvMixKey(pKey->abyKey, pMACHeader->addr2,
					    *pwRxTSC15_0, *pdwRxTSC47_16, pDevice->abyPRNG);
				rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN);
				rc4_encrypt(&pDevice->SBox, pbyIV+8, pbyIV+8, PayloadLen);
				if (ETHbIsBufferCrc32Ok(pbyIV+8, PayloadLen)) {
					*pbyNewRsr |= NEWRSR_DECRYPTOK;
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ICV OK!\n");
				} else {
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ICV FAIL!!!\n");
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"PayloadLen = %d\n",
						PayloadLen);
				}
			}
		}

		if (byDecMode == KEY_CTL_CCMP) {
			if (bOnFly == false) {
				/* Software CCMP */
				/* NotOnFly */
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"soft KEY_CTL_CCMP\n");
				if (AESbGenCCMP(pKey->abyKey, pbyFrame, FrameSize)) {
					*pbyNewRsr |= NEWRSR_DECRYPTOK;
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"CCMP MIC compare OK!\n");
				} else {
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"CCMP MIC fail!\n");
				}
			}
		}
	} /* end of TKIP/AES */

	if ((*(pbyIV+3) & 0x20) != 0)
		*pbExtIV = true;
	return true;
}

/*
 * s_bAPModeRxData - AP-mode data-frame relay and power-save queueing.
 *
 * Decides what to do with a data frame received while acting as an AP:
 * multicast frames are buffered until DTIM when any station is in
 * power-save (and also forwarded up), unicast frames destined to an
 * associated station are relayed (queued if that station is in PS).
 *
 * Returns true when the frame should also be indicated to the local stack,
 * false when it was consumed (relayed/queued) or must be dropped.
 */
static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb,
	u32 FrameSize, u32 cbHeaderOffset, s32 iSANodeIndex, s32 iDANodeIndex)
{
	struct sk_buff *skbcpy;
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	int bRelayAndForward = false;
	int bRelayOnly = false;
	u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
	u16 wAID;

	if (FrameSize > CB_MAX_BUF_SIZE)
		return false;
	/* check DA */
	if (is_multicast_ether_addr((u8 *)(skb->data+cbHeaderOffset))) {
		if (pMgmt->sNodeDBTable[0].bPSEnable) {
			skbcpy = dev_alloc_skb((int)pDevice->rx_buf_sz);
			/* if any node in PS mode, buffer packet until DTIM. */
if (skbcpy == NULL) { DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "relay multicast no skb available \n"); } else { skbcpy->dev = pDevice->dev; skbcpy->len = FrameSize; memcpy(skbcpy->data, skb->data+cbHeaderOffset, FrameSize); skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skbcpy); pMgmt->sNodeDBTable[0].wEnQueueCnt++; // set tx map pMgmt->abyPSTxMap[0] |= byMask[0]; } } else { bRelayAndForward = true; } } else { // check if relay if (BSSbIsSTAInNodeDB(pDevice, (u8 *)(skb->data+cbHeaderOffset), &iDANodeIndex)) { if (pMgmt->sNodeDBTable[iDANodeIndex].eNodeState >= NODE_ASSOC) { if (pMgmt->sNodeDBTable[iDANodeIndex].bPSEnable) { // queue this skb until next PS tx, and then release. skb->data += cbHeaderOffset; skb->tail += cbHeaderOffset; skb_put(skb, FrameSize); skb_queue_tail(&pMgmt->sNodeDBTable[iDANodeIndex].sTxPSQueue, skb); pMgmt->sNodeDBTable[iDANodeIndex].wEnQueueCnt++; wAID = pMgmt->sNodeDBTable[iDANodeIndex].wAID; pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7]; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "relay: index= %d, pMgmt->abyPSTxMap[%d]= %d\n", iDANodeIndex, (wAID >> 3), pMgmt->abyPSTxMap[wAID >> 3]); return true; } else { bRelayOnly = true; } } } } if (bRelayOnly || bRelayAndForward) { // relay this packet right now if (bRelayAndForward) iDANodeIndex = 0; if ((pDevice->uAssocCount > 1) && (iDANodeIndex >= 0)) { bRelayPacketSend(pDevice, (u8 *) (skb->data + cbHeaderOffset), FrameSize, (unsigned int) iDANodeIndex); } if (bRelayOnly) return false; } // none associate, don't forward if (pDevice->uAssocCount == 0) return false; return true; } void RXvWorkItem(struct vnt_private *pDevice) { int ntStatus; PRCB pRCB = NULL; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Polling Thread\n"); spin_lock_irq(&pDevice->lock); while ((pDevice->Flags & fMP_POST_READS) && MP_IS_READY(pDevice) && (pDevice->NumRecvFreeList != 0) ) { pRCB = pDevice->FirstRecvFreeList; pDevice->NumRecvFreeList--; ASSERT(pRCB);// cannot be NULL DequeueRCB(pDevice->FirstRecvFreeList, 
pDevice->LastRecvFreeList); ntStatus = PIPEnsBulkInUsbRead(pDevice, pRCB); } pDevice->bIsRxWorkItemQueued = false; spin_unlock_irq(&pDevice->lock); } void RXvFreeRCB(PRCB pRCB, int bReAllocSkb) { struct vnt_private *pDevice = pRCB->pDevice; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->RXvFreeRCB\n"); ASSERT(!pRCB->Ref); // should be 0 ASSERT(pRCB->pDevice); // shouldn't be NULL if (bReAllocSkb == false) { kfree_skb(pRCB->skb); bReAllocSkb = true; } if (bReAllocSkb == true) { pRCB->skb = dev_alloc_skb((int)pDevice->rx_buf_sz); // todo error handling if (pRCB->skb == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to re-alloc rx skb\n"); }else { pRCB->skb->dev = pDevice->dev; } } // // Insert the RCB back in the Recv free list // EnqueueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList, pRCB); pDevice->NumRecvFreeList++; if ((pDevice->Flags & fMP_POST_READS) && MP_IS_READY(pDevice) && (pDevice->bIsRxWorkItemQueued == false) ) { pDevice->bIsRxWorkItemQueued = true; tasklet_schedule(&pDevice->ReadWorkItem); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----RXFreeRCB %d %d\n",pDevice->NumRecvFreeList, pDevice->NumRecvMngList); } void RXvMngWorkItem(struct vnt_private *pDevice) { PRCB pRCB = NULL; struct vnt_rx_mgmt *pRxPacket; int bReAllocSkb = false; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Mng Thread\n"); spin_lock_irq(&pDevice->lock); while (pDevice->NumRecvMngList!=0) { pRCB = pDevice->FirstRecvMngList; pDevice->NumRecvMngList--; DequeueRCB(pDevice->FirstRecvMngList, pDevice->LastRecvMngList); if(!pRCB){ break; } ASSERT(pRCB);// cannot be NULL pRxPacket = &(pRCB->sMngPacket); vMgrRxManagePacket(pDevice, &pDevice->vnt_mgmt, pRxPacket); pRCB->Ref--; if(pRCB->Ref == 0) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RxvFreeMng %d %d\n",pDevice->NumRecvFreeList, pDevice->NumRecvMngList); RXvFreeRCB(pRCB, bReAllocSkb); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rx Mng Only we have the right to free RCB\n"); } } pDevice->bIsRxMngWorkItemQueued = false; spin_unlock_irq(&pDevice->lock); }
gpl-2.0
muhviehstah/N915FY-MM-Kernel
net/ipv4/netfilter/nf_nat_proto_gre.c
2668
4041
/* * nf_nat_proto_gre.c * * NAT protocol helper module for GRE. * * GRE is a generic encapsulation protocol, which is generally not very * suited for NAT, as it has no protocol-specific part as port numbers. * * It has an optional key field, which may help us distinguishing two * connections between the same two hosts. * * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784 * * PPTP is built on top of a modified version of GRE, and has a mandatory * field called "CallID", which serves us for the same purpose as the key * field in plain GRE. * * Documentation about PPTP can be found in RFC 2637 * * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org> * * Development of this code funded by Astaro AG (http://www.astaro.com/) * * (C) 2006-2012 Patrick McHardy <kaber@trash.net> * */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_l4proto.h> #include <linux/netfilter/nf_conntrack_proto_gre.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); /* generate unique tuple ... 
*/ static void gre_unique_tuple(const struct nf_nat_l3proto *l3proto, struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { static u_int16_t key; __be16 *keyptr; unsigned int min, i, range_size; /* If there is no master conntrack we are not PPTP, do not change tuples */ if (!ct->master) return; if (maniptype == NF_NAT_MANIP_SRC) keyptr = &tuple->src.u.gre.key; else keyptr = &tuple->dst.u.gre.key; if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) { pr_debug("%p: NATing GRE PPTP\n", ct); min = 1; range_size = 0xffff; } else { min = ntohs(range->min_proto.gre.key); range_size = ntohs(range->max_proto.gre.key) - min + 1; } pr_debug("min = %u, range_size = %u\n", min, range_size); for (i = 0; ; ++key) { *keyptr = htons(min + key % range_size); if (++i == range_size || !nf_nat_used_tuple(tuple, ct)) return; } pr_debug("%p: no NAT mapping\n", ct); return; } /* manipulate a GRE packet according to maniptype */ static bool gre_manip_pkt(struct sk_buff *skb, const struct nf_nat_l3proto *l3proto, unsigned int iphdroff, unsigned int hdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { const struct gre_hdr *greh; struct gre_hdr_pptp *pgreh; /* pgreh includes two optional 32bit fields which are not required * to be there. That's where the magic '8' comes from */ if (!skb_make_writable(skb, hdroff + sizeof(*pgreh) - 8)) return false; greh = (void *)skb->data + hdroff; pgreh = (struct gre_hdr_pptp *)greh; /* we only have destination manip of a packet, since 'source key' * is not present in the packet itself */ if (maniptype != NF_NAT_MANIP_DST) return true; switch (greh->version) { case GRE_VERSION_1701: /* We do not currently NAT any GREv0 packets. 
* Try to behave like "nf_nat_proto_unknown" */ break; case GRE_VERSION_PPTP: pr_debug("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key)); pgreh->call_id = tuple->dst.u.gre.key; break; default: pr_debug("can't nat unknown GRE version\n"); return false; } return true; } static const struct nf_nat_l4proto gre = { .l4proto = IPPROTO_GRE, .manip_pkt = gre_manip_pkt, .in_range = nf_nat_l4proto_in_range, .unique_tuple = gre_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .nlattr_to_range = nf_nat_l4proto_nlattr_to_range, #endif }; static int __init nf_nat_proto_gre_init(void) { return nf_nat_l4proto_register(NFPROTO_IPV4, &gre); } static void __exit nf_nat_proto_gre_fini(void) { nf_nat_l4proto_unregister(NFPROTO_IPV4, &gre); } module_init(nf_nat_proto_gre_init); module_exit(nf_nat_proto_gre_fini); void nf_nat_need_gre(void) { return; } EXPORT_SYMBOL_GPL(nf_nat_need_gre);
gpl-2.0
pacificIT/udoo_kernel_imx
drivers/video/bfin-lq035q1-fb.c
2924
21469
/*
 * Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
 *
 * Copyright 2008-2009 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#define DRIVER_NAME "bfin-lq035q1"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/backlight.h>
#include <linux/lcd.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#include <asm/blackfin.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/portmux.h>
#include <asm/gptimers.h>
#include <asm/bfin-lq035q1.h>

/* Map HSYNC/VSYNC onto the general-purpose timers available per CPU family:
 * BF533/BF538 use TIMER1/TIMER2, everything else TIMER0/TIMER1. */
#if defined(BF533_FAMILY) || defined(BF538_FAMILY)
#define TIMER_HSYNC_id			TIMER1_id
#define TIMER_HSYNCbit			TIMER1bit
#define TIMER_HSYNC_STATUS_TRUN		TIMER_STATUS_TRUN1
#define TIMER_HSYNC_STATUS_TIMIL	TIMER_STATUS_TIMIL1
#define TIMER_HSYNC_STATUS_TOVF		TIMER_STATUS_TOVF1

#define TIMER_VSYNC_id			TIMER2_id
#define TIMER_VSYNCbit			TIMER2bit
#define TIMER_VSYNC_STATUS_TRUN		TIMER_STATUS_TRUN2
#define TIMER_VSYNC_STATUS_TIMIL	TIMER_STATUS_TIMIL2
#define TIMER_VSYNC_STATUS_TOVF		TIMER_STATUS_TOVF2
#else
#define TIMER_HSYNC_id			TIMER0_id
#define TIMER_HSYNCbit			TIMER0bit
#define TIMER_HSYNC_STATUS_TRUN		TIMER_STATUS_TRUN0
#define TIMER_HSYNC_STATUS_TIMIL	TIMER_STATUS_TIMIL0
#define TIMER_HSYNC_STATUS_TOVF		TIMER_STATUS_TOVF0

#define TIMER_VSYNC_id			TIMER1_id
#define TIMER_VSYNCbit			TIMER1bit
#define TIMER_VSYNC_STATUS_TRUN		TIMER_STATUS_TRUN1
#define TIMER_VSYNC_STATUS_TIMIL	TIMER_STATUS_TIMIL1
#define TIMER_VSYNC_STATUS_TOVF		TIMER_STATUS_TOVF1
#endif

#define LCD_X_RES		320	/* Horizontal Resolution */
#define LCD_Y_RES		240	/* Vertical Resolution */
#define	DMA_BUS_SIZE		16
#define U_LINE			4	/* Blanking Lines */

/* Interface 16/18-bit TFT over an 8-bit wide PPI using a small
 * Programmable Logic Device (CPLD)
 * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
 */

#define BFIN_LCD_NBR_PALETTE_ENTRIES	256

/* PPI_CONTROL bit values used when configuring the PPI for LCD output */
#define PPI_TX_MODE		0x2
#define PPI_XFER_TYPE_11	0xC
#define PPI_PORT_CFG_01		0x10
#define PPI_POLS_1		0x8000

/* SPI command bytes and register indices of the LQ035Q1 panel controller */
#define LQ035_INDEX		0x74
#define LQ035_DATA		0x76

#define LQ035_DRIVER_OUTPUT_CTL	0x1
#define LQ035_SHUT_CTL		0x11

#define LQ035_DRIVER_OUTPUT_MASK	(LQ035_LR | LQ035_TB | LQ035_BGR | LQ035_REV)
#define LQ035_DRIVER_OUTPUT_DEFAULT	(0x2AEF & ~LQ035_DRIVER_OUTPUT_MASK)

#define LQ035_SHUT		(1 << 0)	/* Shutdown */
#define LQ035_ON		(0 << 0)	/* Shutdown */

/* Per-device state; lives in fb_info->par. */
struct bfin_lq035q1fb_info {
	struct fb_info *fb;
	struct device *dev;
	struct spi_driver spidrv;	/* embedded SPI driver for the panel controller */
	struct bfin_lq035q1fb_disp_info	*disp_info;
	unsigned char *fb_buffer;	/* RGB Buffer */
	dma_addr_t dma_handle;
	int lq035_open_cnt;		/* fb open reference count */
	int irq;			/* PPI error IRQ */
	spinlock_t lock;		/* lock */
	u32 pseudo_pal[16];

	u32 lcd_bpp;
	u32 h_actpix;
	u32 h_period;
	u32 h_pulse;
	u32 h_start;
	u32 v_lines;
	u32 v_pulse;
	u32 v_period;
};

static int nocursor;
module_param(nocursor, int, 0644);
MODULE_PARM_DESC(nocursor, "cursor enable/disable");

struct spi_control {
	unsigned short mode;
};

/*
 * Write one 16-bit value to a panel register over SPI: an index transfer
 * selecting @reg followed by a data transfer carrying @value (big-endian).
 * Returns 0 on success or a negative/ORed spi_write() error.
 */
static int lq035q1_control(struct spi_device *spi, unsigned char reg,
			   unsigned short value)
{
	int ret;
	u8 regs[3] = { LQ035_INDEX, 0, 0 };
	u8 dat[3] = { LQ035_DATA, 0, 0 };

	if (!spi)
		return -ENODEV;

	regs[2] = reg;
	dat[1] = value >> 8;
	dat[2] = value & 0xFF;

	ret = spi_write(spi, regs, ARRAY_SIZE(regs));
	ret |= spi_write(spi, dat, ARRAY_SIZE(dat));
	return ret;
}

/*
 * SPI sub-device probe: power the panel on and program the driver-output
 * register from platform data.  The spi_driver is embedded in the fb info,
 * hence the container_of() back-reference via the driver pointer.
 */
static int __devinit lq035q1_spidev_probe(struct spi_device *spi)
{
	int ret;
	struct spi_control *ctl;
	struct bfin_lq035q1fb_info *info = container_of(spi->dev.driver,
						struct bfin_lq035q1fb_info,
						spidrv.driver);

	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return -ENOMEM;

	ctl->mode = (info->disp_info->mode & LQ035_DRIVER_OUTPUT_MASK) |
			LQ035_DRIVER_OUTPUT_DEFAULT;

	ret = lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_ON);
	ret |= lq035q1_control(spi, LQ035_DRIVER_OUTPUT_CTL, ctl->mode);
	if (ret) {
		kfree(ctl);
		return ret;
	}

	spi_set_drvdata(spi, ctl);

	return 0;
}

/* Panel off on SPI device removal.
 * NOTE(review): the spi_control allocated in probe is not freed here —
 * looks like a small leak on unbind; confirm before changing. */
static int lq035q1_spidev_remove(struct spi_device *spi)
{
	return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
}

#ifdef CONFIG_PM
/* Suspend: shut the panel down. */
static int lq035q1_spidev_suspend(struct spi_device *spi, pm_message_t state)
{
	return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
}

/* Resume: restore the cached driver-output mode, then power the panel on. */
static int lq035q1_spidev_resume(struct spi_device *spi)
{
	int ret;
	struct spi_control *ctl = spi_get_drvdata(spi);

	ret = lq035q1_control(spi, LQ035_DRIVER_OUTPUT_CTL, ctl->mode);
	if (ret)
		return ret;

	return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_ON);
}
#else
# define lq035q1_spidev_suspend NULL
# define lq035q1_spidev_resume  NULL
#endif

/* Power down all displays on reboot, poweroff or halt */
static void lq035q1_spidev_shutdown(struct spi_device *spi)
{
	lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
}

/* Drive the backlight GPIO if the board declares one; always succeeds. */
static int lq035q1_backlight(struct bfin_lq035q1fb_info *info, unsigned arg)
{
	if (info->disp_info->use_bl)
		gpio_set_value(info->disp_info->gpio_bl, arg);

	return 0;
}

/*
 * Derive bpp and HS/VS timing from the selected PPI mode.  In the 8-bit PPI
 * modes each pixel takes 2 or 3 PPI clocks and the external CPLD adds a
 * pipeline delay that shifts the first valid pixel.  Returns -EINVAL for an
 * unknown ppi_mode.
 */
static int bfin_lq035q1_calc_timing(struct bfin_lq035q1fb_info *fbi)
{
	unsigned long clocks_per_pix, cpld_pipeline_delay_cor;

	/*
	 * Interface 16/18-bit TFT over an 8-bit wide PPI using a small
	 * Programmable Logic Device (CPLD)
	 * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
	 */

	switch (fbi->disp_info->ppi_mode) {
	case USE_RGB565_16_BIT_PPI:
		fbi->lcd_bpp = 16;
		clocks_per_pix = 1;
		cpld_pipeline_delay_cor = 0;
		break;
	case USE_RGB565_8_BIT_PPI:
		fbi->lcd_bpp = 16;
		clocks_per_pix = 2;
		cpld_pipeline_delay_cor = 3;
		break;
	case USE_RGB888_8_BIT_PPI:
		fbi->lcd_bpp = 24;
		clocks_per_pix = 3;
		cpld_pipeline_delay_cor = 5;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * HS and VS timing parameters (all in number of PPI clk ticks)
	 */
	fbi->h_actpix = (LCD_X_RES * clocks_per_pix);	/* active horizontal pixel */
	fbi->h_period = (336 * clocks_per_pix);		/* HS period */
	fbi->h_pulse = (2 * clocks_per_pix);		/* HS pulse width */
	fbi->h_start = (7 * clocks_per_pix + cpld_pipeline_delay_cor);	/* first valid pixel */

	fbi->v_lines = (LCD_Y_RES + U_LINE);		/* total vertical lines */
	fbi->v_pulse = (2 * clocks_per_pix);		/* VS pulse width (1-5 H_PERIODs) */
	fbi->v_period = (fbi->h_period * fbi->v_lines);	/* VS period */

	return 0;
}

/* Program the PPI for LCD output with the timings from calc_timing().
 * 16-bit PPI mode uses DLEN_16; 8-bit modes pack bytes (DLEN_8 | PACK_EN). */
static void bfin_lq035q1_config_ppi(struct bfin_lq035q1fb_info *fbi)
{
	unsigned ppi_pmode;

	if (fbi->disp_info->ppi_mode == USE_RGB565_16_BIT_PPI)
		ppi_pmode = DLEN_16;
	else
		ppi_pmode = (DLEN_8 | PACK_EN);

	bfin_write_PPI_DELAY(fbi->h_start);
	bfin_write_PPI_COUNT(fbi->h_actpix - 1);
	bfin_write_PPI_FRAME(fbi->v_lines);

	bfin_write_PPI_CONTROL(PPI_TX_MODE |	   /* output mode , PORT_DIR */
			       PPI_XFER_TYPE_11 |  /* sync mode XFR_TYPE */
			       PPI_PORT_CFG_01 |   /* two frame sync PORT_CFG */
			       ppi_pmode |	   /* 8/16 bit data length / PACK_EN? */
			       PPI_POLS_1);	   /* falling edge syncs POLS */
}

static inline void bfin_lq035q1_disable_ppi(void)
{
	bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() & ~PORT_EN);
}

static inline void bfin_lq035q1_enable_ppi(void)
{
	bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);
}

static void bfin_lq035q1_start_timers(void)
{
	enable_gptimers(TIMER_VSYNCbit | TIMER_HSYNCbit);
}

/* Stop both sync timers and clear any latched run/interrupt/overflow status. */
static void bfin_lq035q1_stop_timers(void)
{
	disable_gptimers(TIMER_HSYNCbit | TIMER_VSYNCbit);

	set_gptimer_status(0, TIMER_HSYNC_STATUS_TRUN | TIMER_VSYNC_STATUS_TRUN |
				TIMER_HSYNC_STATUS_TIMIL | TIMER_VSYNC_STATUS_TIMIL |
				TIMER_HSYNC_STATUS_TOVF | TIMER_VSYNC_STATUS_TOVF);
}

/* Configure the HSYNC/VSYNC PWM timers; timers are left stopped. */
static void bfin_lq035q1_init_timers(struct bfin_lq035q1fb_info *fbi)
{
	bfin_lq035q1_stop_timers();

	set_gptimer_period(TIMER_HSYNC_id, fbi->h_period);
	set_gptimer_pwidth(TIMER_HSYNC_id, fbi->h_pulse);
	set_gptimer_config(TIMER_HSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
				      TIMER_TIN_SEL | TIMER_CLK_SEL|
				      TIMER_EMU_RUN);

	set_gptimer_period(TIMER_VSYNC_id, fbi->v_period);
	set_gptimer_pwidth(TIMER_VSYNC_id, fbi->v_pulse);
	set_gptimer_config(TIMER_VSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
				      TIMER_TIN_SEL | TIMER_CLK_SEL |
				      TIMER_EMU_RUN);
}
/* Set up the 2D autobuffer DMA that feeds the PPI from fb_buffer:
 * one row of (X_RES * bpp / 16) 16-bit words per line, v_lines lines. */
static void bfin_lq035q1_config_dma(struct bfin_lq035q1fb_info *fbi)
{
	set_dma_config(CH_PPI,
		       set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO,
					   INTR_DISABLE, DIMENSION_2D,
					   DATA_SIZE_16,
					   DMA_NOSYNC_KEEP_DMA_BUF));
	set_dma_x_count(CH_PPI, (LCD_X_RES * fbi->lcd_bpp) / DMA_BUS_SIZE);
	set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8);
	set_dma_y_count(CH_PPI, fbi->v_lines);
	set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8);
	set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer);
}

/* Pin lists (zero-terminated) for 16-bit and 8-bit PPI operation. */
static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
			    P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
			    P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
			    P_PPI0_D6, P_PPI0_D7, P_PPI0_D8,
			    P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
			    P_PPI0_D12, P_PPI0_D13, P_PPI0_D14,
			    P_PPI0_D15, 0};

static const u16 ppi0_req_8[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
			    P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
			    P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
			    P_PPI0_D6, P_PPI0_D7, 0};

/* Release the PPI pins (and the FS3 workaround GPIO when applicable). */
static inline void bfin_lq035q1_free_ports(unsigned ppi16)
{
	if (ppi16)
		peripheral_free_list(ppi0_req_16);
	else
		peripheral_free_list(ppi0_req_8);

	if (ANOMALY_05000400)
		gpio_free(P_IDENT(P_PPI0_FS3));
}

/* Claim the PPI pins needed for the chosen bus width. */
static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev,
						unsigned ppi16)
{
	int ret;
	/* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode:
	 * Drive PPI_FS3 Low
	 */
	if (ANOMALY_05000400) {
		/* NOTE(review): this inner 'ret' shadows the outer one, and
		 * the GPIO is not released on the failure paths below. */
		int ret = gpio_request(P_IDENT(P_PPI0_FS3), "PPI_FS3");
		if (ret)
			return ret;
		gpio_direction_output(P_IDENT(P_PPI0_FS3), 0);
	}

	if (ppi16)
		ret = peripheral_request_list(ppi0_req_16, DRIVER_NAME);
	else
		ret = peripheral_request_list(ppi0_req_8, DRIVER_NAME);

	if (ret) {
		dev_err(&pdev->dev, "requesting peripherals failed\n");
		return -EFAULT;
	}

	return 0;
}

/* fb open: on the first opener, program DMA/PPI/timers and start scanout. */
static int bfin_lq035q1_fb_open(struct fb_info *info, int user)
{
	struct bfin_lq035q1fb_info *fbi = info->par;

	spin_lock(&fbi->lock);
	fbi->lq035_open_cnt++;

	if (fbi->lq035_open_cnt <= 1) {
		bfin_lq035q1_disable_ppi();
		SSYNC();

		bfin_lq035q1_config_dma(fbi);
		bfin_lq035q1_config_ppi(fbi);
		bfin_lq035q1_init_timers(fbi);

		/* start dma */
		enable_dma(CH_PPI);
		bfin_lq035q1_enable_ppi();
		bfin_lq035q1_start_timers();
		lq035q1_backlight(fbi, 1);
	}

	spin_unlock(&fbi->lock);

	return 0;
}

/* fb release: on the last closer, stop scanout and kill the backlight. */
static int bfin_lq035q1_fb_release(struct fb_info *info, int user)
{
	struct bfin_lq035q1fb_info *fbi = info->par;

	spin_lock(&fbi->lock);

	fbi->lq035_open_cnt--;

	if (fbi->lq035_open_cnt <= 0) {
		lq035q1_backlight(fbi, 0);
		bfin_lq035q1_disable_ppi();
		SSYNC();
		disable_dma(CH_PPI);
		bfin_lq035q1_stop_timers();
	}

	spin_unlock(&fbi->lock);

	return 0;
}

/* Only the mode set at probe time is accepted: bpp, resolution and virtual
 * resolution must all match; the RGB bitfield layout is forced back to the
 * probe-time values. */
static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var,
				     struct fb_info *info)
{
	struct bfin_lq035q1fb_info *fbi = info->par;

	if (var->bits_per_pixel == fbi->lcd_bpp) {
		var->red.offset = info->var.red.offset;
		var->green.offset = info->var.green.offset;
		var->blue.offset = info->var.blue.offset;
		var->red.length = info->var.red.length;
		var->green.length = info->var.green.length;
		var->blue.length = info->var.blue.length;
		var->transp.offset = 0;
		var->transp.length = 0;
		var->transp.msb_right = 0;
		var->red.msb_right = 0;
		var->green.msb_right = 0;
		var->blue.msb_right = 0;
	} else {
		pr_debug("%s: depth not supported: %u BPP\n", __func__,
			 var->bits_per_pixel);
		return -EINVAL;
	}

	if (info->var.xres != var->xres || info->var.yres != var->yres ||
	    info->var.xres_virtual != var->xres_virtual ||
	    info->var.yres_virtual != var->yres_virtual) {
		pr_debug("%s: Resolution not supported: X%u x Y%u \n",
			 __func__, var->xres, var->yres);
		return -EINVAL;
	}

	/*
	 *  Memory limit
	 */

	if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
		pr_debug("%s: Memory Limit requested yres_virtual = %u\n",
			 __func__, var->yres_virtual);
		return -ENOMEM;
	}

	return 0;
}

int bfin_lq035q1_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	if (nocursor)
		return 0;
	else
		return -EINVAL;	/* just to force soft_cursor() call */
}

/* Fill the truecolor pseudo-palette used by the cfb_* drawing helpers. */
static int bfin_lq035q1_fb_setcolreg(u_int regno, u_int red, u_int green,
				     u_int blue, u_int transp,
				     struct fb_info *info)
{
	if (regno >= BFIN_LCD_NBR_PALETTE_ENTRIES)
		return -EINVAL;

	if (info->var.grayscale) {
		/* grayscale = 0.30*R + 0.59*G + 0.11*B */
		red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
	}

	if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
		u32 value;
		/* Place color in the pseudopalette */
		if (regno > 16)
			return -EINVAL;

		red >>= (16 - info->var.red.length);
		green >>= (16 - info->var.green.length);
		blue >>= (16 - info->var.blue.length);

		value = (red << info->var.red.offset) |
		    (green << info->var.green.offset) |
		    (blue << info->var.blue.offset);
		value &= 0xFFFFFF;

		((u32 *) (info->pseudo_palette))[regno] = value;
	}

	return 0;
}

static struct fb_ops bfin_lq035q1_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = bfin_lq035q1_fb_open,
	.fb_release = bfin_lq035q1_fb_release,
	.fb_check_var = bfin_lq035q1_fb_check_var,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_cursor = bfin_lq035q1_fb_cursor,
	.fb_setcolreg = bfin_lq035q1_fb_setcolreg,
};

/* PPI error IRQ: acknowledge the status and restart DMA + PPI to recover. */
static irqreturn_t bfin_lq035q1_irq_error(int irq, void *dev_id)
{
	/*struct bfin_lq035q1fb_info *info = (struct bfin_lq035q1fb_info *)dev_id;*/

	u16 status = bfin_read_PPI_STATUS();

	bfin_write_PPI_STATUS(-1);

	if (status) {
		bfin_lq035q1_disable_ppi();
		disable_dma(CH_PPI);

		/* start dma */
		enable_dma(CH_PPI);
		bfin_lq035q1_enable_ppi();
		bfin_write_PPI_STATUS(-1);
	}

	return IRQ_HANDLED;
}

/*
 * Platform probe: acquire DMA channel, allocate fb_info and a coherent
 * framebuffer, set up the fixed/variable screen parameters for the panel,
 * claim PPI pins and the error IRQ, register the embedded SPI driver and
 * finally the framebuffer.  Unwinds through the out* labels in reverse
 * acquisition order on failure.
 */
static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
{
	struct bfin_lq035q1fb_info *info;
	struct fb_info *fbinfo;
	u32 active_video_mem_offset;
	int ret;

	ret = request_dma(CH_PPI, DRIVER_NAME"_CH_PPI");
	if (ret < 0) {
		dev_err(&pdev->dev, "PPI DMA unavailable\n");
		goto out1;
	}

	fbinfo = framebuffer_alloc(sizeof(*info), &pdev->dev);
	if (!fbinfo) {
		ret = -ENOMEM;
		goto out2;
	}

	info = fbinfo->par;
	info->fb = fbinfo;
	info->dev = &pdev->dev;

	info->disp_info = pdev->dev.platform_data;

	platform_set_drvdata(pdev, fbinfo);

	ret = bfin_lq035q1_calc_timing(info);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed PPI Mode\n");
		goto out3;
	}

	strcpy(fbinfo->fix.id, DRIVER_NAME);

	fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
	fbinfo->fix.type_aux = 0;
	fbinfo->fix.xpanstep = 0;
	fbinfo->fix.ypanstep = 0;
	fbinfo->fix.ywrapstep = 0;
	fbinfo->fix.accel = FB_ACCEL_NONE;
	fbinfo->fix.visual = FB_VISUAL_TRUECOLOR;

	fbinfo->var.nonstd = 0;
	fbinfo->var.activate = FB_ACTIVATE_NOW;
	fbinfo->var.height = -1;
	fbinfo->var.width = -1;
	fbinfo->var.accel_flags = 0;
	fbinfo->var.vmode = FB_VMODE_NONINTERLACED;

	fbinfo->var.xres = LCD_X_RES;
	fbinfo->var.xres_virtual = LCD_X_RES;
	fbinfo->var.yres = LCD_Y_RES;
	fbinfo->var.yres_virtual = LCD_Y_RES;
	fbinfo->var.bits_per_pixel = info->lcd_bpp;

	/* RGB component layout depends on the panel's BGR mode bit. */
	if (info->disp_info->mode & LQ035_BGR) {
		if (info->lcd_bpp == 24) {
			fbinfo->var.red.offset = 0;
			fbinfo->var.green.offset = 8;
			fbinfo->var.blue.offset = 16;
		} else {
			fbinfo->var.red.offset = 0;
			fbinfo->var.green.offset = 5;
			fbinfo->var.blue.offset = 11;
		}
	} else {
		if (info->lcd_bpp == 24) {
			fbinfo->var.red.offset = 16;
			fbinfo->var.green.offset = 8;
			fbinfo->var.blue.offset = 0;
		} else {
			fbinfo->var.red.offset = 11;
			fbinfo->var.green.offset = 5;
			fbinfo->var.blue.offset = 0;
		}
	}

	fbinfo->var.transp.offset = 0;

	if (info->lcd_bpp == 24) {
		fbinfo->var.red.length = 8;
		fbinfo->var.green.length = 8;
		fbinfo->var.blue.length = 8;
	} else {
		fbinfo->var.red.length = 5;
		fbinfo->var.green.length = 6;
		fbinfo->var.blue.length = 5;
	}

	fbinfo->var.transp.length = 0;

	/* Half the blanking lines sit before the visible area; the visible
	 * screen_base starts that many bytes into the DMA buffer. */
	active_video_mem_offset = ((U_LINE / 2) * LCD_X_RES * (info->lcd_bpp / 8));

	fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * info->lcd_bpp / 8
				+ active_video_mem_offset;

	fbinfo->fix.line_length = fbinfo->var.xres_virtual *
	    fbinfo->var.bits_per_pixel / 8;

	fbinfo->fbops = &bfin_lq035q1_fb_ops;
	fbinfo->flags = FBINFO_FLAG_DEFAULT;

	info->fb_buffer = dma_alloc_coherent(NULL, fbinfo->fix.smem_len,
					     &info->dma_handle, GFP_KERNEL);

	if (NULL == info->fb_buffer) {
		dev_err(&pdev->dev, "couldn't allocate dma buffer\n");
		ret = -ENOMEM;
		goto out3;
	}

	fbinfo->screen_base = (void *)info->fb_buffer + active_video_mem_offset;
	/* NOTE(review): casting the pointer through (int) truncates on 64-bit;
	 * harmless on 32-bit Blackfin but worth (unsigned long). */
	fbinfo->fix.smem_start = (int)info->fb_buffer + active_video_mem_offset;

	fbinfo->fbops = &bfin_lq035q1_fb_ops;

	fbinfo->pseudo_palette = &info->pseudo_pal;

	ret = fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to allocate colormap (%d entries)\n",
		       BFIN_LCD_NBR_PALETTE_ENTRIES);
		goto out4;
	}

	ret = bfin_lq035q1_request_ports(pdev,
			info->disp_info->ppi_mode == USE_RGB565_16_BIT_PPI);
	if (ret) {
		dev_err(&pdev->dev, "couldn't request gpio port\n");
		goto out6;
	}

	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0) {
		ret = -EINVAL;
		goto out7;
	}

	ret = request_irq(info->irq, bfin_lq035q1_irq_error, IRQF_DISABLED,
			DRIVER_NAME" PPI ERROR", info);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to request PPI ERROR IRQ\n");
		goto out7;
	}

	info->spidrv.driver.name = DRIVER_NAME"-spi";
	info->spidrv.probe    = lq035q1_spidev_probe;
	info->spidrv.remove   = __devexit_p(lq035q1_spidev_remove);
	info->spidrv.shutdown = lq035q1_spidev_shutdown;
	info->spidrv.suspend  = lq035q1_spidev_suspend;
	info->spidrv.resume   = lq035q1_spidev_resume;

	ret = spi_register_driver(&info->spidrv);
	if (ret < 0) {
		dev_err(&pdev->dev, "couldn't register SPI Interface\n");
		goto out8;
	}

	if (info->disp_info->use_bl) {
		ret = gpio_request(info->disp_info->gpio_bl, "LQ035 Backlight");

		if (ret) {
			dev_err(&pdev->dev, "failed to request GPIO %d\n",
				info->disp_info->gpio_bl);
			goto out9;
		}
		gpio_direction_output(info->disp_info->gpio_bl, 0);
	}

	ret = register_framebuffer(fbinfo);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register framebuffer\n");
		goto out10;
	}

	dev_info(&pdev->dev, "%dx%d %d-bit RGB FrameBuffer initialized\n",
		LCD_X_RES, LCD_Y_RES, info->lcd_bpp);

	return 0;

	/* NOTE(review): label numbering skips out5; the chain itself appears
	 * to unwind in correct reverse order — verify before renaming. */
 out10:
	if (info->disp_info->use_bl)
		gpio_free(info->disp_info->gpio_bl);
 out9:
	spi_unregister_driver(&info->spidrv);
 out8:
	free_irq(info->irq, info);
 out7:
	bfin_lq035q1_free_ports(info->disp_info->ppi_mode ==
				USE_RGB565_16_BIT_PPI);
 out6:
	fb_dealloc_cmap(&fbinfo->cmap);
 out4:
	dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
			  info->dma_handle);
 out3:
	framebuffer_release(fbinfo);
 out2:
	free_dma(CH_PPI);
 out1:
	platform_set_drvdata(pdev, NULL);

	return ret;
}

/* Platform remove: tear everything down in reverse order of probe. */
static int __devexit bfin_lq035q1_remove(struct platform_device *pdev)
{
	struct fb_info *fbinfo = platform_get_drvdata(pdev);
	struct bfin_lq035q1fb_info *info = fbinfo->par;

	if (info->disp_info->use_bl)
		gpio_free(info->disp_info->gpio_bl);

	spi_unregister_driver(&info->spidrv);

	unregister_framebuffer(fbinfo);

	free_dma(CH_PPI);
	free_irq(info->irq, info);

	if (info->fb_buffer != NULL)
		dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
				  info->dma_handle);

	fb_dealloc_cmap(&fbinfo->cmap);

	bfin_lq035q1_free_ports(info->disp_info->ppi_mode ==
				USE_RGB565_16_BIT_PPI);

	platform_set_drvdata(pdev, NULL);
	framebuffer_release(fbinfo);

	dev_info(&pdev->dev, "unregistered LCD driver\n");

	return 0;
}

#ifdef CONFIG_PM
/* System suspend: stop scanout if the fb is open; state is reprogrammed
 * from scratch on resume. */
static int bfin_lq035q1_suspend(struct device *dev)
{
	struct fb_info *fbinfo = dev_get_drvdata(dev);
	struct bfin_lq035q1fb_info *info = fbinfo->par;

	if (info->lq035_open_cnt) {
		lq035q1_backlight(info, 0);
		bfin_lq035q1_disable_ppi();
		SSYNC();
		disable_dma(CH_PPI);
		bfin_lq035q1_stop_timers();
		bfin_write_PPI_STATUS(-1);
	}

	return 0;
}

/* System resume: mirror of fb_open's start sequence. */
static int bfin_lq035q1_resume(struct device *dev)
{
	struct fb_info *fbinfo = dev_get_drvdata(dev);
	struct bfin_lq035q1fb_info *info = fbinfo->par;

	if (info->lq035_open_cnt) {
		bfin_lq035q1_disable_ppi();
		SSYNC();

		bfin_lq035q1_config_dma(info);
		bfin_lq035q1_config_ppi(info);
		bfin_lq035q1_init_timers(info);

		/* start dma */
		enable_dma(CH_PPI);
		bfin_lq035q1_enable_ppi();
		bfin_lq035q1_start_timers();
		lq035q1_backlight(info, 1);
	}

	return 0;
}

static struct dev_pm_ops bfin_lq035q1_dev_pm_ops = {
	.suspend = bfin_lq035q1_suspend,
	.resume  = bfin_lq035q1_resume,
};
#endif

static struct platform_driver bfin_lq035q1_driver = {
	.probe = bfin_lq035q1_probe,
	.remove = __devexit_p(bfin_lq035q1_remove),
	.driver = {
		.name = DRIVER_NAME,
#ifdef CONFIG_PM
		.pm = &bfin_lq035q1_dev_pm_ops,
#endif
	},
};

static int __init bfin_lq035q1_driver_init(void)
{
	return platform_driver_register(&bfin_lq035q1_driver);
}
module_init(bfin_lq035q1_driver_init);

static void __exit bfin_lq035q1_driver_cleanup(void)
{
	platform_driver_unregister(&bfin_lq035q1_driver);
}
module_exit(bfin_lq035q1_driver_cleanup);

MODULE_DESCRIPTION("Blackfin TFT LCD Driver");
MODULE_LICENSE("GPL");
gpl-2.0
futranbg/ef65l-kernel-2.0
drivers/staging/rtl8192e/r819xE_cmdpkt.c
2924
13662
/****************************************************************************** (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved. Module: r819xusb_cmdpkt.c (RTL8190 TX/RX command packet handler Source C File) Note: The module is responsible for handling TX and RX command packet. 1. TX : Send set and query configuration command packet. 2. RX : Receive tx feedback, beacon state, query configuration command packet. Function: Export: Abbrev: History: Data Who Remark 05/06/2008 amy Create initial version porting from windows driver. ******************************************************************************/ #include "r8192E.h" #include "r8192E_hw.h" #include "r819xE_cmdpkt.h" /* * Driver internal module can call the API to send message to * firmware side. For example, you can send a debug command packet. * Or you can send a request for FW to modify RLX4181 LBUS HW bank. * Otherwise, you can change MAC/PHT/RF register by firmware at * run time. We do not support message more than one segment now. */ RT_STATUS cmpk_message_handle_tx( struct r8192_priv *priv, u8* code_virtual_address, u32 packettype, u32 buffer_len) { RT_STATUS rt_status = RT_STATUS_SUCCESS; u16 frag_threshold; u16 frag_length = 0, frag_offset = 0; rt_firmware *pfirmware = priv->pFirmware; struct sk_buff *skb; unsigned char *seg_ptr; cb_desc *tcb_desc; u8 bLastIniPkt; PTX_FWINFO_8190PCI pTxFwInfo = NULL; int i; RT_TRACE(COMP_CMDPKT,"%s(),buffer_len is %d\n",__FUNCTION__,buffer_len); firmware_init_param(priv); //Fragmentation might be required frag_threshold = pfirmware->cmdpacket_frag_thresold; do { if((buffer_len - frag_offset) > frag_threshold) { frag_length = frag_threshold ; bLastIniPkt = 0; } else { frag_length =(u16)(buffer_len - frag_offset); bLastIniPkt = 1; } /* Allocate skb buffer to contain firmware info and tx descriptor info * add 4 to avoid packet appending overflow. 
* */ skb = dev_alloc_skb(frag_length + priv->ieee80211->tx_headroom + 4); if(skb == NULL) { rt_status = RT_STATUS_FAILURE; goto Failed; } tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->queue_index = TXCMD_QUEUE; tcb_desc->bCmdOrInit = packettype; tcb_desc->bLastIniPkt = bLastIniPkt; tcb_desc->pkt_size = frag_length; //seg_ptr = skb_put(skb, frag_length + priv->ieee80211->tx_headroom); seg_ptr = skb_put(skb, priv->ieee80211->tx_headroom); pTxFwInfo = (PTX_FWINFO_8190PCI)seg_ptr; memset(pTxFwInfo,0,sizeof(TX_FWINFO_8190PCI)); memset(pTxFwInfo,0x12,8); seg_ptr +=sizeof(TX_FWINFO_8190PCI); /* * Transform from little endian to big endian * and pending zero */ seg_ptr = skb_tail_pointer(skb); for(i=0 ; i < frag_length; i+=4) { *seg_ptr++ = ((i+0)<frag_length)?code_virtual_address[i+3]:0; *seg_ptr++ = ((i+1)<frag_length)?code_virtual_address[i+2]:0; *seg_ptr++ = ((i+2)<frag_length)?code_virtual_address[i+1]:0; *seg_ptr++ = ((i+3)<frag_length)?code_virtual_address[i+0]:0; } skb_put(skb, i); priv->ieee80211->softmac_hard_start_xmit(skb, priv->ieee80211); code_virtual_address += frag_length; frag_offset += frag_length; }while(frag_offset < buffer_len); Failed: return rt_status; } static void cmpk_count_txstatistic(struct r8192_priv *priv, cmpk_txfb_t *pstx_fb) { #ifdef ENABLE_PS RT_RF_POWER_STATE rtState; pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. if (rtState == eRfOff) { return; } #endif #ifdef TODO if(pAdapter->bInHctTest) return; #endif /* We can not know the packet length and transmit type: broadcast or uni or multicast. So the relative statistics must be collected in tx feedback info. */ if (pstx_fb->tok) { priv->stats.txoktotal++; /* We can not make sure broadcast/multicast or unicast mode. 
*/ if (pstx_fb->pkt_type != PACKET_MULTICAST && pstx_fb->pkt_type != PACKET_BROADCAST) { priv->stats.txbytesunicast += pstx_fb->pkt_length; } } } /* * The function is responsible for extract the message inside TX * feedbck message from firmware. It will contain dedicated info in * ws-06-0063-rtl8190-command-packet-specification. Please * refer to chapter "TX Feedback Element". We have to read 20 bytes * in the command packet. */ static void cmpk_handle_tx_feedback(struct r8192_priv *priv, u8 *pmsg) { cmpk_txfb_t rx_tx_fb; /* */ priv->stats.txfeedback++; memcpy((u8*)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t)); /* Use tx feedback info to count TX statistics. */ cmpk_count_txstatistic(priv, &rx_tx_fb); } /* * The function is responsible for extract the message from * firmware. It will contain dedicated info in * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc. * Please refer to chapter "Interrupt Status Element". */ static void cmpk_handle_interrupt_status(struct r8192_priv *priv, u8 *pmsg) { cmpk_intr_sta_t rx_intr_status; /* */ DMESG("---> cmpk_Handle_Interrupt_Status()\n"); /* 1. Extract TX feedback info from RFD to temp structure buffer. */ /* It seems that FW use big endian(MIPS) and DRV use little endian in windows OS. So we have to read the content byte by byte or transfer endian type before copy the message copy. */ //rx_bcn_state.Element_ID = pMsg[0]; //rx_bcn_state.Length = pMsg[1]; rx_intr_status.length = pmsg[1]; if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2)) { DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n"); return; } // Statistics of beacon for ad-hoc mode. if( priv->ieee80211->iw_mode == IW_MODE_ADHOC) { //2 maybe need endian transform? 
rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4)); //rx_intr_status.InterruptStatus = N2H4BYTE(*((UINT32 *)(pMsg + 4))); DMESG("interrupt status = 0x%x\n", rx_intr_status.interrupt_status); if (rx_intr_status.interrupt_status & ISR_TxBcnOk) { priv->ieee80211->bibsscoordinator = true; priv->stats.txbeaconokint++; } else if (rx_intr_status.interrupt_status & ISR_TxBcnErr) { priv->ieee80211->bibsscoordinator = false; priv->stats.txbeaconerr++; } } // Other informations in interrupt status we need? DMESG("<---- cmpk_handle_interrupt_status()\n"); } /* * The function is responsible for extract the message from * firmware. It will contain dedicated info in * ws-06-0063-rtl8190-command-packet-specification. Please * refer to chapter "Beacon State Element". */ static void cmpk_handle_query_config_rx(struct r8192_priv *priv, u8 *pmsg) { cmpk_query_cfg_t rx_query_cfg; /* */ /* 0. Display received message. */ //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg); /* 1. Extract TX feedback info from RFD to temp structure buffer. */ /* It seems that FW use big endian(MIPS) and DRV use little endian in windows OS. So we have to read the content byte by byte or transfer endian type before copy the message copy. */ //rx_query_cfg.Element_ID = pMsg[0]; //rx_query_cfg.Length = pMsg[1]; rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31; rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5; rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3; rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0; rx_query_cfg.cfg_offset = pmsg[7]; rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) | (pmsg[10] << 8) | (pmsg[11] << 0); rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) | (pmsg[14] << 8) | (pmsg[15] << 0); } /* * Count aggregated tx status from firmwar of one type rx command * packet element id = RX_TX_STATUS. 
*/ static void cmpk_count_tx_status(struct r8192_priv *priv, cmpk_tx_status_t *pstx_status) { #ifdef ENABLE_PS RT_RF_POWER_STATE rtstate; pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. if (rtState == eRfOff) { return; } #endif priv->stats.txfeedbackok += pstx_status->txok; priv->stats.txoktotal += pstx_status->txok; priv->stats.txbytesunicast += pstx_status->txuclength; } /* * Firmware add a new tx feedback status to reduce rx command * packet buffer operation load. */ static void cmpk_handle_tx_status(struct r8192_priv *priv, u8 *pmsg) { cmpk_tx_status_t rx_tx_sts; /* */ memcpy((void*)&rx_tx_sts, (void*)pmsg, sizeof(cmpk_tx_status_t)); /* 2. Use tx feedback info to count TX statistics. */ cmpk_count_tx_status(priv, &rx_tx_sts); } /* Firmware add a new tx rate history */ static void cmpk_handle_tx_rate_history(struct r8192_priv *priv, u8 *pmsg) { u8 i; u16 length = sizeof(cmpk_tx_rahis_t); u32 *ptemp; #ifdef ENABLE_PS pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. if (rtState == eRfOff) { return; } #endif ptemp = (u32 *)pmsg; // // Do endian transfer to word alignment(16 bits) for windows system. // You must do different endian transfer for linux and MAC OS // for (i = 0; i < (length/4); i++) { u16 temp1, temp2; temp1 = ptemp[i]&0x0000FFFF; temp2 = ptemp[i]>>16; ptemp[i] = (temp1<<16)|temp2; } } /* * In the function, we will capture different RX command packet * info. Every RX command packet element has different message * length and meaning in content. 
We only support three type of RX
 * command packet now. Please refer to document
 * ws-06-0063-rtl8190-command-packet-specification.
 *
 * Returns 1 if the buffer was a command packet (even if an unknown
 * element was found part-way through), 0 if the input is unusable.
 */
u32 cmpk_message_handle_rx(struct r8192_priv *priv,
			   struct ieee80211_rx_stats *pstats)
{
	int total_length;
	u8 cmd_length, exe_cnt = 0;
	u8 element_id;
	u8 *pcmd_buff;

	RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx()\n");

	/* 0. Check input arguments.  Without the stats descriptor we have
	 * neither a length nor a buffer to parse. */
	if (pstats == NULL)
		return 0;	/* This is not a command packet. */

	/* 1. Read received command packet message length from RFD. */
	total_length = pstats->Length;

	/* 2. Read virtual address from RFD. */
	pcmd_buff = pstats->virtual_address;

	/* 3/4. Walk every aggregated command element.  FW may aggregate
	 * several command elements into one RX packet to minimise transfer
	 * time between driver and firmware, so keep consuming elements
	 * until the remaining length is exhausted.
	 *
	 * BUGFIX: the original guard was
	 *	while (total_length > 0 || exe_cnt++ > 100)
	 * With ||, the right-hand side is short-circuited away whenever
	 * total_length > 0, so the "don't get locked in the loop too long"
	 * counter never incremented and never bounded the loop.  Use &&
	 * with < so the loop genuinely stops after at most 100 elements. */
	while (total_length > 0 && exe_cnt++ < 100) {
		/* 2007/01/17 MH We support aggregation of different cmd in
		 * the same packet: re-read the element id at each step. */
		element_id = pcmd_buff[0];

		switch (element_id) {
		case RX_TX_FEEDBACK:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_FEEDBACK\n");
			cmpk_handle_tx_feedback(priv, pcmd_buff);
			cmd_length = CMPK_RX_TX_FB_SIZE;
			break;

		case RX_INTERRUPT_STATUS:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_INTERRUPT_STATUS\n");
			cmpk_handle_interrupt_status(priv, pcmd_buff);
			cmd_length = sizeof(cmpk_intr_sta_t);
			break;

		case BOTH_QUERY_CONFIG:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():BOTH_QUERY_CONFIG\n");
			cmpk_handle_query_config_rx(priv, pcmd_buff);
			cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
			break;

		case RX_TX_STATUS:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_STATUS\n");
			cmpk_handle_tx_status(priv, pcmd_buff);
			cmd_length = CMPK_RX_TX_STS_SIZE;
			break;

		case RX_TX_PER_PKT_FEEDBACK:
			/* You must at least add a switch case element here,
			 * otherwise we would fall into the default case. */
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_PER_PKT_FEEDBACK\n");
			cmd_length = CMPK_RX_TX_FB_SIZE;
			break;

		case RX_TX_RATE_HISTORY:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_HISTORY\n");
			cmpk_handle_tx_rate_history(priv, pcmd_buff);
			cmd_length = CMPK_TX_RAHIS_SIZE;
			break;

		default:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():unknown CMD Element\n");
			return 1;	/* This is a command packet. */
		}

		total_length -= cmd_length;
		pcmd_buff += cmd_length;
	}	/* while (total_length > 0) */

	/* Moved before the return: the original placed this trace after
	 * "return 1", making it unreachable. */
	RT_TRACE(COMP_EVENTS, "<----cmpk_message_handle_rx()\n");
	return 1;	/* This is a command packet. */
}
gpl-2.0
civato/Note8.0-StormBorn
drivers/staging/rtl8192e/r8192_pm.c
2924
3352
/*
 * Power management interface routines for the RTL8192E PCI driver.
 * Written by Mariusz Matuszek.
 *
 * This is part of the rtl8180 OpenSource driver.
 * Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
 * Released under the terms of GPL (General Public Licence)
 */

#include "r8192E.h"
#include "r8192E_hw.h"
#include "r8192_pm.h"
#include "r8190_rtl8256.h"

/*
 * PCI save_state callback.  Deliberately unimplemented: returning
 * -EAGAIN signals the PM core that this hook did nothing useful.
 */
int rtl8192E_save_state (struct pci_dev *dev, pm_message_t state)
{
	printk(KERN_NOTICE "r8192E save state call (state %u).\n", state.event);
	return -EAGAIN;
}

/*
 * PCI suspend callback.  Stops the interface, then either resets the
 * on-chip CPU (normal suspend) or arms the Wake-on-LAN machinery when
 * remote wakeup is supported, and finally puts the PCI device into the
 * requested low-power state.
 */
int rtl8192E_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 ulRegRead;

	RT_TRACE(COMP_POWER, "============> r8192E suspend call.\n");

	/* If the interface is down there is nothing to stop; go straight
	 * to the PCI-level suspend steps. */
	if (!netif_running(dev))
		goto out_pci_suspend;

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	/* Call MgntActSet_RF_State instead of touching the RF directly to
	 * prevent an RF-configuration race condition. */
	if (!priv->ieee80211->bSupportRemoteWakeUp) {
		MgntActSet_RF_State(priv, eRfOff, RF_CHANGE_BY_INIT);
		/* Assert the system-reset bit in CPU_GEN (2006.11.30). */
		ulRegRead = read_nic_dword(priv, CPU_GEN);
		ulRegRead |= CPU_GEN_SYSTEM_RESET;
		write_nic_dword(priv, CPU_GEN, ulRegRead);
	} else {
		/* 2008.06.03: configure Wake-on-LAN.
		 * Wildcard the wakeup-frame CRC registers so any pattern
		 * matches. */
		write_nic_dword(priv, WFCRC0, 0xffffffff);
		write_nic_dword(priv, WFCRC1, 0xffffffff);
		write_nic_dword(priv, WFCRC2, 0xffffffff);
		/* Write the PMR (power management) register. */
		write_nic_byte(priv, PMR, 0x5);
		/* Disable tx, enable rx so wake frames can be received. */
		write_nic_byte(priv, MacBlkCtrl, 0xa);
	}
out_pci_suspend:
	RT_TRACE(COMP_POWER, "r8192E support WOL call??????????????????????\n");
	if (priv->ieee80211->bSupportRemoteWakeUp) {
		RT_TRACE(COMP_POWER, "r8192E support WOL call!!!!!!!!!!!!!!!!!!.\n");
	}
	netif_device_detach(dev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	/* Only arm PCI wakeup when the hardware supports remote wakeup. */
	pci_enable_wake(pdev, pci_choose_state(pdev, state),
			priv->ieee80211->bSupportRemoteWakeUp ? 1 : 0);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

/*
 * PCI resume callback.  Restores PCI state, re-applies the PCI retry
 * timeout workaround, and re-opens the interface if it was running.
 */
int rtl8192E_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int err;
	u32 val;

	RT_TRACE(COMP_POWER, "================>r8192E resume call.\n");

	pci_set_power_state(pdev, PCI_D0);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}

	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0) {
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	pci_enable_wake(pdev, PCI_D0, 0);

	/* Nothing more to do if the interface was down before suspend. */
	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	if (dev->netdev_ops->ndo_open)
		dev->netdev_ops->ndo_open(dev);
out:
	RT_TRACE(COMP_POWER, "<================r8192E resume call.\n");
	return 0;
}

/*
 * PCI enable_wake callback.  Placeholder like save_state: logs the
 * request and returns -EAGAIN to indicate it is not implemented.
 */
int rtl8192E_enable_wake (struct pci_dev *dev, pm_message_t state, int enable)
{
	printk(KERN_NOTICE "r8192E enable wake call (state %u, enable %d).\n",
	       state.event, enable);
	return -EAGAIN;
}
gpl-2.0
mirror-androidarmv6/android_kernel_lge_msm7x27-3.0.x
drivers/usb/host/isp1760-if.c
2924
10089
/*
 * Glue code for the ISP1760 driver and bus
 * Currently there is support for
 * - OpenFirmware
 * - PCI
 * - PDEV (generic platform device centralized driver model)
 *
 * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
 *
 */

#include <linux/usb.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/usb/isp1760.h>
#include <linux/usb/hcd.h>

#include "isp1760-hcd.h"

#ifdef CONFIG_PPC_OF
#include <linux/of.h>
#include <linux/of_platform.h>
#endif

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#ifdef CONFIG_PPC_OF
/*
 * OpenFirmware probe: translate the device-tree resources and optional
 * properties into ISP1760 flags and register the HCD.
 */
static int of_isp1760_probe(struct platform_device *dev)
{
	struct usb_hcd *hcd;
	struct device_node *dp = dev->dev.of_node;
	struct resource *res;
	struct resource memory;
	struct of_irq oirq;
	int virq;
	resource_size_t res_len;
	int ret;
	const unsigned int *prop;
	unsigned int devflags = 0;

	ret = of_address_to_resource(dp, 0, &memory);
	if (ret)
		return -ENXIO;

	res_len = resource_size(&memory);

	res = request_mem_region(memory.start, res_len, dev_name(&dev->dev));
	if (!res)
		return -EBUSY;

	if (of_irq_map_one(dp, 0, &oirq)) {
		ret = -ENODEV;
		goto release_reg;
	}

	virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
			oirq.size);

	if (of_device_is_compatible(dp, "nxp,usb-isp1761"))
		devflags |= ISP1760_FLAG_ISP1761;

	/* Some systems wire up only 16 of the 32 data lines */
	prop = of_get_property(dp, "bus-width", NULL);
	if (prop && *prop == 16)
		devflags |= ISP1760_FLAG_BUS_WIDTH_16;

	if (of_get_property(dp, "port1-otg", NULL) != NULL)
		devflags |= ISP1760_FLAG_OTG_EN;

	if (of_get_property(dp, "analog-oc", NULL) != NULL)
		devflags |= ISP1760_FLAG_ANALOG_OC;

	if (of_get_property(dp, "dack-polarity", NULL) != NULL)
		devflags |= ISP1760_FLAG_DACK_POL_HIGH;

	if (of_get_property(dp, "dreq-polarity", NULL) != NULL)
		devflags |= ISP1760_FLAG_DREQ_POL_HIGH;

	hcd = isp1760_register(memory.start, res_len, virq,
		IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
		devflags);
	if (IS_ERR(hcd)) {
		ret = PTR_ERR(hcd);
		goto release_reg;
	}

	/* ret is 0 here (of_address_to_resource succeeded above). */
	dev_set_drvdata(&dev->dev, hcd);
	return ret;

release_reg:
	release_mem_region(memory.start, res_len);
	return ret;
}

/* OF remove: tear down the HCD and release the mapped memory region. */
static int of_isp1760_remove(struct platform_device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(&dev->dev);

	dev_set_drvdata(&dev->dev, NULL);

	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);

	return 0;
}

static const struct of_device_id of_isp1760_match[] = {
	{
		.compatible = "nxp,usb-isp1760",
	},
	{
		.compatible = "nxp,usb-isp1761",
	},
	{ },
};
MODULE_DEVICE_TABLE(of, of_isp1760_match);

static struct platform_driver isp1760_of_driver = {
	.driver = {
		.name = "nxp-isp1760",
		.owner = THIS_MODULE,
		.of_match_table = of_isp1760_match,
	},
	.probe          = of_isp1760_probe,
	.remove         = of_isp1760_remove,
};
#endif

#ifdef CONFIG_PCI
/*
 * PCI probe: the ISP1761 sits behind a PLX bridge.  BAR0 holds the PLX
 * control registers, BAR3 the chip's shared memory.  Probe the scratch
 * register to detect the controller, then register the HCD.
 */
static int __devinit isp1761_pci_probe(struct pci_dev *dev,
		const struct pci_device_id *id)
{
	u8 latency, limit;
	__u32 reg_data;
	int retry_count;
	struct usb_hcd *hcd;
	unsigned int devflags = 0;
	int ret_status = 0;

	resource_size_t pci_mem_phy0;
	resource_size_t memlength;

	u8 __iomem *chip_addr;
	u8 __iomem *iobase;
	resource_size_t nxp_pci_io_base;
	resource_size_t iolength;

	if (usb_disabled())
		return -ENODEV;

	if (pci_enable_device(dev) < 0)
		return -ENODEV;

	if (!dev->irq)
		return -ENODEV;

	/* Grab the PLX PCI mem maped port start address we need  */
	nxp_pci_io_base = pci_resource_start(dev, 0);
	iolength = pci_resource_len(dev, 0);

	if (!request_mem_region(nxp_pci_io_base, iolength, "ISP1761 IO MEM")) {
		printk(KERN_ERR "request region #1\n");
		return -EBUSY;
	}

	iobase = ioremap_nocache(nxp_pci_io_base, iolength);
	if (!iobase) {
		printk(KERN_ERR "ioremap #1\n");
		ret_status = -ENOMEM;
		goto cleanup1;
	}
	/* Grab the PLX PCI shared memory of the ISP 1761 we need  */
	pci_mem_phy0 = pci_resource_start(dev, 3);
	memlength = pci_resource_len(dev, 3);
	if (memlength < 0xffff) {
		printk(KERN_ERR "memory length for this resource is wrong\n");
		ret_status = -ENOMEM;
		goto cleanup2;
	}

	if (!request_mem_region(pci_mem_phy0, memlength, "ISP-PCI")) {
		printk(KERN_ERR "host controller already in use\n");
		ret_status = -EBUSY;
		goto cleanup2;
	}

	/* map available memory */
	chip_addr = ioremap_nocache(pci_mem_phy0,memlength);
	if (!chip_addr) {
		printk(KERN_ERR "Error ioremap failed\n");
		ret_status = -ENOMEM;
		goto cleanup3;
	}

	/* bad pci latencies can contribute to overruns */
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &latency);
	if (latency) {
		pci_read_config_byte(dev, PCI_MAX_LAT, &limit);
		if (limit && limit < latency)
			pci_write_config_byte(dev, PCI_LATENCY_TIMER, limit);
	}

	/* Try to check whether we can access Scratch Register of
	 * Host Controller or not. The initial PCI access is retried until
	 * local init for the PCI bridge is completed
	 */
	retry_count = 20;
	reg_data = 0;
	while ((reg_data != 0xFACE) && retry_count) {
		/* by default the host is in 16-bit mode, so I/O
		 * operations at this stage must be 16 bit */
		writel(0xface, chip_addr + HC_SCRATCH_REG);
		udelay(100);
		reg_data = readl(chip_addr + HC_SCRATCH_REG) & 0x0000ffff;
		retry_count--;
	}

	iounmap(chip_addr);

	/* Host Controller presence is detected by writing to scratch register
	 * and reading back and checking the contents are same or not
	 */
	if (reg_data != 0xFACE) {
		dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
		ret_status = -ENOMEM;
		goto cleanup3;
	}

	pci_set_master(dev);

	/* configure PLX PCI chip to pass interrupts */
#define PLX_INT_CSR_REG 0x68
	reg_data = readl(iobase + PLX_INT_CSR_REG);
	reg_data |= 0x900;
	writel(reg_data, iobase + PLX_INT_CSR_REG);

	dev->dev.dma_mask = NULL;
	hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
		IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
		devflags);
	if (IS_ERR(hcd)) {
		ret_status = -ENODEV;
		goto cleanup3;
	}

	/* done with PLX IO access */
	iounmap(iobase);
	release_mem_region(nxp_pci_io_base, iolength);

	pci_set_drvdata(dev, hcd);
	return 0;

cleanup3:
	release_mem_region(pci_mem_phy0, memlength);
cleanup2:
	iounmap(iobase);
cleanup1:
	release_mem_region(nxp_pci_io_base, iolength);
	return ret_status;
}

static void isp1761_pci_remove(struct pci_dev *dev)
{
	struct usb_hcd *hcd;

	hcd = pci_get_drvdata(dev);

	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);

	pci_disable_device(dev);
}

static void isp1761_pci_shutdown(struct pci_dev *dev)
{
	printk(KERN_ERR "ips1761_pci_shutdown\n");
}

static const struct pci_device_id isp1760_plx [] = {
	{
		.class		= PCI_CLASS_BRIDGE_OTHER << 8,
		.class_mask	= ~0,
		.vendor		= PCI_VENDOR_ID_PLX,
		.device		= 0x5406,
		.subvendor	= PCI_VENDOR_ID_PLX,
		.subdevice	= 0x9054,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, isp1760_plx);

static struct pci_driver isp1761_pci_driver = {
	.name =		"isp1760",
	.id_table =	isp1760_plx,
	.probe =	isp1761_pci_probe,
	.remove =	isp1761_pci_remove,
	.shutdown =	isp1761_pci_shutdown,
};
#endif

/*
 * Generic platform-device probe: pick up MEM/IRQ resources and the
 * optional platform data flags, then register the HCD.
 */
static int __devinit isp1760_plat_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct usb_hcd *hcd;
	struct resource *mem_res;
	struct resource *irq_res;
	resource_size_t mem_size;
	struct isp1760_platform_data *priv = pdev->dev.platform_data;
	unsigned int devflags = 0;
	unsigned long irqflags = IRQF_SHARED | IRQF_DISABLED;

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		pr_warning("isp1760: Memory resource not available\n");
		ret = -ENODEV;
		goto out;
	}
	mem_size = resource_size(mem_res);
	if (!request_mem_region(mem_res->start, mem_size, "isp1760")) {
		pr_warning("isp1760: Cannot reserve the memory resource\n");
		ret = -EBUSY;
		goto out;
	}

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res) {
		pr_warning("isp1760: IRQ resource not available\n");
		/* BUGFIX: this path used "return -ENODEV;", leaking the
		 * memory region reserved just above.  Release it via the
		 * common cleanup path instead. */
		ret = -ENODEV;
		goto cleanup;
	}
	irqflags |= irq_res->flags & IRQF_TRIGGER_MASK;

	if (priv) {
		if (priv->is_isp1761)
			devflags |= ISP1760_FLAG_ISP1761;
		if (priv->bus_width_16)
			devflags |= ISP1760_FLAG_BUS_WIDTH_16;
		if (priv->port1_otg)
			devflags |= ISP1760_FLAG_OTG_EN;
		if (priv->analog_oc)
			devflags |= ISP1760_FLAG_ANALOG_OC;
		if (priv->dack_polarity_high)
			devflags |= ISP1760_FLAG_DACK_POL_HIGH;
		if (priv->dreq_polarity_high)
			devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
	}

	hcd = isp1760_register(mem_res->start, mem_size, irq_res->start,
			       irqflags, &pdev->dev, dev_name(&pdev->dev),
			       devflags);
	if (IS_ERR(hcd)) {
		pr_warning("isp1760: Failed to register the HCD device\n");
		ret = -ENODEV;
		goto cleanup;
	}

	pr_info("ISP1760 USB device initialised\n");
	return ret;

cleanup:
	release_mem_region(mem_res->start, mem_size);
out:
	return ret;
}

static int __devexit isp1760_plat_remove(struct platform_device *pdev)
{
	struct resource *mem_res;
	resource_size_t mem_size;

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem_size = resource_size(mem_res);
	release_mem_region(mem_res->start, mem_size);

	return 0;
}

static struct platform_driver isp1760_plat_driver = {
	.probe	= isp1760_plat_probe,
	.remove	= __devexit_p(isp1760_plat_remove),
	.driver	= {
		.name	= "isp1760",
	},
};

/*
 * Register every configured bus glue.  Success is "at least one bus
 * registered"; if all fail, tear down the memory cache again.
 */
static int __init isp1760_init(void)
{
	int ret, any_ret = -ENODEV;

	init_kmem_once();

	ret = platform_driver_register(&isp1760_plat_driver);
	if (!ret)
		any_ret = 0;
#ifdef CONFIG_PPC_OF
	ret = platform_driver_register(&isp1760_of_driver);
	if (!ret)
		any_ret = 0;
#endif
#ifdef CONFIG_PCI
	ret = pci_register_driver(&isp1761_pci_driver);
	if (!ret)
		any_ret = 0;
#endif

	if (any_ret)
		deinit_kmem_cache();
	return any_ret;
}
module_init(isp1760_init);

static void __exit isp1760_exit(void)
{
	platform_driver_unregister(&isp1760_plat_driver);
#ifdef CONFIG_PPC_OF
	platform_driver_unregister(&isp1760_of_driver);
#endif
#ifdef CONFIG_PCI
	pci_unregister_driver(&isp1761_pci_driver);
#endif
	deinit_kmem_cache();
}
module_exit(isp1760_exit);
gpl-2.0
JerryScript/VaeVictus
drivers/staging/rtl8192e/r819xE_cmdpkt.c
2924
13662
/****************************************************************************** (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved. Module: r819xusb_cmdpkt.c (RTL8190 TX/RX command packet handler Source C File) Note: The module is responsible for handling TX and RX command packet. 1. TX : Send set and query configuration command packet. 2. RX : Receive tx feedback, beacon state, query configuration command packet. Function: Export: Abbrev: History: Data Who Remark 05/06/2008 amy Create initial version porting from windows driver. ******************************************************************************/ #include "r8192E.h" #include "r8192E_hw.h" #include "r819xE_cmdpkt.h" /* * Driver internal module can call the API to send message to * firmware side. For example, you can send a debug command packet. * Or you can send a request for FW to modify RLX4181 LBUS HW bank. * Otherwise, you can change MAC/PHT/RF register by firmware at * run time. We do not support message more than one segment now. */ RT_STATUS cmpk_message_handle_tx( struct r8192_priv *priv, u8* code_virtual_address, u32 packettype, u32 buffer_len) { RT_STATUS rt_status = RT_STATUS_SUCCESS; u16 frag_threshold; u16 frag_length = 0, frag_offset = 0; rt_firmware *pfirmware = priv->pFirmware; struct sk_buff *skb; unsigned char *seg_ptr; cb_desc *tcb_desc; u8 bLastIniPkt; PTX_FWINFO_8190PCI pTxFwInfo = NULL; int i; RT_TRACE(COMP_CMDPKT,"%s(),buffer_len is %d\n",__FUNCTION__,buffer_len); firmware_init_param(priv); //Fragmentation might be required frag_threshold = pfirmware->cmdpacket_frag_thresold; do { if((buffer_len - frag_offset) > frag_threshold) { frag_length = frag_threshold ; bLastIniPkt = 0; } else { frag_length =(u16)(buffer_len - frag_offset); bLastIniPkt = 1; } /* Allocate skb buffer to contain firmware info and tx descriptor info * add 4 to avoid packet appending overflow. 
* */ skb = dev_alloc_skb(frag_length + priv->ieee80211->tx_headroom + 4); if(skb == NULL) { rt_status = RT_STATUS_FAILURE; goto Failed; } tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->queue_index = TXCMD_QUEUE; tcb_desc->bCmdOrInit = packettype; tcb_desc->bLastIniPkt = bLastIniPkt; tcb_desc->pkt_size = frag_length; //seg_ptr = skb_put(skb, frag_length + priv->ieee80211->tx_headroom); seg_ptr = skb_put(skb, priv->ieee80211->tx_headroom); pTxFwInfo = (PTX_FWINFO_8190PCI)seg_ptr; memset(pTxFwInfo,0,sizeof(TX_FWINFO_8190PCI)); memset(pTxFwInfo,0x12,8); seg_ptr +=sizeof(TX_FWINFO_8190PCI); /* * Transform from little endian to big endian * and pending zero */ seg_ptr = skb_tail_pointer(skb); for(i=0 ; i < frag_length; i+=4) { *seg_ptr++ = ((i+0)<frag_length)?code_virtual_address[i+3]:0; *seg_ptr++ = ((i+1)<frag_length)?code_virtual_address[i+2]:0; *seg_ptr++ = ((i+2)<frag_length)?code_virtual_address[i+1]:0; *seg_ptr++ = ((i+3)<frag_length)?code_virtual_address[i+0]:0; } skb_put(skb, i); priv->ieee80211->softmac_hard_start_xmit(skb, priv->ieee80211); code_virtual_address += frag_length; frag_offset += frag_length; }while(frag_offset < buffer_len); Failed: return rt_status; } static void cmpk_count_txstatistic(struct r8192_priv *priv, cmpk_txfb_t *pstx_fb) { #ifdef ENABLE_PS RT_RF_POWER_STATE rtState; pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. if (rtState == eRfOff) { return; } #endif #ifdef TODO if(pAdapter->bInHctTest) return; #endif /* We can not know the packet length and transmit type: broadcast or uni or multicast. So the relative statistics must be collected in tx feedback info. */ if (pstx_fb->tok) { priv->stats.txoktotal++; /* We can not make sure broadcast/multicast or unicast mode. 
*/ if (pstx_fb->pkt_type != PACKET_MULTICAST && pstx_fb->pkt_type != PACKET_BROADCAST) { priv->stats.txbytesunicast += pstx_fb->pkt_length; } } } /* * The function is responsible for extract the message inside TX * feedbck message from firmware. It will contain dedicated info in * ws-06-0063-rtl8190-command-packet-specification. Please * refer to chapter "TX Feedback Element". We have to read 20 bytes * in the command packet. */ static void cmpk_handle_tx_feedback(struct r8192_priv *priv, u8 *pmsg) { cmpk_txfb_t rx_tx_fb; /* */ priv->stats.txfeedback++; memcpy((u8*)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t)); /* Use tx feedback info to count TX statistics. */ cmpk_count_txstatistic(priv, &rx_tx_fb); } /* * The function is responsible for extract the message from * firmware. It will contain dedicated info in * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc. * Please refer to chapter "Interrupt Status Element". */ static void cmpk_handle_interrupt_status(struct r8192_priv *priv, u8 *pmsg) { cmpk_intr_sta_t rx_intr_status; /* */ DMESG("---> cmpk_Handle_Interrupt_Status()\n"); /* 1. Extract TX feedback info from RFD to temp structure buffer. */ /* It seems that FW use big endian(MIPS) and DRV use little endian in windows OS. So we have to read the content byte by byte or transfer endian type before copy the message copy. */ //rx_bcn_state.Element_ID = pMsg[0]; //rx_bcn_state.Length = pMsg[1]; rx_intr_status.length = pmsg[1]; if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2)) { DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n"); return; } // Statistics of beacon for ad-hoc mode. if( priv->ieee80211->iw_mode == IW_MODE_ADHOC) { //2 maybe need endian transform? 
rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4)); //rx_intr_status.InterruptStatus = N2H4BYTE(*((UINT32 *)(pMsg + 4))); DMESG("interrupt status = 0x%x\n", rx_intr_status.interrupt_status); if (rx_intr_status.interrupt_status & ISR_TxBcnOk) { priv->ieee80211->bibsscoordinator = true; priv->stats.txbeaconokint++; } else if (rx_intr_status.interrupt_status & ISR_TxBcnErr) { priv->ieee80211->bibsscoordinator = false; priv->stats.txbeaconerr++; } } // Other informations in interrupt status we need? DMESG("<---- cmpk_handle_interrupt_status()\n"); } /* * The function is responsible for extract the message from * firmware. It will contain dedicated info in * ws-06-0063-rtl8190-command-packet-specification. Please * refer to chapter "Beacon State Element". */ static void cmpk_handle_query_config_rx(struct r8192_priv *priv, u8 *pmsg) { cmpk_query_cfg_t rx_query_cfg; /* */ /* 0. Display received message. */ //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg); /* 1. Extract TX feedback info from RFD to temp structure buffer. */ /* It seems that FW use big endian(MIPS) and DRV use little endian in windows OS. So we have to read the content byte by byte or transfer endian type before copy the message copy. */ //rx_query_cfg.Element_ID = pMsg[0]; //rx_query_cfg.Length = pMsg[1]; rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31; rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5; rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3; rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0; rx_query_cfg.cfg_offset = pmsg[7]; rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) | (pmsg[10] << 8) | (pmsg[11] << 0); rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) | (pmsg[14] << 8) | (pmsg[15] << 0); } /* * Count aggregated tx status from firmwar of one type rx command * packet element id = RX_TX_STATUS. 
*/ static void cmpk_count_tx_status(struct r8192_priv *priv, cmpk_tx_status_t *pstx_status) { #ifdef ENABLE_PS RT_RF_POWER_STATE rtstate; pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. if (rtState == eRfOff) { return; } #endif priv->stats.txfeedbackok += pstx_status->txok; priv->stats.txoktotal += pstx_status->txok; priv->stats.txbytesunicast += pstx_status->txuclength; } /* * Firmware add a new tx feedback status to reduce rx command * packet buffer operation load. */ static void cmpk_handle_tx_status(struct r8192_priv *priv, u8 *pmsg) { cmpk_tx_status_t rx_tx_sts; /* */ memcpy((void*)&rx_tx_sts, (void*)pmsg, sizeof(cmpk_tx_status_t)); /* 2. Use tx feedback info to count TX statistics. */ cmpk_count_tx_status(priv, &rx_tx_sts); } /* Firmware add a new tx rate history */ static void cmpk_handle_tx_rate_history(struct r8192_priv *priv, u8 *pmsg) { u8 i; u16 length = sizeof(cmpk_tx_rahis_t); u32 *ptemp; #ifdef ENABLE_PS pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. if (rtState == eRfOff) { return; } #endif ptemp = (u32 *)pmsg; // // Do endian transfer to word alignment(16 bits) for windows system. // You must do different endian transfer for linux and MAC OS // for (i = 0; i < (length/4); i++) { u16 temp1, temp2; temp1 = ptemp[i]&0x0000FFFF; temp2 = ptemp[i]>>16; ptemp[i] = (temp1<<16)|temp2; } } /* * In the function, we will capture different RX command packet * info. Every RX command packet element has different message * length and meaning in content. 
We only support three type of RX
 * command packet now. Please refer to document
 * ws-06-0063-rtl8190-command-packet-specification.
 *
 * Returns 1 if the buffer was a command packet (even if an unknown
 * element was found part-way through), 0 if the input is unusable.
 */
u32 cmpk_message_handle_rx(struct r8192_priv *priv,
			   struct ieee80211_rx_stats *pstats)
{
	int total_length;
	u8 cmd_length, exe_cnt = 0;
	u8 element_id;
	u8 *pcmd_buff;

	RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx()\n");

	/* 0. Check input arguments.  Without the stats descriptor we have
	 * neither a length nor a buffer to parse. */
	if (pstats == NULL)
		return 0;	/* This is not a command packet. */

	/* 1. Read received command packet message length from RFD. */
	total_length = pstats->Length;

	/* 2. Read virtual address from RFD. */
	pcmd_buff = pstats->virtual_address;

	/* 3/4. Walk every aggregated command element.  FW may aggregate
	 * several command elements into one RX packet to minimise transfer
	 * time between driver and firmware, so keep consuming elements
	 * until the remaining length is exhausted.
	 *
	 * BUGFIX: the original guard was
	 *	while (total_length > 0 || exe_cnt++ > 100)
	 * With ||, the right-hand side is short-circuited away whenever
	 * total_length > 0, so the "don't get locked in the loop too long"
	 * counter never incremented and never bounded the loop.  Use &&
	 * with < so the loop genuinely stops after at most 100 elements. */
	while (total_length > 0 && exe_cnt++ < 100) {
		/* 2007/01/17 MH We support aggregation of different cmd in
		 * the same packet: re-read the element id at each step. */
		element_id = pcmd_buff[0];

		switch (element_id) {
		case RX_TX_FEEDBACK:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_FEEDBACK\n");
			cmpk_handle_tx_feedback(priv, pcmd_buff);
			cmd_length = CMPK_RX_TX_FB_SIZE;
			break;

		case RX_INTERRUPT_STATUS:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_INTERRUPT_STATUS\n");
			cmpk_handle_interrupt_status(priv, pcmd_buff);
			cmd_length = sizeof(cmpk_intr_sta_t);
			break;

		case BOTH_QUERY_CONFIG:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():BOTH_QUERY_CONFIG\n");
			cmpk_handle_query_config_rx(priv, pcmd_buff);
			cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
			break;

		case RX_TX_STATUS:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_STATUS\n");
			cmpk_handle_tx_status(priv, pcmd_buff);
			cmd_length = CMPK_RX_TX_STS_SIZE;
			break;

		case RX_TX_PER_PKT_FEEDBACK:
			/* You must at least add a switch case element here,
			 * otherwise we would fall into the default case. */
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_PER_PKT_FEEDBACK\n");
			cmd_length = CMPK_RX_TX_FB_SIZE;
			break;

		case RX_TX_RATE_HISTORY:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():RX_TX_HISTORY\n");
			cmpk_handle_tx_rate_history(priv, pcmd_buff);
			cmd_length = CMPK_TX_RAHIS_SIZE;
			break;

		default:
			RT_TRACE(COMP_EVENTS, "---->cmpk_message_handle_rx():unknown CMD Element\n");
			return 1;	/* This is a command packet. */
		}

		total_length -= cmd_length;
		pcmd_buff += cmd_length;
	}	/* while (total_length > 0) */

	/* Moved before the return: the original placed this trace after
	 * "return 1", making it unreachable. */
	RT_TRACE(COMP_EVENTS, "<----cmpk_message_handle_rx()\n");
	return 1;	/* This is a command packet. */
}
gpl-2.0
yagay/android_kernel_zte_nx507j
arch/powerpc/kernel/kvm.c
4460
21349
/* * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved. * Copyright 2010-2011 Freescale Semiconductor, Inc. * * Authors: * Alexander Graf <agraf@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kvm_host.h> #include <linux/init.h> #include <linux/export.h> #include <linux/kvm_para.h> #include <linux/slab.h> #include <linux/of.h> #include <asm/reg.h> #include <asm/sections.h> #include <asm/cacheflush.h> #include <asm/disassemble.h> #include <asm/ppc-opcode.h> #define KVM_MAGIC_PAGE (-4096L) #define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x) #define KVM_INST_LWZ 0x80000000 #define KVM_INST_STW 0x90000000 #define KVM_INST_LD 0xe8000000 #define KVM_INST_STD 0xf8000000 #define KVM_INST_NOP 0x60000000 #define KVM_INST_B 0x48000000 #define KVM_INST_B_MASK 0x03ffffff #define KVM_INST_B_MAX 0x01ffffff #define KVM_INST_LI 0x38000000 #define KVM_MASK_RT 0x03e00000 #define KVM_RT_30 0x03c00000 #define KVM_MASK_RB 0x0000f800 #define KVM_INST_MFMSR 0x7c0000a6 #define SPR_FROM 0 #define SPR_TO 0x100 #define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \ (((sprn) & 0x1f) << 16) | \ (((sprn) & 0x3e0) << 6) | \ (moveto)) #define KVM_INST_MFSPR(sprn) KVM_INST_SPR(sprn, SPR_FROM) #define KVM_INST_MTSPR(sprn) KVM_INST_SPR(sprn, SPR_TO) #define KVM_INST_TLBSYNC 0x7c00046c #define KVM_INST_MTMSRD_L0 0x7c000164 #define KVM_INST_MTMSRD_L1 
0x7c010164 #define KVM_INST_MTMSR 0x7c000124 #define KVM_INST_WRTEE 0x7c000106 #define KVM_INST_WRTEEI_0 0x7c000146 #define KVM_INST_WRTEEI_1 0x7c008146 #define KVM_INST_MTSRIN 0x7c0001e4 static bool kvm_patching_worked = true; static char kvm_tmp[1024 * 1024]; static int kvm_tmp_index; static inline void kvm_patch_ins(u32 *inst, u32 new_inst) { *inst = new_inst; flush_icache_range((ulong)inst, (ulong)inst + 4); } static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt) { #ifdef CONFIG_64BIT kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); #else kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc)); #endif } static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt) { #ifdef CONFIG_64BIT kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); #else kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc)); #endif } static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt) { kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff)); } static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt) { #ifdef CONFIG_64BIT kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc)); #else kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc)); #endif } static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt) { kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc)); } static void kvm_patch_ins_nop(u32 *inst) { kvm_patch_ins(inst, KVM_INST_NOP); } static void kvm_patch_ins_b(u32 *inst, int addr) { #if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S) /* On relocatable kernels interrupts handlers and our code can be in different regions, so we don't patch them */ if ((ulong)inst < (ulong)&__end_interrupts) return; #endif kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK)); } static u32 *kvm_alloc(int len) { u32 *p; if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) { printk(KERN_ERR "KVM: No more space (%d + %d)\n", kvm_tmp_index, len); kvm_patching_worked = false; return NULL; } p = 
(void*)&kvm_tmp[kvm_tmp_index]; kvm_tmp_index += len; return p; } extern u32 kvm_emulate_mtmsrd_branch_offs; extern u32 kvm_emulate_mtmsrd_reg_offs; extern u32 kvm_emulate_mtmsrd_orig_ins_offs; extern u32 kvm_emulate_mtmsrd_len; extern u32 kvm_emulate_mtmsrd[]; static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt) { u32 *p; int distance_start; int distance_end; ulong next_inst; p = kvm_alloc(kvm_emulate_mtmsrd_len * 4); if (!p) return; /* Find out where we are and put everything there */ distance_start = (ulong)p - (ulong)inst; next_inst = ((ulong)inst + 4); distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs]; /* Make sure we only write valid b instructions */ if (distance_start > KVM_INST_B_MAX) { kvm_patching_worked = false; return; } /* Modify the chunk to fit the invocation */ memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4); p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK; switch (get_rt(rt)) { case 30: kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs], magic_var(scratch2), KVM_RT_30); break; case 31: kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs], magic_var(scratch1), KVM_RT_30); break; default: p[kvm_emulate_mtmsrd_reg_offs] |= rt; break; } p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst; flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4); /* Patch the invocation */ kvm_patch_ins_b(inst, distance_start); } extern u32 kvm_emulate_mtmsr_branch_offs; extern u32 kvm_emulate_mtmsr_reg1_offs; extern u32 kvm_emulate_mtmsr_reg2_offs; extern u32 kvm_emulate_mtmsr_orig_ins_offs; extern u32 kvm_emulate_mtmsr_len; extern u32 kvm_emulate_mtmsr[]; static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt) { u32 *p; int distance_start; int distance_end; ulong next_inst; p = kvm_alloc(kvm_emulate_mtmsr_len * 4); if (!p) return; /* Find out where we are and put everything there */ distance_start = (ulong)p - (ulong)inst; next_inst = ((ulong)inst + 4); distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs]; /* 
Make sure we only write valid b instructions */ if (distance_start > KVM_INST_B_MAX) { kvm_patching_worked = false; return; } /* Modify the chunk to fit the invocation */ memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4); p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK; /* Make clobbered registers work too */ switch (get_rt(rt)) { case 30: kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs], magic_var(scratch2), KVM_RT_30); kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs], magic_var(scratch2), KVM_RT_30); break; case 31: kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs], magic_var(scratch1), KVM_RT_30); kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs], magic_var(scratch1), KVM_RT_30); break; default: p[kvm_emulate_mtmsr_reg1_offs] |= rt; p[kvm_emulate_mtmsr_reg2_offs] |= rt; break; } p[kvm_emulate_mtmsr_orig_ins_offs] = *inst; flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4); /* Patch the invocation */ kvm_patch_ins_b(inst, distance_start); } #ifdef CONFIG_BOOKE extern u32 kvm_emulate_wrtee_branch_offs; extern u32 kvm_emulate_wrtee_reg_offs; extern u32 kvm_emulate_wrtee_orig_ins_offs; extern u32 kvm_emulate_wrtee_len; extern u32 kvm_emulate_wrtee[]; static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one) { u32 *p; int distance_start; int distance_end; ulong next_inst; p = kvm_alloc(kvm_emulate_wrtee_len * 4); if (!p) return; /* Find out where we are and put everything there */ distance_start = (ulong)p - (ulong)inst; next_inst = ((ulong)inst + 4); distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs]; /* Make sure we only write valid b instructions */ if (distance_start > KVM_INST_B_MAX) { kvm_patching_worked = false; return; } /* Modify the chunk to fit the invocation */ memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4); p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK; if (imm_one) { p[kvm_emulate_wrtee_reg_offs] = KVM_INST_LI | __PPC_RT(30) | MSR_EE; } else { /* Make clobbered 
registers work too */ switch (get_rt(rt)) { case 30: kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs], magic_var(scratch2), KVM_RT_30); break; case 31: kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs], magic_var(scratch1), KVM_RT_30); break; default: p[kvm_emulate_wrtee_reg_offs] |= rt; break; } } p[kvm_emulate_wrtee_orig_ins_offs] = *inst; flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4); /* Patch the invocation */ kvm_patch_ins_b(inst, distance_start); } extern u32 kvm_emulate_wrteei_0_branch_offs; extern u32 kvm_emulate_wrteei_0_len; extern u32 kvm_emulate_wrteei_0[]; static void kvm_patch_ins_wrteei_0(u32 *inst) { u32 *p; int distance_start; int distance_end; ulong next_inst; p = kvm_alloc(kvm_emulate_wrteei_0_len * 4); if (!p) return; /* Find out where we are and put everything there */ distance_start = (ulong)p - (ulong)inst; next_inst = ((ulong)inst + 4); distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs]; /* Make sure we only write valid b instructions */ if (distance_start > KVM_INST_B_MAX) { kvm_patching_worked = false; return; } memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4); p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK; flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4); /* Patch the invocation */ kvm_patch_ins_b(inst, distance_start); } #endif #ifdef CONFIG_PPC_BOOK3S_32 extern u32 kvm_emulate_mtsrin_branch_offs; extern u32 kvm_emulate_mtsrin_reg1_offs; extern u32 kvm_emulate_mtsrin_reg2_offs; extern u32 kvm_emulate_mtsrin_orig_ins_offs; extern u32 kvm_emulate_mtsrin_len; extern u32 kvm_emulate_mtsrin[]; static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb) { u32 *p; int distance_start; int distance_end; ulong next_inst; p = kvm_alloc(kvm_emulate_mtsrin_len * 4); if (!p) return; /* Find out where we are and put everything there */ distance_start = (ulong)p - (ulong)inst; next_inst = ((ulong)inst + 4); distance_end = next_inst - 
(ulong)&p[kvm_emulate_mtsrin_branch_offs]; /* Make sure we only write valid b instructions */ if (distance_start > KVM_INST_B_MAX) { kvm_patching_worked = false; return; } /* Modify the chunk to fit the invocation */ memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4); p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK; p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10); p[kvm_emulate_mtsrin_reg2_offs] |= rt; p[kvm_emulate_mtsrin_orig_ins_offs] = *inst; flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4); /* Patch the invocation */ kvm_patch_ins_b(inst, distance_start); } #endif static void kvm_map_magic_page(void *data) { u32 *features = data; ulong in[8]; ulong out[8]; in[0] = KVM_MAGIC_PAGE; in[1] = KVM_MAGIC_PAGE; kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE); *features = out[0]; } static void kvm_check_ins(u32 *inst, u32 features) { u32 _inst = *inst; u32 inst_no_rt = _inst & ~KVM_MASK_RT; u32 inst_rt = _inst & KVM_MASK_RT; switch (inst_no_rt) { /* Loads */ case KVM_INST_MFMSR: kvm_patch_ins_ld(inst, magic_var(msr), inst_rt); break; case KVM_INST_MFSPR(SPRN_SPRG0): kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt); break; case KVM_INST_MFSPR(SPRN_SPRG1): kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt); break; case KVM_INST_MFSPR(SPRN_SPRG2): kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt); break; case KVM_INST_MFSPR(SPRN_SPRG3): kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt); break; case KVM_INST_MFSPR(SPRN_SRR0): kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt); break; case KVM_INST_MFSPR(SPRN_SRR1): kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt); break; #ifdef CONFIG_BOOKE case KVM_INST_MFSPR(SPRN_DEAR): #else case KVM_INST_MFSPR(SPRN_DAR): #endif kvm_patch_ins_ld(inst, magic_var(dar), inst_rt); break; case KVM_INST_MFSPR(SPRN_DSISR): kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt); break; #ifdef CONFIG_PPC_BOOK3E_MMU case KVM_INST_MFSPR(SPRN_MAS0): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) 
kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt); break; case KVM_INST_MFSPR(SPRN_MAS1): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt); break; case KVM_INST_MFSPR(SPRN_MAS2): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt); break; case KVM_INST_MFSPR(SPRN_MAS3): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt); break; case KVM_INST_MFSPR(SPRN_MAS4): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt); break; case KVM_INST_MFSPR(SPRN_MAS6): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt); break; case KVM_INST_MFSPR(SPRN_MAS7): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt); break; #endif /* CONFIG_PPC_BOOK3E_MMU */ case KVM_INST_MFSPR(SPRN_SPRG4): #ifdef CONFIG_BOOKE case KVM_INST_MFSPR(SPRN_SPRG4R): #endif if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt); break; case KVM_INST_MFSPR(SPRN_SPRG5): #ifdef CONFIG_BOOKE case KVM_INST_MFSPR(SPRN_SPRG5R): #endif if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt); break; case KVM_INST_MFSPR(SPRN_SPRG6): #ifdef CONFIG_BOOKE case KVM_INST_MFSPR(SPRN_SPRG6R): #endif if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt); break; case KVM_INST_MFSPR(SPRN_SPRG7): #ifdef CONFIG_BOOKE case KVM_INST_MFSPR(SPRN_SPRG7R): #endif if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt); break; #ifdef CONFIG_BOOKE case KVM_INST_MFSPR(SPRN_ESR): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt); break; #endif case KVM_INST_MFSPR(SPRN_PIR): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt); break; /* 
Stores */ case KVM_INST_MTSPR(SPRN_SPRG0): kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt); break; case KVM_INST_MTSPR(SPRN_SPRG1): kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt); break; case KVM_INST_MTSPR(SPRN_SPRG2): kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt); break; case KVM_INST_MTSPR(SPRN_SPRG3): kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt); break; case KVM_INST_MTSPR(SPRN_SRR0): kvm_patch_ins_std(inst, magic_var(srr0), inst_rt); break; case KVM_INST_MTSPR(SPRN_SRR1): kvm_patch_ins_std(inst, magic_var(srr1), inst_rt); break; #ifdef CONFIG_BOOKE case KVM_INST_MTSPR(SPRN_DEAR): #else case KVM_INST_MTSPR(SPRN_DAR): #endif kvm_patch_ins_std(inst, magic_var(dar), inst_rt); break; case KVM_INST_MTSPR(SPRN_DSISR): kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt); break; #ifdef CONFIG_PPC_BOOK3E_MMU case KVM_INST_MTSPR(SPRN_MAS0): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt); break; case KVM_INST_MTSPR(SPRN_MAS1): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt); break; case KVM_INST_MTSPR(SPRN_MAS2): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_std(inst, magic_var(mas2), inst_rt); break; case KVM_INST_MTSPR(SPRN_MAS3): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt); break; case KVM_INST_MTSPR(SPRN_MAS4): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt); break; case KVM_INST_MTSPR(SPRN_MAS6): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt); break; case KVM_INST_MTSPR(SPRN_MAS7): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt); break; #endif /* CONFIG_PPC_BOOK3E_MMU */ case KVM_INST_MTSPR(SPRN_SPRG4): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt); break; case KVM_INST_MTSPR(SPRN_SPRG5): if 
(features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt); break; case KVM_INST_MTSPR(SPRN_SPRG6): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt); break; case KVM_INST_MTSPR(SPRN_SPRG7): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt); break; #ifdef CONFIG_BOOKE case KVM_INST_MTSPR(SPRN_ESR): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(esr), inst_rt); break; #endif /* Nops */ case KVM_INST_TLBSYNC: kvm_patch_ins_nop(inst); break; /* Rewrites */ case KVM_INST_MTMSRD_L1: kvm_patch_ins_mtmsrd(inst, inst_rt); break; case KVM_INST_MTMSR: case KVM_INST_MTMSRD_L0: kvm_patch_ins_mtmsr(inst, inst_rt); break; #ifdef CONFIG_BOOKE case KVM_INST_WRTEE: kvm_patch_ins_wrtee(inst, inst_rt, 0); break; #endif } switch (inst_no_rt & ~KVM_MASK_RB) { #ifdef CONFIG_PPC_BOOK3S_32 case KVM_INST_MTSRIN: if (features & KVM_MAGIC_FEAT_SR) { u32 inst_rb = _inst & KVM_MASK_RB; kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb); } break; break; #endif } switch (_inst) { #ifdef CONFIG_BOOKE case KVM_INST_WRTEEI_0: kvm_patch_ins_wrteei_0(inst); break; case KVM_INST_WRTEEI_1: kvm_patch_ins_wrtee(inst, 0, 1); break; #endif } } extern u32 kvm_template_start[]; extern u32 kvm_template_end[]; static void kvm_use_magic_page(void) { u32 *p; u32 *start, *end; u32 tmp; u32 features; /* Tell the host to map the magic page to -4096 on all CPUs */ on_each_cpu(kvm_map_magic_page, &features, 1); /* Quick self-test to see if the mapping works */ if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) { kvm_patching_worked = false; return; } /* Now loop through all code and find instructions */ start = (void*)_stext; end = (void*)_etext; /* * Being interrupted in the middle of patching would * be bad for SPRG4-7, which KVM can't keep in sync * with emulated accesses because reads don't trap. 
*/ local_irq_disable(); for (p = start; p < end; p++) { /* Avoid patching the template code */ if (p >= kvm_template_start && p < kvm_template_end) { p = kvm_template_end - 1; continue; } kvm_check_ins(p, features); } local_irq_enable(); printk(KERN_INFO "KVM: Live patching for a fast VM %s\n", kvm_patching_worked ? "worked" : "failed"); } unsigned long kvm_hypercall(unsigned long *in, unsigned long *out, unsigned long nr) { unsigned long register r0 asm("r0"); unsigned long register r3 asm("r3") = in[0]; unsigned long register r4 asm("r4") = in[1]; unsigned long register r5 asm("r5") = in[2]; unsigned long register r6 asm("r6") = in[3]; unsigned long register r7 asm("r7") = in[4]; unsigned long register r8 asm("r8") = in[5]; unsigned long register r9 asm("r9") = in[6]; unsigned long register r10 asm("r10") = in[7]; unsigned long register r11 asm("r11") = nr; unsigned long register r12 asm("r12"); asm volatile("bl kvm_hypercall_start" : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6), "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11), "=r"(r12) : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8), "r"(r9), "r"(r10), "r"(r11) : "memory", "cc", "xer", "ctr", "lr"); out[0] = r4; out[1] = r5; out[2] = r6; out[3] = r7; out[4] = r8; out[5] = r9; out[6] = r10; out[7] = r11; return r3; } EXPORT_SYMBOL_GPL(kvm_hypercall); static int kvm_para_setup(void) { extern u32 kvm_hypercall_start; struct device_node *hyper_node; u32 *insts; int len, i; hyper_node = of_find_node_by_path("/hypervisor"); if (!hyper_node) return -1; insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len); if (len % 4) return -1; if (len > (4 * 4)) return -1; for (i = 0; i < (len / 4); i++) kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]); return 0; } static __init void kvm_free_tmp(void) { unsigned long start, end; start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK; end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK; /* Free the tmp space we don't need */ for (; 
start < end; start += PAGE_SIZE) { ClearPageReserved(virt_to_page(start)); init_page_count(virt_to_page(start)); free_page(start); totalram_pages++; } } static int __init kvm_guest_init(void) { if (!kvm_para_available()) goto free_tmp; if (kvm_para_setup()) goto free_tmp; if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE)) kvm_use_magic_page(); #ifdef CONFIG_PPC_BOOK3S_64 /* Enable napping */ powersave_nap = 1; #endif free_tmp: kvm_free_tmp(); return 0; } postcore_initcall(kvm_guest_init);
gpl-2.0
hei1125/Nova_Kernel
drivers/infiniband/hw/qib/qib_twsi.c
8044
13007
/* * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/delay.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include "qib.h" /* * QLogic_IB "Two Wire Serial Interface" driver. * Originally written for a not-quite-i2c serial eeprom, which is * still used on some supported boards. Later boards have added a * variety of other uses, most board-specific, so the bit-boffing * part has been split off to this file, while the other parts * have been moved to chip-specific files. * * We have also dropped all pretense of fully generic (e.g. 
pretend * we don't know whether '1' is the higher voltage) interface, as * the restrictions of the generic i2c interface (e.g. no access from * driver itself) make it unsuitable for this use. */ #define READ_CMD 1 #define WRITE_CMD 0 /** * i2c_wait_for_writes - wait for a write * @dd: the qlogic_ib device * * We use this instead of udelay directly, so we can make sure * that previous register writes have been flushed all the way * to the chip. Since we are delaying anyway, the cost doesn't * hurt, and makes the bit twiddling more regular */ static void i2c_wait_for_writes(struct qib_devdata *dd) { /* * implicit read of EXTStatus is as good as explicit * read of scratch, if all we want to do is flush * writes. */ dd->f_gpio_mod(dd, 0, 0, 0); rmb(); /* inlined, so prevent compiler reordering */ } /* * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that * for "almost compliant" modules */ #define SCL_WAIT_USEC 1000 /* BUF_WAIT is time bus must be free between STOP or ACK and to next START. * Should be 20, but some chips need more. */ #define TWSI_BUF_WAIT_USEC 60 static void scl_out(struct qib_devdata *dd, u8 bit) { u32 mask; udelay(1); mask = 1UL << dd->gpio_scl_num; /* SCL is meant to be bare-drain, so never set "OUT", just DIR */ dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask); /* * Allow for slow slaves by simple * delay for falling edge, sampling on rise. */ if (!bit) udelay(2); else { int rise_usec; for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) { if (mask & dd->f_gpio_mod(dd, 0, 0, 0)) break; udelay(2); } if (rise_usec <= 0) qib_dev_err(dd, "SCL interface stuck low > %d uSec\n", SCL_WAIT_USEC); } i2c_wait_for_writes(dd); } static void sda_out(struct qib_devdata *dd, u8 bit) { u32 mask; mask = 1UL << dd->gpio_sda_num; /* SDA is meant to be bare-drain, so never set "OUT", just DIR */ dd->f_gpio_mod(dd, 0, bit ? 
0 : mask, mask); i2c_wait_for_writes(dd); udelay(2); } static u8 sda_in(struct qib_devdata *dd, int wait) { int bnum; u32 read_val, mask; bnum = dd->gpio_sda_num; mask = (1UL << bnum); /* SDA is meant to be bare-drain, so never set "OUT", just DIR */ dd->f_gpio_mod(dd, 0, 0, mask); read_val = dd->f_gpio_mod(dd, 0, 0, 0); if (wait) i2c_wait_for_writes(dd); return (read_val & mask) >> bnum; } /** * i2c_ackrcv - see if ack following write is true * @dd: the qlogic_ib device */ static int i2c_ackrcv(struct qib_devdata *dd) { u8 ack_received; /* AT ENTRY SCL = LOW */ /* change direction, ignore data */ ack_received = sda_in(dd, 1); scl_out(dd, 1); ack_received = sda_in(dd, 1) == 0; scl_out(dd, 0); return ack_received; } static void stop_cmd(struct qib_devdata *dd); /** * rd_byte - read a byte, sending STOP on last, else ACK * @dd: the qlogic_ib device * * Returns byte shifted out of device */ static int rd_byte(struct qib_devdata *dd, int last) { int bit_cntr, data; data = 0; for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) { data <<= 1; scl_out(dd, 1); data |= sda_in(dd, 0); scl_out(dd, 0); } if (last) { scl_out(dd, 1); stop_cmd(dd); } else { sda_out(dd, 0); scl_out(dd, 1); scl_out(dd, 0); sda_out(dd, 1); } return data; } /** * wr_byte - write a byte, one bit at a time * @dd: the qlogic_ib device * @data: the byte to write * * Returns 0 if we got the following ack, otherwise 1 */ static int wr_byte(struct qib_devdata *dd, u8 data) { int bit_cntr; u8 bit; for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) { bit = (data >> bit_cntr) & 1; sda_out(dd, bit); scl_out(dd, 1); scl_out(dd, 0); } return (!i2c_ackrcv(dd)) ? 
1 : 0; } /* * issue TWSI start sequence: * (both clock/data high, clock high, data low while clock is high) */ static void start_seq(struct qib_devdata *dd) { sda_out(dd, 1); scl_out(dd, 1); sda_out(dd, 0); udelay(1); scl_out(dd, 0); } /** * stop_seq - transmit the stop sequence * @dd: the qlogic_ib device * * (both clock/data low, clock high, data high while clock is high) */ static void stop_seq(struct qib_devdata *dd) { scl_out(dd, 0); sda_out(dd, 0); scl_out(dd, 1); sda_out(dd, 1); } /** * stop_cmd - transmit the stop condition * @dd: the qlogic_ib device * * (both clock/data low, clock high, data high while clock is high) */ static void stop_cmd(struct qib_devdata *dd) { stop_seq(dd); udelay(TWSI_BUF_WAIT_USEC); } /** * qib_twsi_reset - reset I2C communication * @dd: the qlogic_ib device */ int qib_twsi_reset(struct qib_devdata *dd) { int clock_cycles_left = 9; int was_high = 0; u32 pins, mask; /* Both SCL and SDA should be high. If not, there * is something wrong. */ mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num); /* * Force pins to desired innocuous state. * This is the default power-on state with out=0 and dir=0, * So tri-stated and should be floating high (barring HW problems) */ dd->f_gpio_mod(dd, 0, 0, mask); /* * Clock nine times to get all listeners into a sane state. * If SDA does not go high at any point, we are wedged. * One vendor recommends then issuing START followed by STOP. * we cannot use our "normal" functions to do that, because * if SCL drops between them, another vendor's part will * wedge, dropping SDA and keeping it low forever, at the end of * the next transaction (even if it was not the device addressed). * So our START and STOP take place with SCL held high. */ while (clock_cycles_left--) { scl_out(dd, 0); scl_out(dd, 1); /* Note if SDA is high, but keep clocking to sync slave */ was_high |= sda_in(dd, 0); } if (was_high) { /* * We saw a high, which we hope means the slave is sync'd. * Issue START, STOP, pause for T_BUF. 
*/ pins = dd->f_gpio_mod(dd, 0, 0, 0); if ((pins & mask) != mask) qib_dev_err(dd, "GPIO pins not at rest: %d\n", pins & mask); /* Drop SDA to issue START */ udelay(1); /* Guarantee .6 uSec setup */ sda_out(dd, 0); udelay(1); /* Guarantee .6 uSec hold */ /* At this point, SCL is high, SDA low. Raise SDA for STOP */ sda_out(dd, 1); udelay(TWSI_BUF_WAIT_USEC); } return !was_high; } #define QIB_TWSI_START 0x100 #define QIB_TWSI_STOP 0x200 /* Write byte to TWSI, optionally prefixed with START or suffixed with * STOP. * returns 0 if OK (ACK received), else != 0 */ static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags) { int ret = 1; if (flags & QIB_TWSI_START) start_seq(dd); ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */ if (flags & QIB_TWSI_STOP) stop_cmd(dd); return ret; } /* Added functionality for IBA7220-based cards */ #define QIB_TEMP_DEV 0x98 /* * qib_twsi_blk_rd * Formerly called qib_eeprom_internal_read, and only used for eeprom, * but now the general interface for data transfer from twsi devices. * One vestige of its former role is that it recognizes a device * QIB_TWSI_NO_DEV and does the correct operation for the legacy part, * which responded to all TWSI device codes, interpreting them as * address within device. On all other devices found on board handled by * this driver, the device is followed by a one-byte "address" which selects * the "register" or "offset" within the device from which data should * be read. */ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer, int len) { int ret; u8 *bp = buffer; ret = 1; if (dev == QIB_TWSI_NO_DEV) { /* legacy not-really-I2C */ addr = (addr << 1) | READ_CMD; ret = qib_twsi_wr(dd, addr, QIB_TWSI_START); } else { /* Actual I2C */ ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START); if (ret) { stop_cmd(dd); ret = 1; goto bail; } /* * SFF spec claims we do _not_ stop after the addr * but simply issue a start with the "read" dev-addr. 
* Since we are implicitely waiting for ACK here, * we need t_buf (nominally 20uSec) before that start, * and cannot rely on the delay built in to the STOP */ ret = qib_twsi_wr(dd, addr, 0); udelay(TWSI_BUF_WAIT_USEC); if (ret) { qib_dev_err(dd, "Failed to write interface read addr %02X\n", addr); ret = 1; goto bail; } ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START); } if (ret) { stop_cmd(dd); ret = 1; goto bail; } /* * block devices keeps clocking data out as long as we ack, * automatically incrementing the address. Some have "pages" * whose boundaries will not be crossed, but the handling * of these is left to the caller, who is in a better * position to know. */ while (len-- > 0) { /* * Get and store data, sending ACK if length remaining, * else STOP */ *bp++ = rd_byte(dd, !len); } ret = 0; bail: return ret; } /* * qib_twsi_blk_wr * Formerly called qib_eeprom_internal_write, and only used for eeprom, * but now the general interface for data transfer to twsi devices. * One vestige of its former role is that it recognizes a device * QIB_TWSI_NO_DEV and does the correct operation for the legacy part, * which responded to all TWSI device codes, interpreting them as * address within device. On all other devices found on board handled by * this driver, the device is followed by a one-byte "address" which selects * the "register" or "offset" within the device to which data should * be written. 
*/ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, const void *buffer, int len) { int sub_len; const u8 *bp = buffer; int max_wait_time, i; int ret; ret = 1; while (len > 0) { if (dev == QIB_TWSI_NO_DEV) { if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD, QIB_TWSI_START)) { goto failed_write; } } else { /* Real I2C */ if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START)) goto failed_write; ret = qib_twsi_wr(dd, addr, 0); if (ret) { qib_dev_err(dd, "Failed to write interface" " write addr %02X\n", addr); goto failed_write; } } sub_len = min(len, 4); addr += sub_len; len -= sub_len; for (i = 0; i < sub_len; i++) if (qib_twsi_wr(dd, *bp++, 0)) goto failed_write; stop_cmd(dd); /* * Wait for write complete by waiting for a successful * read (the chip replies with a zero after the write * cmd completes, and before it writes to the eeprom. * The startcmd for the read will fail the ack until * the writes have completed. We do this inline to avoid * the debug prints that are in the real read routine * if the startcmd fails. * We also use the proper device address, so it doesn't matter * whether we have real eeprom_dev. Legacy likes any address. */ max_wait_time = 100; while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) { stop_cmd(dd); if (!--max_wait_time) goto failed_write; } /* now read (and ignore) the resulting byte */ rd_byte(dd, 1); } ret = 0; goto bail; failed_write: stop_cmd(dd); ret = 1; bail: return ret; }
gpl-2.0
delapuente/codeaurora_kernel_msm
arch/blackfin/mach-bf548/ints-priority.c
13676
5330
/* * Copyright 2007-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. * * Set up the interrupt priorities */ #include <linux/module.h> #include <linux/irq.h> #include <asm/blackfin.h> void __init program_IAR(void) { /* Program the IAR0 Register with the configured priority */ bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) | ((CONFIG_IRQ_DMAC0_ERR - 7) << IRQ_DMAC0_ERR_POS) | ((CONFIG_IRQ_EPPI0_ERR - 7) << IRQ_EPPI0_ERR_POS) | ((CONFIG_IRQ_SPORT0_ERR - 7) << IRQ_SPORT0_ERR_POS) | ((CONFIG_IRQ_SPORT1_ERR - 7) << IRQ_SPORT1_ERR_POS) | ((CONFIG_IRQ_SPI0_ERR - 7) << IRQ_SPI0_ERR_POS) | ((CONFIG_IRQ_UART0_ERR - 7) << IRQ_UART0_ERR_POS) | ((CONFIG_IRQ_RTC - 7) << IRQ_RTC_POS)); bfin_write_SIC_IAR1(((CONFIG_IRQ_EPPI0 - 7) << IRQ_EPPI0_POS) | ((CONFIG_IRQ_SPORT0_RX - 7) << IRQ_SPORT0_RX_POS) | ((CONFIG_IRQ_SPORT0_TX - 7) << IRQ_SPORT0_TX_POS) | ((CONFIG_IRQ_SPORT1_RX - 7) << IRQ_SPORT1_RX_POS) | ((CONFIG_IRQ_SPORT1_TX - 7) << IRQ_SPORT1_TX_POS) | ((CONFIG_IRQ_SPI0 - 7) << IRQ_SPI0_POS) | ((CONFIG_IRQ_UART0_RX - 7) << IRQ_UART0_RX_POS) | ((CONFIG_IRQ_UART0_TX - 7) << IRQ_UART0_TX_POS)); bfin_write_SIC_IAR2(((CONFIG_IRQ_TIMER8 - 7) << IRQ_TIMER8_POS) | ((CONFIG_IRQ_TIMER9 - 7) << IRQ_TIMER9_POS) | ((CONFIG_IRQ_PINT0 - 7) << IRQ_PINT0_POS) | ((CONFIG_IRQ_PINT1 - 7) << IRQ_PINT1_POS) | ((CONFIG_IRQ_MDMAS0 - 7) << IRQ_MDMAS0_POS) | ((CONFIG_IRQ_MDMAS1 - 7) << IRQ_MDMAS1_POS) | ((CONFIG_IRQ_WATCHDOG - 7) << IRQ_WATCH_POS)); bfin_write_SIC_IAR3(((CONFIG_IRQ_DMAC1_ERR - 7) << IRQ_DMAC1_ERR_POS) | ((CONFIG_IRQ_SPORT2_ERR - 7) << IRQ_SPORT2_ERR_POS) | ((CONFIG_IRQ_SPORT3_ERR - 7) << IRQ_SPORT3_ERR_POS) | ((CONFIG_IRQ_MXVR_DATA - 7) << IRQ_MXVR_DATA_POS) | ((CONFIG_IRQ_SPI1_ERR - 7) << IRQ_SPI1_ERR_POS) | ((CONFIG_IRQ_SPI2_ERR - 7) << IRQ_SPI2_ERR_POS) | ((CONFIG_IRQ_UART1_ERR - 7) << IRQ_UART1_ERR_POS) | ((CONFIG_IRQ_UART2_ERR - 7) << IRQ_UART2_ERR_POS)); bfin_write_SIC_IAR4(((CONFIG_IRQ_CAN0_ERR - 7) << IRQ_CAN0_ERR_POS) | 
((CONFIG_IRQ_SPORT2_RX - 7) << IRQ_SPORT2_RX_POS) | ((CONFIG_IRQ_SPORT2_TX - 7) << IRQ_SPORT2_TX_POS) | ((CONFIG_IRQ_SPORT3_RX - 7) << IRQ_SPORT3_RX_POS) | ((CONFIG_IRQ_SPORT3_TX - 7) << IRQ_SPORT3_TX_POS) | ((CONFIG_IRQ_EPPI1 - 7) << IRQ_EPPI1_POS) | ((CONFIG_IRQ_EPPI2 - 7) << IRQ_EPPI2_POS) | ((CONFIG_IRQ_SPI1 - 7) << IRQ_SPI1_POS)); bfin_write_SIC_IAR5(((CONFIG_IRQ_SPI2 - 7) << IRQ_SPI2_POS) | ((CONFIG_IRQ_UART1_RX - 7) << IRQ_UART1_RX_POS) | ((CONFIG_IRQ_UART1_TX - 7) << IRQ_UART1_TX_POS) | ((CONFIG_IRQ_ATAPI_RX - 7) << IRQ_ATAPI_RX_POS) | ((CONFIG_IRQ_ATAPI_TX - 7) << IRQ_ATAPI_TX_POS) | ((CONFIG_IRQ_TWI0 - 7) << IRQ_TWI0_POS) | ((CONFIG_IRQ_TWI1 - 7) << IRQ_TWI1_POS) | ((CONFIG_IRQ_CAN0_RX - 7) << IRQ_CAN0_RX_POS)); bfin_write_SIC_IAR6(((CONFIG_IRQ_CAN0_TX - 7) << IRQ_CAN0_TX_POS) | ((CONFIG_IRQ_MDMAS2 - 7) << IRQ_MDMAS2_POS) | ((CONFIG_IRQ_MDMAS3 - 7) << IRQ_MDMAS3_POS) | ((CONFIG_IRQ_MXVR_ERR - 7) << IRQ_MXVR_ERR_POS) | ((CONFIG_IRQ_MXVR_MSG - 7) << IRQ_MXVR_MSG_POS) | ((CONFIG_IRQ_MXVR_PKT - 7) << IRQ_MXVR_PKT_POS) | ((CONFIG_IRQ_EPPI1_ERR - 7) << IRQ_EPPI1_ERR_POS) | ((CONFIG_IRQ_EPPI2_ERR - 7) << IRQ_EPPI2_ERR_POS)); bfin_write_SIC_IAR7(((CONFIG_IRQ_UART3_ERR - 7) << IRQ_UART3_ERR_POS) | ((CONFIG_IRQ_HOST_ERR - 7) << IRQ_HOST_ERR_POS) | ((CONFIG_IRQ_PIXC_ERR - 7) << IRQ_PIXC_ERR_POS) | ((CONFIG_IRQ_NFC_ERR - 7) << IRQ_NFC_ERR_POS) | ((CONFIG_IRQ_ATAPI_ERR - 7) << IRQ_ATAPI_ERR_POS) | ((CONFIG_IRQ_CAN1_ERR - 7) << IRQ_CAN1_ERR_POS) | ((CONFIG_IRQ_HS_DMA_ERR - 7) << IRQ_HS_DMA_ERR_POS)); bfin_write_SIC_IAR8(((CONFIG_IRQ_PIXC_IN0 - 7) << IRQ_PIXC_IN1_POS) | ((CONFIG_IRQ_PIXC_IN1 - 7) << IRQ_PIXC_IN1_POS) | ((CONFIG_IRQ_PIXC_OUT - 7) << IRQ_PIXC_OUT_POS) | ((CONFIG_IRQ_SDH - 7) << IRQ_SDH_POS) | ((CONFIG_IRQ_CNT - 7) << IRQ_CNT_POS) | ((CONFIG_IRQ_KEY - 7) << IRQ_KEY_POS) | ((CONFIG_IRQ_CAN1_RX - 7) << IRQ_CAN1_RX_POS) | ((CONFIG_IRQ_CAN1_TX - 7) << IRQ_CAN1_TX_POS)); bfin_write_SIC_IAR9(((CONFIG_IRQ_SDH_MASK0 - 7) << IRQ_SDH_MASK0_POS) | 
((CONFIG_IRQ_SDH_MASK1 - 7) << IRQ_SDH_MASK1_POS) | ((CONFIG_IRQ_USB_INT0 - 7) << IRQ_USB_INT0_POS) | ((CONFIG_IRQ_USB_INT1 - 7) << IRQ_USB_INT1_POS) | ((CONFIG_IRQ_USB_INT2 - 7) << IRQ_USB_INT2_POS) | ((CONFIG_IRQ_USB_DMA - 7) << IRQ_USB_DMA_POS) | ((CONFIG_IRQ_OTPSEC - 7) << IRQ_OTPSEC_POS)); bfin_write_SIC_IAR10(((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) | ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS)); bfin_write_SIC_IAR11(((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) | ((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) | ((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS) | ((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) | ((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) | ((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS) | ((CONFIG_IRQ_PINT2 - 7) << IRQ_PINT2_POS) | ((CONFIG_IRQ_PINT3 - 7) << IRQ_PINT3_POS)); SSYNC(); }
gpl-2.0
abusnooze/mint-v3.2-psp26
arch/blackfin/mach-bf548/ints-priority.c
13676
5330
/* * Copyright 2007-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. * * Set up the interrupt priorities */ #include <linux/module.h> #include <linux/irq.h> #include <asm/blackfin.h> void __init program_IAR(void) { /* Program the IAR0 Register with the configured priority */ bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) | ((CONFIG_IRQ_DMAC0_ERR - 7) << IRQ_DMAC0_ERR_POS) | ((CONFIG_IRQ_EPPI0_ERR - 7) << IRQ_EPPI0_ERR_POS) | ((CONFIG_IRQ_SPORT0_ERR - 7) << IRQ_SPORT0_ERR_POS) | ((CONFIG_IRQ_SPORT1_ERR - 7) << IRQ_SPORT1_ERR_POS) | ((CONFIG_IRQ_SPI0_ERR - 7) << IRQ_SPI0_ERR_POS) | ((CONFIG_IRQ_UART0_ERR - 7) << IRQ_UART0_ERR_POS) | ((CONFIG_IRQ_RTC - 7) << IRQ_RTC_POS)); bfin_write_SIC_IAR1(((CONFIG_IRQ_EPPI0 - 7) << IRQ_EPPI0_POS) | ((CONFIG_IRQ_SPORT0_RX - 7) << IRQ_SPORT0_RX_POS) | ((CONFIG_IRQ_SPORT0_TX - 7) << IRQ_SPORT0_TX_POS) | ((CONFIG_IRQ_SPORT1_RX - 7) << IRQ_SPORT1_RX_POS) | ((CONFIG_IRQ_SPORT1_TX - 7) << IRQ_SPORT1_TX_POS) | ((CONFIG_IRQ_SPI0 - 7) << IRQ_SPI0_POS) | ((CONFIG_IRQ_UART0_RX - 7) << IRQ_UART0_RX_POS) | ((CONFIG_IRQ_UART0_TX - 7) << IRQ_UART0_TX_POS)); bfin_write_SIC_IAR2(((CONFIG_IRQ_TIMER8 - 7) << IRQ_TIMER8_POS) | ((CONFIG_IRQ_TIMER9 - 7) << IRQ_TIMER9_POS) | ((CONFIG_IRQ_PINT0 - 7) << IRQ_PINT0_POS) | ((CONFIG_IRQ_PINT1 - 7) << IRQ_PINT1_POS) | ((CONFIG_IRQ_MDMAS0 - 7) << IRQ_MDMAS0_POS) | ((CONFIG_IRQ_MDMAS1 - 7) << IRQ_MDMAS1_POS) | ((CONFIG_IRQ_WATCHDOG - 7) << IRQ_WATCH_POS)); bfin_write_SIC_IAR3(((CONFIG_IRQ_DMAC1_ERR - 7) << IRQ_DMAC1_ERR_POS) | ((CONFIG_IRQ_SPORT2_ERR - 7) << IRQ_SPORT2_ERR_POS) | ((CONFIG_IRQ_SPORT3_ERR - 7) << IRQ_SPORT3_ERR_POS) | ((CONFIG_IRQ_MXVR_DATA - 7) << IRQ_MXVR_DATA_POS) | ((CONFIG_IRQ_SPI1_ERR - 7) << IRQ_SPI1_ERR_POS) | ((CONFIG_IRQ_SPI2_ERR - 7) << IRQ_SPI2_ERR_POS) | ((CONFIG_IRQ_UART1_ERR - 7) << IRQ_UART1_ERR_POS) | ((CONFIG_IRQ_UART2_ERR - 7) << IRQ_UART2_ERR_POS)); bfin_write_SIC_IAR4(((CONFIG_IRQ_CAN0_ERR - 7) << IRQ_CAN0_ERR_POS) | 
((CONFIG_IRQ_SPORT2_RX - 7) << IRQ_SPORT2_RX_POS) | ((CONFIG_IRQ_SPORT2_TX - 7) << IRQ_SPORT2_TX_POS) | ((CONFIG_IRQ_SPORT3_RX - 7) << IRQ_SPORT3_RX_POS) | ((CONFIG_IRQ_SPORT3_TX - 7) << IRQ_SPORT3_TX_POS) | ((CONFIG_IRQ_EPPI1 - 7) << IRQ_EPPI1_POS) | ((CONFIG_IRQ_EPPI2 - 7) << IRQ_EPPI2_POS) | ((CONFIG_IRQ_SPI1 - 7) << IRQ_SPI1_POS)); bfin_write_SIC_IAR5(((CONFIG_IRQ_SPI2 - 7) << IRQ_SPI2_POS) | ((CONFIG_IRQ_UART1_RX - 7) << IRQ_UART1_RX_POS) | ((CONFIG_IRQ_UART1_TX - 7) << IRQ_UART1_TX_POS) | ((CONFIG_IRQ_ATAPI_RX - 7) << IRQ_ATAPI_RX_POS) | ((CONFIG_IRQ_ATAPI_TX - 7) << IRQ_ATAPI_TX_POS) | ((CONFIG_IRQ_TWI0 - 7) << IRQ_TWI0_POS) | ((CONFIG_IRQ_TWI1 - 7) << IRQ_TWI1_POS) | ((CONFIG_IRQ_CAN0_RX - 7) << IRQ_CAN0_RX_POS)); bfin_write_SIC_IAR6(((CONFIG_IRQ_CAN0_TX - 7) << IRQ_CAN0_TX_POS) | ((CONFIG_IRQ_MDMAS2 - 7) << IRQ_MDMAS2_POS) | ((CONFIG_IRQ_MDMAS3 - 7) << IRQ_MDMAS3_POS) | ((CONFIG_IRQ_MXVR_ERR - 7) << IRQ_MXVR_ERR_POS) | ((CONFIG_IRQ_MXVR_MSG - 7) << IRQ_MXVR_MSG_POS) | ((CONFIG_IRQ_MXVR_PKT - 7) << IRQ_MXVR_PKT_POS) | ((CONFIG_IRQ_EPPI1_ERR - 7) << IRQ_EPPI1_ERR_POS) | ((CONFIG_IRQ_EPPI2_ERR - 7) << IRQ_EPPI2_ERR_POS)); bfin_write_SIC_IAR7(((CONFIG_IRQ_UART3_ERR - 7) << IRQ_UART3_ERR_POS) | ((CONFIG_IRQ_HOST_ERR - 7) << IRQ_HOST_ERR_POS) | ((CONFIG_IRQ_PIXC_ERR - 7) << IRQ_PIXC_ERR_POS) | ((CONFIG_IRQ_NFC_ERR - 7) << IRQ_NFC_ERR_POS) | ((CONFIG_IRQ_ATAPI_ERR - 7) << IRQ_ATAPI_ERR_POS) | ((CONFIG_IRQ_CAN1_ERR - 7) << IRQ_CAN1_ERR_POS) | ((CONFIG_IRQ_HS_DMA_ERR - 7) << IRQ_HS_DMA_ERR_POS)); bfin_write_SIC_IAR8(((CONFIG_IRQ_PIXC_IN0 - 7) << IRQ_PIXC_IN1_POS) | ((CONFIG_IRQ_PIXC_IN1 - 7) << IRQ_PIXC_IN1_POS) | ((CONFIG_IRQ_PIXC_OUT - 7) << IRQ_PIXC_OUT_POS) | ((CONFIG_IRQ_SDH - 7) << IRQ_SDH_POS) | ((CONFIG_IRQ_CNT - 7) << IRQ_CNT_POS) | ((CONFIG_IRQ_KEY - 7) << IRQ_KEY_POS) | ((CONFIG_IRQ_CAN1_RX - 7) << IRQ_CAN1_RX_POS) | ((CONFIG_IRQ_CAN1_TX - 7) << IRQ_CAN1_TX_POS)); bfin_write_SIC_IAR9(((CONFIG_IRQ_SDH_MASK0 - 7) << IRQ_SDH_MASK0_POS) | 
((CONFIG_IRQ_SDH_MASK1 - 7) << IRQ_SDH_MASK1_POS) | ((CONFIG_IRQ_USB_INT0 - 7) << IRQ_USB_INT0_POS) | ((CONFIG_IRQ_USB_INT1 - 7) << IRQ_USB_INT1_POS) | ((CONFIG_IRQ_USB_INT2 - 7) << IRQ_USB_INT2_POS) | ((CONFIG_IRQ_USB_DMA - 7) << IRQ_USB_DMA_POS) | ((CONFIG_IRQ_OTPSEC - 7) << IRQ_OTPSEC_POS)); bfin_write_SIC_IAR10(((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) | ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS)); bfin_write_SIC_IAR11(((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) | ((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) | ((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS) | ((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) | ((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) | ((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS) | ((CONFIG_IRQ_PINT2 - 7) << IRQ_PINT2_POS) | ((CONFIG_IRQ_PINT3 - 7) << IRQ_PINT3_POS)); SSYNC(); }
gpl-2.0
Xperia-P/android_kernel_sony_u8500
lib/reed_solomon/decode_rs.c
14444
6959
/* * lib/reed_solomon/decode_rs.c * * Overview: * Generic Reed Solomon encoder / decoder library * * Copyright 2002, Phil Karn, KA9Q * May be used under the terms of the GNU General Public License (GPL) * * Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de) * * $Id: decode_rs.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $ * */ /* Generic data width independent code which is included by the * wrappers. */ { int deg_lambda, el, deg_omega; int i, j, r, k, pad; int nn = rs->nn; int nroots = rs->nroots; int fcr = rs->fcr; int prim = rs->prim; int iprim = rs->iprim; uint16_t *alpha_to = rs->alpha_to; uint16_t *index_of = rs->index_of; uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error; /* Err+Eras Locator poly and syndrome poly The maximum value * of nroots is 8. So the necessary stack size will be about * 220 bytes max. */ uint16_t lambda[nroots + 1], syn[nroots]; uint16_t b[nroots + 1], t[nroots + 1], omega[nroots + 1]; uint16_t root[nroots], reg[nroots + 1], loc[nroots]; int count = 0; uint16_t msk = (uint16_t) rs->nn; /* Check length parameter for validity */ pad = nn - nroots - len; BUG_ON(pad < 0 || pad >= nn); /* Does the caller provide the syndrome ? 
*/ if (s != NULL) goto decode; /* form the syndromes; i.e., evaluate data(x) at roots of * g(x) */ for (i = 0; i < nroots; i++) syn[i] = (((uint16_t) data[0]) ^ invmsk) & msk; for (j = 1; j < len; j++) { for (i = 0; i < nroots; i++) { if (syn[i] == 0) { syn[i] = (((uint16_t) data[j]) ^ invmsk) & msk; } else { syn[i] = ((((uint16_t) data[j]) ^ invmsk) & msk) ^ alpha_to[rs_modnn(rs, index_of[syn[i]] + (fcr + i) * prim)]; } } } for (j = 0; j < nroots; j++) { for (i = 0; i < nroots; i++) { if (syn[i] == 0) { syn[i] = ((uint16_t) par[j]) & msk; } else { syn[i] = (((uint16_t) par[j]) & msk) ^ alpha_to[rs_modnn(rs, index_of[syn[i]] + (fcr+i)*prim)]; } } } s = syn; /* Convert syndromes to index form, checking for nonzero condition */ syn_error = 0; for (i = 0; i < nroots; i++) { syn_error |= s[i]; s[i] = index_of[s[i]]; } if (!syn_error) { /* if syndrome is zero, data[] is a codeword and there are no * errors to correct. So return data[] unmodified */ count = 0; goto finish; } decode: memset(&lambda[1], 0, nroots * sizeof(lambda[0])); lambda[0] = 1; if (no_eras > 0) { /* Init lambda to be the erasure locator polynomial */ lambda[1] = alpha_to[rs_modnn(rs, prim * (nn - 1 - eras_pos[0]))]; for (i = 1; i < no_eras; i++) { u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i])); for (j = i + 1; j > 0; j--) { tmp = index_of[lambda[j - 1]]; if (tmp != nn) { lambda[j] ^= alpha_to[rs_modnn(rs, u + tmp)]; } } } } for (i = 0; i < nroots + 1; i++) b[i] = index_of[lambda[i]]; /* * Begin Berlekamp-Massey algorithm to determine error+erasure * locator polynomial */ r = no_eras; el = no_eras; while (++r <= nroots) { /* r is the step number */ /* Compute discrepancy at the r-th step in poly-form */ discr_r = 0; for (i = 0; i < r; i++) { if ((lambda[i] != 0) && (s[r - i - 1] != nn)) { discr_r ^= alpha_to[rs_modnn(rs, index_of[lambda[i]] + s[r - i - 1])]; } } discr_r = index_of[discr_r]; /* Index form */ if (discr_r == nn) { /* 2 lines below: B(x) <-- x*B(x) */ memmove (&b[1], b, nroots * sizeof 
(b[0])); b[0] = nn; } else { /* 7 lines below: T(x) <-- lambda(x)-discr_r*x*b(x) */ t[0] = lambda[0]; for (i = 0; i < nroots; i++) { if (b[i] != nn) { t[i + 1] = lambda[i + 1] ^ alpha_to[rs_modnn(rs, discr_r + b[i])]; } else t[i + 1] = lambda[i + 1]; } if (2 * el <= r + no_eras - 1) { el = r + no_eras - el; /* * 2 lines below: B(x) <-- inv(discr_r) * * lambda(x) */ for (i = 0; i <= nroots; i++) { b[i] = (lambda[i] == 0) ? nn : rs_modnn(rs, index_of[lambda[i]] - discr_r + nn); } } else { /* 2 lines below: B(x) <-- x*B(x) */ memmove(&b[1], b, nroots * sizeof(b[0])); b[0] = nn; } memcpy(lambda, t, (nroots + 1) * sizeof(t[0])); } } /* Convert lambda to index form and compute deg(lambda(x)) */ deg_lambda = 0; for (i = 0; i < nroots + 1; i++) { lambda[i] = index_of[lambda[i]]; if (lambda[i] != nn) deg_lambda = i; } /* Find roots of error+erasure locator polynomial by Chien search */ memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0])); count = 0; /* Number of roots of lambda(x) */ for (i = 1, k = iprim - 1; i <= nn; i++, k = rs_modnn(rs, k + iprim)) { q = 1; /* lambda[0] is always 0 */ for (j = deg_lambda; j > 0; j--) { if (reg[j] != nn) { reg[j] = rs_modnn(rs, reg[j] + j); q ^= alpha_to[reg[j]]; } } if (q != 0) continue; /* Not a root */ /* store root (index-form) and error location number */ root[count] = i; loc[count] = k; /* If we've already found max possible roots, * abort the search to save time */ if (++count == deg_lambda) break; } if (deg_lambda != count) { /* * deg(lambda) unequal to number of roots => uncorrectable * error detected */ count = -EBADMSG; goto finish; } /* * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo * x**nroots). in index form. Also find deg(omega). */ deg_omega = deg_lambda - 1; for (i = 0; i <= deg_omega; i++) { tmp = 0; for (j = i; j >= 0; j--) { if ((s[i - j] != nn) && (lambda[j] != nn)) tmp ^= alpha_to[rs_modnn(rs, s[i - j] + lambda[j])]; } omega[i] = index_of[tmp]; } /* * Compute error values in poly-form. 
num1 = omega(inv(X(l))), num2 = * inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form */ for (j = count - 1; j >= 0; j--) { num1 = 0; for (i = deg_omega; i >= 0; i--) { if (omega[i] != nn) num1 ^= alpha_to[rs_modnn(rs, omega[i] + i * root[j])]; } num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)]; den = 0; /* lambda[i+1] for i even is the formal derivative * lambda_pr of lambda[i] */ for (i = min(deg_lambda, nroots - 1) & ~1; i >= 0; i -= 2) { if (lambda[i + 1] != nn) { den ^= alpha_to[rs_modnn(rs, lambda[i + 1] + i * root[j])]; } } /* Apply error to data */ if (num1 != 0 && loc[j] >= pad) { uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] + index_of[num2] + nn - index_of[den])]; /* Store the error correction pattern, if a * correction buffer is available */ if (corr) { corr[j] = cor; } else { /* If a data buffer is given and the * error is inside the message, * correct it */ if (data && (loc[j] < (nn - nroots))) data[loc[j] - pad] ^= cor; } } } finish: if (eras_pos != NULL) { for (i = 0; i < count; i++) eras_pos[i] = loc[i] - pad; } return count; }
gpl-2.0
motley-git/Kernel-GT-P73xx-v2
drivers/usb/core/quirks.c
621
4324
/* * USB device quirk handling logic and table * * Copyright (c) 2007 Oliver Neukum * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * */ #include <linux/usb.h> #include <linux/usb/quirks.h> #include "usb.h" /* List of quirky USB devices. Please keep this list ordered by: * 1) Vendor ID * 2) Product ID * 3) Class ID * * as we want specific devices to be overridden first, and only after that, any * class specific quirks. * * Right now the logic aborts if it finds a valid device in the table, we might * want to change that in the future if it turns out that a whole class of * devices is broken... */ static const struct usb_device_id usb_quirk_list[] = { /* CBM - Flash disk */ { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, /* HP 5300/5370C scanner */ { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, /* Logitech Harmony 700-series */ { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT }, /* Philips PSC805 audio device */ { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, /* Artisman Watchdog Dongle */ { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, /* Roland SC-8820 */ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, /* Edirol SD-20 */ { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, /* Avision AV600U */ { USB_DEVICE(0x0638, 0x0a13), .driver_info = USB_QUIRK_STRING_FETCH_255 }, /* Saitek Cyborg Gold Joystick */ { USB_DEVICE(0x06a3, 0x0006), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, /* M-Systems Flash Disk Pioneers */ { 
USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, /* Broadcom BCM92035DGROM BT dongle */ { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME }, /* Action Semiconductor flash disk */ { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, /* SKYMEDI USB_DRIVE */ { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, /* BUILDWIN Photo Frame */ { USB_DEVICE(0x1908, 0x1315), .driver_info = USB_QUIRK_HONOR_BNUMINTERFACES }, /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, { } /* terminating entry must be last */ }; static const struct usb_device_id *find_id(struct usb_device *udev) { const struct usb_device_id *id = usb_quirk_list; for (; id->idVendor || id->bDeviceClass || id->bInterfaceClass || id->driver_info; id++) { if (usb_match_device(udev, id)) return id; } return NULL; } /* * Detect any quirks the device has, and do any housekeeping for it if needed. */ void usb_detect_quirks(struct usb_device *udev) { const struct usb_device_id *id = usb_quirk_list; id = find_id(udev); if (id) udev->quirks = (u32)(id->driver_info); if (udev->quirks) dev_dbg(&udev->dev, "USB quirks for this device: %x\n", udev->quirks); #ifdef CONFIG_USB_SUSPEND /* By default, disable autosuspend for all devices. The hub driver * will enable it for hubs. */ usb_disable_autosuspend(udev); /* Autosuspend can also be disabled if the initial autosuspend_delay * is negative. */ if (udev->autosuspend_delay < 0) usb_autoresume_device(udev); #endif /* For the present, all devices default to USB-PERSIST enabled */ #if 0 /* was: #ifdef CONFIG_PM */ /* Hubs are automatically enabled for USB-PERSIST */ if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) udev->persist_enabled = 1; #else /* In the absence of PM, we can safely enable USB-PERSIST * for all devices. 
It will affect things like hub resets * and EMF-related port disables. */ if (!(udev->quirks & USB_QUIRK_RESET_MORPHS)) udev->persist_enabled = 1; #endif /* CONFIG_PM */ }
gpl-2.0
akw28888/msm
drivers/net/wireless/bcmdhd/bcmwifi.c
877
17306
/* * Misc utility routines used by kernel or app-level. * Contents are wifi-specific, used by any kernel or app-level * software that might want wifi things as it grows. * * Copyright (C) 1999-2012, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * $Id: bcmwifi.c 309193 2012-01-19 00:03:57Z $ */ #include <bcm_cfg.h> #include <typedefs.h> #ifdef BCMDRIVER #include <osl.h> #include <bcmutils.h> #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) #define tolower(c) (bcm_isupper((c)) ? 
((c) + 'a' - 'A') : (c)) #else #include <stdio.h> #include <stdlib.h> #include <ctype.h> #ifndef ASSERT #define ASSERT(exp) #endif #endif #include <bcmwifi.h> #if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL)) #include <bcmstdlib.h> #endif #ifndef D11AC_IOTYPES char * wf_chspec_ntoa(chanspec_t chspec, char *buf) { const char *band, *bw, *sb; uint channel; band = ""; bw = ""; sb = ""; channel = CHSPEC_CHANNEL(chspec); if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) || (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL)) band = (CHSPEC_IS2G(chspec)) ? "b" : "a"; if (CHSPEC_IS40(chspec)) { if (CHSPEC_SB_UPPER(chspec)) { sb = "u"; channel += CH_10MHZ_APART; } else { sb = "l"; channel -= CH_10MHZ_APART; } } else if (CHSPEC_IS10(chspec)) { bw = "n"; } snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb); return (buf); } chanspec_t wf_chspec_aton(const char *a) { char *endp = NULL; uint channel, band, bw, ctl_sb; char c; channel = strtoul(a, &endp, 10); if (endp == a) return 0; if (channel > MAXCHANNEL) return 0; band = ((channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G); bw = WL_CHANSPEC_BW_20; ctl_sb = WL_CHANSPEC_CTL_SB_NONE; a = endp; c = tolower(a[0]); if (c == '\0') goto done; if (c == 'a' || c == 'b') { band = (c == 'a') ? 
WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G; a++; c = tolower(a[0]); if (c == '\0') goto done; } if (c == 'n') { bw = WL_CHANSPEC_BW_10; } else if (c == 'l') { bw = WL_CHANSPEC_BW_40; ctl_sb = WL_CHANSPEC_CTL_SB_LOWER; if (channel <= (MAXCHANNEL - CH_20MHZ_APART)) channel += CH_10MHZ_APART; else return 0; } else if (c == 'u') { bw = WL_CHANSPEC_BW_40; ctl_sb = WL_CHANSPEC_CTL_SB_UPPER; if (channel > CH_20MHZ_APART) channel -= CH_10MHZ_APART; else return 0; } else { return 0; } done: return (channel | band | bw | ctl_sb); } bool wf_chspec_malformed(chanspec_t chanspec) { if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec)) return TRUE; if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec)) return TRUE; if (CHSPEC_IS20(chanspec)) { if (!CHSPEC_SB_NONE(chanspec)) return TRUE; } else { if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec)) return TRUE; } return FALSE; } uint8 wf_chspec_ctlchan(chanspec_t chspec) { uint8 ctl_chan; if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) { return CHSPEC_CHANNEL(chspec); } else { ASSERT(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_40); if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) { ctl_chan = UPPER_20_SB(CHSPEC_CHANNEL(chspec)); } else { ASSERT(CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_LOWER); ctl_chan = LOWER_20_SB(CHSPEC_CHANNEL(chspec)); } } return ctl_chan; } chanspec_t wf_chspec_ctlchspec(chanspec_t chspec) { chanspec_t ctl_chspec = 0; uint8 channel; ASSERT(!wf_chspec_malformed(chspec)); if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) { return chspec; } else { if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) { channel = UPPER_20_SB(CHSPEC_CHANNEL(chspec)); } else { channel = LOWER_20_SB(CHSPEC_CHANNEL(chspec)); } ctl_chspec = channel | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE; ctl_chspec |= CHSPEC_BAND(chspec); } return ctl_chspec; } #else static const char *wf_chspec_bw_str[] = { "5", "10", "20", "40", "80", "160", "80+80", "na" }; static const uint8 wf_chspec_bw_mhz[] = {5, 10, 20, 40, 80, 160, 
160}; #define WF_NUM_BW \ (sizeof(wf_chspec_bw_mhz)/sizeof(uint8)) static const uint8 wf_5g_40m_chans[] = {38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159}; #define WF_NUM_5G_40M_CHANS \ (sizeof(wf_5g_40m_chans)/sizeof(uint8)) static const uint8 wf_5g_80m_chans[] = {42, 58, 106, 122, 138, 155}; #define WF_NUM_5G_80M_CHANS \ (sizeof(wf_5g_80m_chans)/sizeof(uint8)) static const uint8 wf_5g_160m_chans[] = {50, 114}; #define WF_NUM_5G_160M_CHANS \ (sizeof(wf_5g_160m_chans)/sizeof(uint8)) static uint bw_chspec_to_mhz(chanspec_t chspec) { uint bw; bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT; return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]); } static uint8 center_chan_to_edge(uint bw) { return (uint8)(((bw - 20) / 2) / 5); } static uint8 channel_low_edge(uint center_ch, uint bw) { return (uint8)(center_ch - center_chan_to_edge(bw)); } static int channel_to_sb(uint center_ch, uint ctl_ch, uint bw) { uint lowest = channel_low_edge(center_ch, bw); uint sb; if ((ctl_ch - lowest) % 4) { return -1; } sb = ((ctl_ch - lowest) / 4); if (sb >= (bw / 20)) { return -1; } return sb; } static uint8 channel_to_ctl_chan(uint center_ch, uint bw, uint sb) { return (uint8)(channel_low_edge(center_ch, bw) + sb * 4); } static int channel_80mhz_to_id(uint ch) { uint i; for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) { if (ch == wf_5g_80m_chans[i]) return i; } return -1; } char * wf_chspec_ntoa(chanspec_t chspec, char *buf) { const char *band; uint ctl_chan; if (wf_chspec_malformed(chspec)) return NULL; band = ""; if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) || (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL)) band = (CHSPEC_IS2G(chspec)) ? 
"2g" : "5g"; ctl_chan = wf_chspec_ctlchan(chspec); if (CHSPEC_IS20(chspec)) { snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan); } else if (!CHSPEC_IS8080(chspec)) { const char *bw; const char *sb = ""; bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT]; #ifdef CHANSPEC_NEW_40MHZ_FORMAT if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) { sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l"; } snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb); #else if (CHSPEC_IS40(chspec)) { sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l"; snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb); } else { snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw); } #endif } else { uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT; uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT; chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0; chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0; snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2); } return (buf); } static int read_uint(const char **p, unsigned int *num) { unsigned long val; char *endp = NULL; val = strtoul(*p, &endp, 10); if (endp == *p) return 0; *p = endp; *num = (unsigned int)val; return 1; } chanspec_t wf_chspec_aton(const char *a) { chanspec_t chspec; uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb; uint num, ctl_ch; uint ch1, ch2; char c, sb_ul = '\0'; int i; bw = 20; chspec_sb = 0; chspec_ch = ch1 = ch2 = 0; if (!read_uint(&a, &num)) return 0; c = tolower(a[0]); if (c == 'g') { a ++; if (num == 2) chspec_band = WL_CHANSPEC_BAND_2G; else if (num == 5) chspec_band = WL_CHANSPEC_BAND_5G; else return 0; if (!read_uint(&a, &ctl_ch)) return 0; c = tolower(a[0]); } else { ctl_ch = num; chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ? 
WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G); } if (c == '\0') { chspec_bw = WL_CHANSPEC_BW_20; goto done_read; } a ++; if (c == 'u' || c == 'l') { sb_ul = c; chspec_bw = WL_CHANSPEC_BW_40; goto done_read; } if (c != '/') return 0; if (!read_uint(&a, &bw)) return 0; if (bw == 20) { chspec_bw = WL_CHANSPEC_BW_20; } else if (bw == 40) { chspec_bw = WL_CHANSPEC_BW_40; } else if (bw == 80) { chspec_bw = WL_CHANSPEC_BW_80; } else if (bw == 160) { chspec_bw = WL_CHANSPEC_BW_160; } else { return 0; } c = tolower(a[0]); if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) { if (c == 'u' || c == 'l') { a ++; sb_ul = c; goto done_read; } } if (c == '+') { static const char *plus80 = "80/"; chspec_bw = WL_CHANSPEC_BW_8080; a ++; for (i = 0; i < 3; i++) { if (*a++ != *plus80++) { return 0; } } if (!read_uint(&a, &ch1)) return 0; if (a[0] != '-') return 0; a ++; if (!read_uint(&a, &ch2)) return 0; } done_read: while (a[0] == ' ') { a ++; } if (a[0] != '\0') return 0; if (sb_ul != '\0') { if (sb_ul == 'l') { chspec_ch = UPPER_20_SB(ctl_ch); chspec_sb = WL_CHANSPEC_CTL_SB_LLL; } else if (sb_ul == 'u') { chspec_ch = LOWER_20_SB(ctl_ch); chspec_sb = WL_CHANSPEC_CTL_SB_LLU; } } else if (chspec_bw == WL_CHANSPEC_BW_20) { chspec_ch = ctl_ch; chspec_sb = 0; } else if (chspec_bw != WL_CHANSPEC_BW_8080) { const uint8 *center_ch = NULL; int num_ch = 0; int sb = -1; if (chspec_bw == WL_CHANSPEC_BW_40) { center_ch = wf_5g_40m_chans; num_ch = WF_NUM_5G_40M_CHANS; } else if (chspec_bw == WL_CHANSPEC_BW_80) { center_ch = wf_5g_80m_chans; num_ch = WF_NUM_5G_80M_CHANS; } else if (chspec_bw == WL_CHANSPEC_BW_160) { center_ch = wf_5g_160m_chans; num_ch = WF_NUM_5G_160M_CHANS; } else { return 0; } for (i = 0; i < num_ch; i ++) { sb = channel_to_sb(center_ch[i], ctl_ch, bw); if (sb >= 0) { chspec_ch = center_ch[i]; chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT; break; } } if (sb < 0) { return 0; } } else { int ch1_id = 0, ch2_id = 0; int sb; ch1_id = channel_80mhz_to_id(ch1); ch2_id = 
channel_80mhz_to_id(ch2); if (ch1 >= ch2 || ch1_id < 0 || ch2_id < 0) return 0; chspec_ch = (((uint16)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) | ((uint16)ch2_id << WL_CHANSPEC_CHAN2_SHIFT)); sb = channel_to_sb(ch1, ctl_ch, bw); if (sb < 0) { sb = channel_to_sb(ch2, ctl_ch, bw); if (sb < 0) { return 0; } sb += 4; } chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT; } chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb); if (wf_chspec_malformed(chspec)) return 0; return chspec; } bool wf_chspec_malformed(chanspec_t chanspec) { uint chspec_bw = CHSPEC_BW(chanspec); uint chspec_ch = CHSPEC_CHANNEL(chanspec); if (CHSPEC_IS2G(chanspec)) { if (chspec_bw != WL_CHANSPEC_BW_20 && chspec_bw != WL_CHANSPEC_BW_40) { return TRUE; } } else if (CHSPEC_IS5G(chanspec)) { if (chspec_bw == WL_CHANSPEC_BW_8080) { uint ch1_id, ch2_id; ch1_id = CHSPEC_CHAN1(chanspec); ch2_id = CHSPEC_CHAN2(chanspec); if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS) return TRUE; if (ch2_id <= ch1_id) return TRUE; } else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 || chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) { if (chspec_ch > MAXCHANNEL) { return TRUE; } } else { return TRUE; } } else { return TRUE; } if (chspec_bw == WL_CHANSPEC_BW_20) { if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL) return TRUE; } else if (chspec_bw == WL_CHANSPEC_BW_40) { if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU) return TRUE; } else if (chspec_bw == WL_CHANSPEC_BW_80) { if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU) return TRUE; } return FALSE; } bool wf_chspec_valid(chanspec_t chanspec) { uint chspec_bw = CHSPEC_BW(chanspec); uint chspec_ch = CHSPEC_CHANNEL(chanspec); if (wf_chspec_malformed(chanspec)) return FALSE; if (CHSPEC_IS2G(chanspec)) { if (chspec_bw == WL_CHANSPEC_BW_20) { if (chspec_ch >= 1 && chspec_ch <= 14) return TRUE; } else if (chspec_bw == WL_CHANSPEC_BW_40) { if (chspec_ch >= 3 && chspec_ch <= 11) return TRUE; } } else if 
(CHSPEC_IS5G(chanspec)) { if (chspec_bw == WL_CHANSPEC_BW_8080) { uint16 ch1, ch2; ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)]; ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)]; if (ch2 > ch1 + CH_80MHZ_APART) return TRUE; } else { const uint8 *center_ch; uint num_ch, i; if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40) { center_ch = wf_5g_40m_chans; num_ch = WF_NUM_5G_40M_CHANS; } else if (chspec_bw == WL_CHANSPEC_BW_80) { center_ch = wf_5g_80m_chans; num_ch = WF_NUM_5G_80M_CHANS; } else if (chspec_bw == WL_CHANSPEC_BW_160) { center_ch = wf_5g_160m_chans; num_ch = WF_NUM_5G_160M_CHANS; } else { return FALSE; } if (chspec_bw == WL_CHANSPEC_BW_20) { for (i = 0; i < num_ch; i ++) { if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) || chspec_ch == (uint)UPPER_20_SB(center_ch[i])) break; } if (i == num_ch) { if (chspec_ch == 34 || chspec_ch == 38 || chspec_ch == 42 || chspec_ch == 46) i = 0; } } else { for (i = 0; i < num_ch; i ++) { if (chspec_ch == center_ch[i]) break; } } if (i < num_ch) { return TRUE; } } } return FALSE; } uint8 wf_chspec_ctlchan(chanspec_t chspec) { uint center_chan; uint bw_mhz; uint sb; ASSERT(!wf_chspec_malformed(chspec)); if (CHSPEC_IS20(chspec)) { return CHSPEC_CHANNEL(chspec); } else { sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT; if (CHSPEC_IS8080(chspec)) { bw_mhz = 80; if (sb < 4) { center_chan = CHSPEC_CHAN1(chspec); } else { center_chan = CHSPEC_CHAN2(chspec); sb -= 4; } center_chan = wf_5g_80m_chans[center_chan]; } else { bw_mhz = bw_chspec_to_mhz(chspec); center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT; } return (channel_to_ctl_chan(center_chan, bw_mhz, sb)); } } chanspec_t wf_chspec_ctlchspec(chanspec_t chspec) { chanspec_t ctl_chspec = chspec; uint8 ctl_chan; ASSERT(!wf_chspec_malformed(chspec)); if (!CHSPEC_IS20(chspec)) { ctl_chan = wf_chspec_ctlchan(chspec); ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20; ctl_chspec |= CHSPEC_BAND(chspec); } return ctl_chspec; } #endif extern chanspec_t 
wf_chspec_primary40_chspec(chanspec_t chspec) { chanspec_t chspec40 = chspec; uint center_chan; uint sb; ASSERT(!wf_chspec_malformed(chspec)); if (CHSPEC_IS80(chspec)) { center_chan = CHSPEC_CHANNEL(chspec); sb = CHSPEC_CTL_SB(chspec); if (sb == WL_CHANSPEC_CTL_SB_UL) { sb = WL_CHANSPEC_CTL_SB_L; center_chan += CH_20MHZ_APART; } else if (sb == WL_CHANSPEC_CTL_SB_UU) { sb = WL_CHANSPEC_CTL_SB_U; center_chan += CH_20MHZ_APART; } else { center_chan -= CH_20MHZ_APART; } chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 | sb | center_chan); } return chspec40; } int wf_mhz2channel(uint freq, uint start_factor) { int ch = -1; uint base; int offset; if (start_factor == 0) { if (freq >= 2400 && freq <= 2500) start_factor = WF_CHAN_FACTOR_2_4_G; else if (freq >= 5000 && freq <= 6000) start_factor = WF_CHAN_FACTOR_5_G; } if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G) return 14; base = start_factor / 2; if ((freq < base) || (freq > base + 1000)) return -1; offset = freq - base; ch = offset / 5; if (offset != (ch * 5)) return -1; if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13)) return -1; return ch; } int wf_channel2mhz(uint ch, uint start_factor) { int freq; if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) || (ch > 200)) freq = -1; else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14)) freq = 2484; else freq = ch * 5 + start_factor / 2; return freq; }
gpl-2.0
djvoleur/G_N92XP-R4_AOJ6
block/compat_ioctl.c
877
21270
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blktrace_api.h>
#include <linux/cdrom.h>
#include <linux/compat.h>
#include <linux/elevator.h>
#include <linux/fd.h>
#include <linux/hdreg.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Small helpers: store a kernel value through a 32-bit user pointer. */
static int compat_put_ushort(unsigned long arg, unsigned short val)
{
	return put_user(val, (unsigned short __user *)compat_ptr(arg));
}

static int compat_put_int(unsigned long arg, int val)
{
	return put_user(val, (compat_int_t __user *)compat_ptr(arg));
}

static int compat_put_uint(unsigned long arg, unsigned int val)
{
	return put_user(val, (compat_uint_t __user *)compat_ptr(arg));
}

static int compat_put_long(unsigned long arg, long val)
{
	return put_user(val, (compat_long_t __user *)compat_ptr(arg));
}

static int compat_put_ulong(unsigned long arg, compat_ulong_t val)
{
	return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
}

static int compat_put_u64(unsigned long arg, u64 val)
{
	return put_user(val, (compat_u64 __user *)compat_ptr(arg));
}

/* 32-bit layout of struct hd_geometry (start is u32, not unsigned long). */
struct compat_hd_geometry {
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
	u32 start;
};

/*
 * HDIO_GETGEO for 32-bit callers: run the driver's getgeo and copy the
 * result into the compat layout.
 */
static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
			struct compat_hd_geometry __user *ugeo)
{
	struct hd_geometry geo;
	int ret;

	if (!ugeo)
		return -EINVAL;
	if (!disk->fops->getgeo)
		return -ENOTTY;

	/*
	 * We need to set the startsect first, the driver may
	 * want to override it.
	 */
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);
	if (ret)
		return ret;

	/* heads + sectors + cylinders are the leading 4 bytes of both
	 * layouts; start is converted separately to its 32-bit slot. */
	ret = copy_to_user(ugeo, &geo, 4);
	ret |= __put_user(geo.start, &ugeo->start);
	if (ret)
		ret = -EFAULT;

	return ret;
}

/*
 * Run an HDIO_GET_* ioctl against a kernel buffer, then narrow the
 * unsigned long result to the 32-bit uint the compat caller expects.
 */
static int compat_hdio_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	mm_segment_t old_fs = get_fs();
	unsigned long kval;
	unsigned int __user *uvp;
	int error;

	set_fs(KERNEL_DS);
	error = __blkdev_driver_ioctl(bdev, mode,
				cmd, (unsigned long)(&kval));
	set_fs(old_fs);

	if (error == 0) {
		uvp = compat_ptr(arg);
		if (put_user(kval, uvp))
			error = -EFAULT;
	}
	return error;
}

struct compat_cdrom_read_audio {
	union cdrom_addr	addr;
	u8			addr_format;
	compat_int_t		nframes;
	compat_caddr_t		buf;
};

struct compat_cdrom_generic_command {
	unsigned char	cmd[CDROM_PACKET_SIZE];
	compat_caddr_t	buffer;
	compat_uint_t	buflen;
	compat_int_t	stat;
	compat_caddr_t	sense;
	unsigned char	data_direction;
	compat_int_t	quiet;
	compat_int_t	timeout;
	compat_caddr_t	reserved[1];
};

/*
 * CDROMREADAUDIO: rebuild the request on the 64-bit-layout user stack
 * area, widening the buffer pointer, then forward to the driver.
 */
static int compat_cdrom_read_audio(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct cdrom_read_audio __user *cdread_audio;
	struct compat_cdrom_read_audio __user *cdread_audio32;
	__u32 data;
	void __user *datap;

	cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio));
	cdread_audio32 = compat_ptr(arg);

	/* everything up to (but excluding) the trailing buf pointer */
	if (copy_in_user(&cdread_audio->addr,
			 &cdread_audio32->addr,
			 (sizeof(*cdread_audio32) -
			  sizeof(compat_caddr_t))))
		return -EFAULT;

	if (get_user(data, &cdread_audio32->buf))
		return -EFAULT;
	datap = compat_ptr(data);
	if (put_user(datap, &cdread_audio->buf))
		return -EFAULT;

	return __blkdev_driver_ioctl(bdev, mode, cmd,
			(unsigned long)cdread_audio);
}

/*
 * CDROM_SEND_PACKET: rebuild struct cdrom_generic_command field by
 * field, widening each embedded pointer.
 */
static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct cdrom_generic_command __user *cgc;
	struct compat_cdrom_generic_command __user *cgc32;
	u32 data;
	unsigned char dir;
	int itmp;

	cgc = compat_alloc_user_space(sizeof(*cgc));
	cgc32 = compat_ptr(arg);

	if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
	    get_user(data, &cgc32->buffer) ||
	    put_user(compat_ptr(data), &cgc->buffer) ||
	    copy_in_user(&cgc->buflen, &cgc32->buflen,
			(sizeof(unsigned int) + sizeof(int))) ||
	    get_user(data, &cgc32->sense) ||
	    put_user(compat_ptr(data), &cgc->sense) ||
	    get_user(dir, &cgc32->data_direction) ||
	    put_user(dir, &cgc->data_direction) ||
	    get_user(itmp, &cgc32->quiet) ||
	    put_user(itmp, &cgc->quiet) ||
	    get_user(itmp, &cgc32->timeout) ||
	    put_user(itmp, &cgc->timeout) ||
	    get_user(data, &cgc32->reserved[0]) ||
	    put_user(compat_ptr(data), &cgc->reserved[0]))
		return -EFAULT;

	return __blkdev_driver_ioctl(bdev, mode, cmd, (unsigned long)cgc);
}

struct compat_blkpg_ioctl_arg {
	compat_int_t op;
	compat_int_t flags;
	compat_int_t datalen;
	compat_caddr_t data;
};

/*
 * BLKPG: translate the compat argument block and forward through the
 * regular (locking) blkdev_ioctl path.
 */
static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, struct compat_blkpg_ioctl_arg __user *ua32)
{
	struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
	compat_caddr_t udata;
	compat_int_t n;
	int err;

	err = get_user(n, &ua32->op);
	err |= put_user(n, &a->op);
	err |= get_user(n, &ua32->flags);
	err |= put_user(n, &a->flags);
	err |= get_user(n, &ua32->datalen);
	err |= put_user(n, &a->datalen);
	err |= get_user(udata, &ua32->data);
	err |= put_user(compat_ptr(udata), &a->data);
	if (err)
		return err;

	return blkdev_ioctl(bdev, mode, cmd, (unsigned long)a);
}

#define BLKBSZGET_32		_IOR(0x12, 112, int)
#define BLKBSZSET_32		_IOW(0x12, 113, int)
#define BLKGETSIZE64_32		_IOR(0x12, 114, int)

/* 32-bit layouts of the floppy driver's ioctl structures. */
struct compat_floppy_drive_params {
	char		cmos;
	compat_ulong_t	max_dtr;
	compat_ulong_t	hlt;
	compat_ulong_t	hut;
	compat_ulong_t	srt;
	compat_ulong_t	spinup;
	compat_ulong_t	spindown;
	unsigned char	spindown_offset;
	unsigned char	select_delay;
	unsigned char	rps;
	unsigned char	tracks;
	compat_ulong_t	timeout;
	unsigned char	interleave_sect;
	struct floppy_max_errors max_errors;
	char		flags;
	char		read_track;
	short		autodetect[8];
	compat_int_t	checkfreq;
	compat_int_t	native_format;
};

struct compat_floppy_drive_struct {
	signed char	flags;
	compat_ulong_t	spinup_date;
	compat_ulong_t	select_date;
	compat_ulong_t	first_read_date;
	short		probed_format;
	short		track;
	short		maxblock;
	short		maxtrack;
	compat_int_t	generation;
	compat_int_t	keep_data;
	compat_int_t	fd_ref;
	compat_int_t	fd_device;
	compat_int_t	last_checked;
	compat_caddr_t	dmabuf;
	compat_int_t	bufblocks;
};

struct compat_floppy_fdc_state {
	compat_int_t	spec1;
	compat_int_t	spec2;
	compat_int_t	dtr;
	unsigned char	version;
	unsigned char	dor;
	compat_ulong_t	address;
	unsigned int	rawcmd:2;
	unsigned int	reset:1;
	unsigned int	need_configure:1;
	unsigned int	perp_mode:2;
	unsigned int	has_fifo:1;
	unsigned int	driver_version;
	unsigned char	track[4];
};

struct compat_floppy_write_errors {
	unsigned int	write_errors;
	compat_ulong_t	first_error_sector;
	compat_int_t	first_error_generation;
	compat_ulong_t	last_error_sector;
	compat_int_t	last_error_generation;
	compat_uint_t	badness;
};

#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)

/* 32-bit command number -> native command number. */
static struct {
	unsigned int	cmd32;
	unsigned int	cmd;
} fd_ioctl_trans_table[] = {
	{ FDSETPRM32, FDSETPRM },
	{ FDDEFPRM32, FDDEFPRM },
	{ FDGETPRM32, FDGETPRM },
	{ FDSETDRVPRM32, FDSETDRVPRM },
	{ FDGETDRVPRM32, FDGETDRVPRM },
	{ FDGETDRVSTAT32, FDGETDRVSTAT },
	{ FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
	{ FDGETFDCSTAT32, FDGETFDCSTAT },
	{ FDWERRORGET32, FDWERRORGET }
};

#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)

/*
 * Floppy compat ioctls: copy the 32-bit structure into a kernel
 * buffer, run the native ioctl with KERNEL_DS, then copy the result
 * back out in the 32-bit layout.
 */
static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	mm_segment_t old_fs = get_fs();
	void *karg = NULL;
	unsigned int kcmd = 0;
	int i, err;

	for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
		if (cmd == fd_ioctl_trans_table[i].cmd32) {
			kcmd = fd_ioctl_trans_table[i].cmd;
			break;
		}
	if (!kcmd)
		return -EINVAL;

	switch (cmd) {
	case FDSETPRM32:
	case FDDEFPRM32:
	case FDGETPRM32:
	{
		compat_uptr_t name;
		struct compat_floppy_struct __user *uf;
		struct floppy_struct *f;

		uf = compat_ptr(arg);
		f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
		if (!karg)
			return -ENOMEM;
		if (cmd == FDGETPRM32)
			break;	/* read-only command: nothing to copy in */
		err = __get_user(f->size, &uf->size);
		err |= __get_user(f->sect, &uf->sect);
		err |= __get_user(f->head, &uf->head);
		err |= __get_user(f->track, &uf->track);
		err |= __get_user(f->stretch, &uf->stretch);
		err |= __get_user(f->gap, &uf->gap);
		err |= __get_user(f->rate, &uf->rate);
		err |= __get_user(f->spec1, &uf->spec1);
		err |= __get_user(f->fmt_gap, &uf->fmt_gap);
		err |= __get_user(name, &uf->name);
		f->name = compat_ptr(name);
		if (err) {
			err = -EFAULT;
			goto out;
		}
		break;
	}
	case FDSETDRVPRM32:
	case FDGETDRVPRM32:
	{
		struct compat_floppy_drive_params __user *uf;
		struct floppy_drive_params *f;

		uf = compat_ptr(arg);
		f = karg = kmalloc(sizeof(struct floppy_drive_params),
				   GFP_KERNEL);
		if (!karg)
			return -ENOMEM;
		if (cmd == FDGETDRVPRM32)
			break;
		err = __get_user(f->cmos, &uf->cmos);
		err |= __get_user(f->max_dtr, &uf->max_dtr);
		err |= __get_user(f->hlt, &uf->hlt);
		err |= __get_user(f->hut, &uf->hut);
		err |= __get_user(f->srt, &uf->srt);
		err |= __get_user(f->spinup, &uf->spinup);
		err |= __get_user(f->spindown, &uf->spindown);
		err |= __get_user(f->spindown_offset, &uf->spindown_offset);
		err |= __get_user(f->select_delay, &uf->select_delay);
		err |= __get_user(f->rps, &uf->rps);
		err |= __get_user(f->tracks, &uf->tracks);
		err |= __get_user(f->timeout, &uf->timeout);
		err |= __get_user(f->interleave_sect, &uf->interleave_sect);
		err |= __copy_from_user(&f->max_errors, &uf->max_errors,
				sizeof(f->max_errors));
		err |= __get_user(f->flags, &uf->flags);
		err |= __get_user(f->read_track, &uf->read_track);
		err |= __copy_from_user(f->autodetect, uf->autodetect,
				sizeof(f->autodetect));
		err |= __get_user(f->checkfreq, &uf->checkfreq);
		err |= __get_user(f->native_format, &uf->native_format);
		if (err) {
			err = -EFAULT;
			goto out;
		}
		break;
	}
	case FDGETDRVSTAT32:
	case FDPOLLDRVSTAT32:
		karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
		if (!karg)
			return -ENOMEM;
		break;
	case FDGETFDCSTAT32:
		karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
		if (!karg)
			return -ENOMEM;
		break;
	case FDWERRORGET32:
		karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
		if (!karg)
			return -ENOMEM;
		break;
	default:
		return -EINVAL;
	}

	set_fs(KERNEL_DS);
	err = __blkdev_driver_ioctl(bdev, mode, kcmd, (unsigned long)karg);
	set_fs(old_fs);
	if (err)
		goto out;

	/* Copy results back out in the 32-bit layout. */
	switch (cmd) {
	case FDGETPRM32:
	{
		struct floppy_struct *f = karg;
		struct compat_floppy_struct __user *uf = compat_ptr(arg);

		err = __put_user(f->size, &uf->size);
		err |= __put_user(f->sect, &uf->sect);
		err |= __put_user(f->head, &uf->head);
		err |= __put_user(f->track, &uf->track);
		err |= __put_user(f->stretch, &uf->stretch);
		err |= __put_user(f->gap, &uf->gap);
		err |= __put_user(f->rate, &uf->rate);
		err |= __put_user(f->spec1, &uf->spec1);
		err |= __put_user(f->fmt_gap, &uf->fmt_gap);
		err |= __put_user((u64)f->name,
				(compat_caddr_t __user *)&uf->name);
		break;
	}
	case FDGETDRVPRM32:
	{
		struct compat_floppy_drive_params __user *uf;
		struct floppy_drive_params *f = karg;

		uf = compat_ptr(arg);
		err = __put_user(f->cmos, &uf->cmos);
		err |= __put_user(f->max_dtr, &uf->max_dtr);
		err |= __put_user(f->hlt, &uf->hlt);
		err |= __put_user(f->hut, &uf->hut);
		err |= __put_user(f->srt, &uf->srt);
		err |= __put_user(f->spinup, &uf->spinup);
		err |= __put_user(f->spindown, &uf->spindown);
		err |= __put_user(f->spindown_offset, &uf->spindown_offset);
		err |= __put_user(f->select_delay, &uf->select_delay);
		err |= __put_user(f->rps, &uf->rps);
		err |= __put_user(f->tracks, &uf->tracks);
		err |= __put_user(f->timeout, &uf->timeout);
		err |= __put_user(f->interleave_sect, &uf->interleave_sect);
		err |= __copy_to_user(&uf->max_errors, &f->max_errors,
				sizeof(f->max_errors));
		err |= __put_user(f->flags, &uf->flags);
		err |= __put_user(f->read_track, &uf->read_track);
		err |= __copy_to_user(uf->autodetect, f->autodetect,
				sizeof(f->autodetect));
		err |= __put_user(f->checkfreq, &uf->checkfreq);
		err |= __put_user(f->native_format, &uf->native_format);
		break;
	}
	case FDGETDRVSTAT32:
	case FDPOLLDRVSTAT32:
	{
		struct compat_floppy_drive_struct __user *uf;
		struct floppy_drive_struct *f = karg;

		uf = compat_ptr(arg);
		err = __put_user(f->flags, &uf->flags);
		err |= __put_user(f->spinup_date, &uf->spinup_date);
		err |= __put_user(f->select_date, &uf->select_date);
		err |= __put_user(f->first_read_date, &uf->first_read_date);
		err |= __put_user(f->probed_format, &uf->probed_format);
		err |= __put_user(f->track, &uf->track);
		err |= __put_user(f->maxblock, &uf->maxblock);
		err |= __put_user(f->maxtrack, &uf->maxtrack);
		err |= __put_user(f->generation, &uf->generation);
		err |= __put_user(f->keep_data, &uf->keep_data);
		err |= __put_user(f->fd_ref, &uf->fd_ref);
		err |= __put_user(f->fd_device, &uf->fd_device);
		err |= __put_user(f->last_checked, &uf->last_checked);
		err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
		err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
		break;
	}
	case FDGETFDCSTAT32:
	{
		struct compat_floppy_fdc_state __user *uf;
		struct floppy_fdc_state *f = karg;

		uf = compat_ptr(arg);
		err = __put_user(f->spec1, &uf->spec1);
		err |= __put_user(f->spec2, &uf->spec2);
		err |= __put_user(f->dtr, &uf->dtr);
		err |= __put_user(f->version, &uf->version);
		err |= __put_user(f->dor, &uf->dor);
		err |= __put_user(f->address, &uf->address);
		/* raw copy of the bitfield word following 'address' */
		err |= __copy_to_user((char __user *)&uf->address +
				sizeof(uf->address),
				(char *)&f->address + sizeof(f->address),
				sizeof(int));
		err |= __put_user(f->driver_version, &uf->driver_version);
		err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
		break;
	}
	case FDWERRORGET32:
	{
		struct compat_floppy_write_errors __user *uf;
		struct floppy_write_errors *f = karg;

		uf = compat_ptr(arg);
		err = __put_user(f->write_errors, &uf->write_errors);
		err |= __put_user(f->first_error_sector,
				&uf->first_error_sector);
		err |= __put_user(f->first_error_generation,
				&uf->first_error_generation);
		err |= __put_user(f->last_error_sector,
				&uf->last_error_sector);
		err |= __put_user(f->last_error_generation,
				&uf->last_error_generation);
		err |= __put_user(f->badness, &uf->badness);
		break;
	}
	default:
		break;
	}
	if (err)
		err = -EFAULT;
out:
	kfree(karg);
	return err;
}

/*
 * Dispatch a compat ioctl to the per-family translators above, or
 * pass it straight through (with or without pointer conversion).
 */
static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case HDIO_GET_UNMASKINTR:
	case HDIO_GET_MULTCOUNT:
	case HDIO_GET_KEEPSETTINGS:
	case HDIO_GET_32BIT:
	case HDIO_GET_NOWERR:
	case HDIO_GET_DMA:
	case HDIO_GET_NICE:
	case HDIO_GET_WCACHE:
	case HDIO_GET_ACOUSTIC:
	case HDIO_GET_ADDRESS:
	case HDIO_GET_BUSSTATE:
		return compat_hdio_ioctl(bdev, mode, cmd, arg);
	case FDSETPRM32:
	case FDDEFPRM32:
	case FDGETPRM32:
	case FDSETDRVPRM32:
	case FDGETDRVPRM32:
	case FDGETDRVSTAT32:
	case FDPOLLDRVSTAT32:
	case FDGETFDCSTAT32:
	case FDWERRORGET32:
		return compat_fd_ioctl(bdev, mode, cmd, arg);
	case CDROMREADAUDIO:
		return compat_cdrom_read_audio(bdev, mode, cmd, arg);
	case CDROM_SEND_PACKET:
		return compat_cdrom_generic_command(bdev, mode, cmd, arg);

	/*
	 * No handler required for the ones below, we just need to
	 * convert arg to a 64 bit pointer.
	 */
	case BLKSECTSET:
	/*
	 * 0x03 -- HD/IDE ioctl's used by hdparm and friends.
	 *         Some need translations, these do not.
	 */
	case HDIO_GET_IDENTITY:
	case HDIO_DRIVE_TASK:
	case HDIO_DRIVE_CMD:
	/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
	case 0x330:
	/* 0x02 -- Floppy ioctls */
	case FDMSGON:
	case FDMSGOFF:
	case FDSETEMSGTRESH:
	case FDFLUSH:
	case FDWERRORCLR:
	case FDSETMAXERRS:
	case FDGETMAXERRS:
	case FDGETDRVTYP:
	case FDEJECT:
	case FDCLRPRM:
	case FDFMTBEG:
	case FDFMTEND:
	case FDRESET:
	case FDTWADDLE:
	case FDFMTTRK:
	case FDRAWCMD:
	/* CDROM stuff */
	case CDROMPAUSE:
	case CDROMRESUME:
	case CDROMPLAYMSF:
	case CDROMPLAYTRKIND:
	case CDROMREADTOCHDR:
	case CDROMREADTOCENTRY:
	case CDROMSTOP:
	case CDROMSTART:
	case CDROMEJECT:
	case CDROMVOLCTRL:
	case CDROMSUBCHNL:
	case CDROMMULTISESSION:
	case CDROM_GET_MCN:
	case CDROMRESET:
	case CDROMVOLREAD:
	case CDROMSEEK:
	case CDROMPLAYBLK:
	case CDROMCLOSETRAY:
	case CDROM_DISC_STATUS:
	case CDROM_CHANGER_NSLOTS:
	case CDROM_GET_CAPABILITY:
	/*
	 * Ignore cdrom.h about these next 5 ioctls, they absolutely do
	 * not take a struct cdrom_read, instead they take a struct
	 * cdrom_msf which is compatible.
	 */
	case CDROMREADMODE2:
	case CDROMREADMODE1:
	case CDROMREADRAW:
	case CDROMREADCOOKED:
	case CDROMREADALL:
	/* DVD ioctls */
	case DVD_READ_STRUCT:
	case DVD_WRITE_STRUCT:
	case DVD_AUTH:
		arg = (unsigned long)compat_ptr(arg);
		/* fallthrough */
	/*
	 * These interpret arg as an unsigned long, not as a pointer,
	 * so we must not do compat_ptr() conversion.
	 */
	case HDIO_SET_MULTCOUNT:
	case HDIO_SET_UNMASKINTR:
	case HDIO_SET_KEEPSETTINGS:
	case HDIO_SET_32BIT:
	case HDIO_SET_NOWERR:
	case HDIO_SET_DMA:
	case HDIO_SET_PIO_MODE:
	case HDIO_SET_NICE:
	case HDIO_SET_WCACHE:
	case HDIO_SET_ACOUSTIC:
	case HDIO_SET_BUSSTATE:
	case HDIO_SET_ADDRESS:
	case CDROMEJECT_SW:
	case CDROM_SET_OPTIONS:
	case CDROM_CLEAR_OPTIONS:
	case CDROM_SELECT_SPEED:
	case CDROM_SELECT_DISC:
	case CDROM_MEDIA_CHANGED:
	case CDROM_DRIVE_STATUS:
	case CDROM_LOCKDOOR:
	case CDROM_DEBUG:
		break;
	default:
		/* unknown ioctl number */
		return -ENOIOCTLCMD;
	}

	return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}

/*
 * Most of the generic ioctls are handled in the normal fallback path.
 * This assumes the blkdev's low level compat_ioctl always returns
 * ENOIOCTLCMD for unknown ioctls.
 */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int ret = -ENOIOCTLCMD;
	struct inode *inode = file->f_mapping->host;
	struct block_device *bdev = inode->i_bdev;
	struct gendisk *disk = bdev->bd_disk;
	fmode_t mode = file->f_mode;
	struct backing_dev_info *bdi;
	loff_t size;

	/*
	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
	 * to update it before every ioctl.
	 */
	if (file->f_flags & O_NDELAY)
		mode |= FMODE_NDELAY;
	else
		mode &= ~FMODE_NDELAY;

	switch (cmd) {
	case HDIO_GETGEO:
		return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
	case BLKPBSZGET:
		return compat_put_uint(arg, bdev_physical_block_size(bdev));
	case BLKIOMIN:
		return compat_put_uint(arg, bdev_io_min(bdev));
	case BLKIOOPT:
		return compat_put_uint(arg, bdev_io_opt(bdev));
	case BLKALIGNOFF:
		return compat_put_int(arg, bdev_alignment_offset(bdev));
	case BLKDISCARDZEROES:
		return compat_put_uint(arg, bdev_discard_zeroes_data(bdev));
	case BLKFLSBUF:
	case BLKROSET:
	case BLKDISCARD:
	case BLKSECDISCARD:
	case BLKZEROOUT:
	/*
	 * the ones below are implemented in blkdev_locked_ioctl,
	 * but we call blkdev_ioctl, which gets the lock for us
	 */
	case BLKRRPART:
		return blkdev_ioctl(bdev, mode, cmd,
				(unsigned long)compat_ptr(arg));
	case BLKBSZSET_32:
		return blkdev_ioctl(bdev, mode, BLKBSZSET,
				(unsigned long)compat_ptr(arg));
	case BLKPG:
		return compat_blkpg_ioctl(bdev, mode, cmd, compat_ptr(arg));
	case BLKRAGET:
	case BLKFRAGET:
		if (!arg)
			return -EINVAL;
		bdi = blk_get_backing_dev_info(bdev);
		if (bdi == NULL)
			return -ENOTTY;
		return compat_put_long(arg,
			       (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
	case BLKROGET: /* compatible */
		return compat_put_int(arg, bdev_read_only(bdev) != 0);
	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
		return compat_put_int(arg, block_size(bdev));
	case BLKSSZGET: /* get block device hardware sector size */
		return compat_put_int(arg, bdev_logical_block_size(bdev));
	case BLKSECTGET:
		return compat_put_ushort(arg,
				queue_max_sectors(bdev_get_queue(bdev)));
	case BLKROTATIONAL:
		return compat_put_ushort(arg,
				!blk_queue_nonrot(bdev_get_queue(bdev)));
	case BLKRASET: /* compatible, but no compat_ptr (!) */
	case BLKFRASET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		bdi = blk_get_backing_dev_info(bdev);
		if (bdi == NULL)
			return -ENOTTY;
		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
		return 0;
	case BLKGETSIZE:
		size = i_size_read(bdev->bd_inode);
		if ((size >> 9) > ~0UL)
			return -EFBIG;
		return compat_put_ulong(arg, size >> 9);
	case BLKGETSIZE64_32:
		return compat_put_u64(arg, i_size_read(bdev->bd_inode));
	case BLKTRACESETUP32:
	case BLKTRACESTART: /* compatible */
	case BLKTRACESTOP: /* compatible */
	case BLKTRACETEARDOWN: /* compatible */
		ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
		return ret;
	default:
		if (disk->fops->compat_ioctl)
			ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
		if (ret == -ENOIOCTLCMD)
			ret = compat_blkdev_driver_ioctl(bdev, mode, cmd, arg);
		return ret;
	}
}
gpl-2.0
NooNameR/Sense4.0-kernel
drivers/gpu/drm/drm_sman.c
1645
8782
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Simple memory manager interface that keeps track on allocate regions on a
 * per "owner" basis. All regions associated with an "owner" can be released
 * with a simple call. Typically if the "owner" exists. The owner is any
 * "unsigned long" identifier. Can typically be a pointer to a file private
 * struct or a context identifier.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drm_sman.h"

/* Per-owner bookkeeping: hash entry plus the owner's block list. */
struct drm_owner_item {
	struct drm_hash_item owner_hash;
	struct list_head sman_list;
	struct list_head mem_blocks;
};

/* Tear down both hash tables and the manager array. */
void drm_sman_takedown(struct drm_sman * sman)
{
	drm_ht_remove(&sman->user_hash_tab);
	drm_ht_remove(&sman->owner_hash_tab);
	kfree(sman->mm);
}

EXPORT_SYMBOL(drm_sman_takedown);

/*
 * Allocate the manager array and create the user and owner hash
 * tables. Returns 0 on success, -ENOMEM (or drm_ht_create's error)
 * otherwise; partially created state is unwound on failure.
 */
int
drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
	      unsigned int user_order, unsigned int owner_order)
{
	int ret = 0;

	sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
						  sizeof(*sman->mm),
						  GFP_KERNEL);
	if (!sman->mm) {
		ret = -ENOMEM;
		goto out;
	}
	sman->num_managers = num_managers;
	INIT_LIST_HEAD(&sman->owner_items);
	ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
	if (ret)
		goto out1;
	ret = drm_ht_create(&sman->user_hash_tab, user_order);
	if (!ret)
		goto out;

	/* second table failed: undo the first, then free the array */
	drm_ht_remove(&sman->owner_hash_tab);
out1:
	kfree(sman->mm);
out:
	return ret;
}

EXPORT_SYMBOL(drm_sman_init);

/* drm_mm-backed allocator callback. */
static void *drm_sman_mm_allocate(void *private, unsigned long size,
				  unsigned alignment)
{
	struct drm_mm *mm = (struct drm_mm *) private;
	struct drm_mm_node *tmp;

	tmp = drm_mm_search_free(mm, size, alignment, 1);
	if (!tmp)
		return NULL;

	tmp = drm_mm_get_block(tmp, size, alignment);
	return tmp;
}

/* drm_mm-backed free callback. */
static void drm_sman_mm_free(void *private, void *ref)
{
	struct drm_mm_node *node = (struct drm_mm_node *) ref;

	drm_mm_put_block(node);
}

/* drm_mm-backed destroy callback: tear down and free the range manager. */
static void drm_sman_mm_destroy(void *private)
{
	struct drm_mm *mm = (struct drm_mm *) private;
	drm_mm_takedown(mm);
	kfree(mm);
}

/* drm_mm-backed offset callback: start offset of the node. */
static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
	struct drm_mm_node *node = (struct drm_mm_node *) ref;
	return node->start;
}

/*
 * Install a drm_mm range manager for slot 'manager' covering
 * [start, start + size).
 */
int
drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
		   unsigned long start, unsigned long size)
{
	struct drm_sman_mm *sman_mm;
	struct drm_mm *mm;
	int ret;

	BUG_ON(manager >= sman->num_managers);

	sman_mm = &sman->mm[manager];
	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	sman_mm->private = mm;
	ret = drm_mm_init(mm, start, size);

	if (ret) {
		kfree(mm);
		return ret;
	}

	sman_mm->allocate = drm_sman_mm_allocate;
	sman_mm->free = drm_sman_mm_free;
	sman_mm->destroy = drm_sman_mm_destroy;
	sman_mm->offset = drm_sman_mm_offset;

	return 0;
}

EXPORT_SYMBOL(drm_sman_set_range);

/* Install a caller-supplied allocator for slot 'manager'. */
int
drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
		     struct drm_sman_mm * allocator)
{
	BUG_ON(manager >= sman->num_managers);
	sman->mm[manager] = *allocator;

	return 0;
}

EXPORT_SYMBOL(drm_sman_set_manager);

/* Look up the owner item for 'owner', creating it on first use. */
static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
						      unsigned long owner)
{
	int ret;
	struct drm_hash_item *owner_hash_item;
	struct drm_owner_item *owner_item;

	ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
	if (!ret)
		return drm_hash_entry(owner_hash_item, struct drm_owner_item,
				      owner_hash);

	owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
	if (!owner_item)
		goto out;

	INIT_LIST_HEAD(&owner_item->mem_blocks);
	owner_item->owner_hash.key = owner;
	if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
		goto out1;

	list_add_tail(&owner_item->sman_list, &sman->owner_items);
	return owner_item;

out1:
	kfree(owner_item);
out:
	return NULL;
}

/*
 * Allocate a block from manager slot 'manager' on behalf of 'owner'.
 * The block is registered in the user hash (for key-based lookup) and
 * on the owner's list (for bulk cleanup). Returns NULL on failure.
 */
struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman,
					 unsigned int manager,
					 unsigned long size,
					 unsigned alignment,
					 unsigned long owner)
{
	void *tmp;
	struct drm_sman_mm *sman_mm;
	struct drm_owner_item *owner_item;
	struct drm_memblock_item *memblock;

	BUG_ON(manager >= sman->num_managers);

	sman_mm = &sman->mm[manager];
	tmp = sman_mm->allocate(sman_mm->private, size, alignment);

	if (!tmp)
		return NULL;

	memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);

	if (!memblock)
		goto out;

	memblock->mm_info = tmp;
	memblock->mm = sman_mm;
	memblock->sman = sman;

	if (drm_ht_just_insert_please
	    (&sman->user_hash_tab, &memblock->user_hash,
	     (unsigned long)memblock, 32, 0, 0))
		goto out1;

	owner_item = drm_sman_get_owner_item(sman, owner);
	if (!owner_item)
		goto out2;

	list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);

	return memblock;

out2:
	drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
out1:
	kfree(memblock);
out:
	sman_mm->free(sman_mm->private, tmp);

	return NULL;
}

EXPORT_SYMBOL(drm_sman_alloc);

/* Unregister and release a single block. */
static void drm_sman_free(struct drm_memblock_item *item)
{
	struct drm_sman *sman = item->sman;

	list_del(&item->owner_list);
	drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
	item->mm->free(item->mm->private, item->mm_info);
	kfree(item);
}

/* Free the block registered under user-hash 'key'. */
int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
{
	struct drm_hash_item *hash_item;
	struct drm_memblock_item *memblock_item;

	if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
		return -EINVAL;

	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
				       user_hash);
	drm_sman_free(memblock_item);
	return 0;
}

EXPORT_SYMBOL(drm_sman_free_key);

/* Drop an owner item from both the list and the owner hash. */
static void drm_sman_remove_owner(struct drm_sman *sman,
				  struct drm_owner_item *owner_item)
{
	list_del(&owner_item->sman_list);
	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
	kfree(owner_item);
}

/*
 * Returns 0 if the owner still has outstanding blocks; otherwise
 * removes the (empty or unknown) owner and returns -1.
 */
int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
{

	struct drm_hash_item *hash_item;
	struct drm_owner_item *owner_item;

	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item))
		return -1;

	owner_item = drm_hash_entry(hash_item, struct drm_owner_item,
				    owner_hash);
	if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
		drm_sman_remove_owner(sman, owner_item);
		return -1;
	}

	return 0;
}

EXPORT_SYMBOL(drm_sman_owner_clean);

/* Free every block owned by 'owner_item', then remove the owner. */
static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
				      struct drm_owner_item *owner_item)
{
	struct drm_memblock_item *entry, *next;

	list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
				 owner_list) {
		drm_sman_free(entry);
	}
	drm_sman_remove_owner(sman, owner_item);
}

/* Public wrapper: release everything belonging to 'owner', if known. */
void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
{

	struct drm_hash_item *hash_item;
	struct drm_owner_item *owner_item;

	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item))
		return;

	owner_item = drm_hash_entry(hash_item, struct drm_owner_item,
				    owner_hash);
	drm_sman_do_owner_cleanup(sman, owner_item);
}

EXPORT_SYMBOL(drm_sman_owner_cleanup);

/* Release every owner's blocks and destroy all range managers. */
void drm_sman_cleanup(struct drm_sman *sman)
{
	struct drm_owner_item *entry, *next;
	unsigned int i;
	struct drm_sman_mm *sman_mm;

	list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
		drm_sman_do_owner_cleanup(sman, entry);
	}
	if (sman->mm) {
		for (i = 0; i < sman->num_managers; ++i) {
			sman_mm = &sman->mm[i];
			if (sman_mm->private) {
				sman_mm->destroy(sman_mm->private);
				sman_mm->private = NULL;
			}
		}
	}
}

EXPORT_SYMBOL(drm_sman_cleanup);
gpl-2.0
TheRomRoot/DeepKernel
lib/rbtree_test.c
2157
5214
#include <linux/module.h> #include <linux/rbtree_augmented.h> #include <linux/random.h> #include <asm/timex.h> #define NODES 100 #define PERF_LOOPS 100000 #define CHECK_LOOPS 100 struct test_node { struct rb_node rb; u32 key; /* following fields used for testing augmented rbtree functionality */ u32 val; u32 augmented; }; static struct rb_root root = RB_ROOT; static struct test_node nodes[NODES]; static struct rnd_state rnd; static void insert(struct test_node *node, struct rb_root *root) { struct rb_node **new = &root->rb_node, *parent = NULL; u32 key = node->key; while (*new) { parent = *new; if (key < rb_entry(parent, struct test_node, rb)->key) new = &parent->rb_left; else new = &parent->rb_right; } rb_link_node(&node->rb, parent, new); rb_insert_color(&node->rb, root); } static inline void erase(struct test_node *node, struct rb_root *root) { rb_erase(&node->rb, root); } static inline u32 augment_recompute(struct test_node *node) { u32 max = node->val, child_augmented; if (node->rb.rb_left) { child_augmented = rb_entry(node->rb.rb_left, struct test_node, rb)->augmented; if (max < child_augmented) max = child_augmented; } if (node->rb.rb_right) { child_augmented = rb_entry(node->rb.rb_right, struct test_node, rb)->augmented; if (max < child_augmented) max = child_augmented; } return max; } RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb, u32, augmented, augment_recompute) static void insert_augmented(struct test_node *node, struct rb_root *root) { struct rb_node **new = &root->rb_node, *rb_parent = NULL; u32 key = node->key; u32 val = node->val; struct test_node *parent; while (*new) { rb_parent = *new; parent = rb_entry(rb_parent, struct test_node, rb); if (parent->augmented < val) parent->augmented = val; if (key < parent->key) new = &parent->rb.rb_left; else new = &parent->rb.rb_right; } node->augmented = val; rb_link_node(&node->rb, rb_parent, new); rb_insert_augmented(&node->rb, root, &augment_callbacks); } static void 
erase_augmented(struct test_node *node, struct rb_root *root) { rb_erase_augmented(&node->rb, root, &augment_callbacks); } static void init(void) { int i; for (i = 0; i < NODES; i++) { nodes[i].key = prandom_u32_state(&rnd); nodes[i].val = prandom_u32_state(&rnd); } } static bool is_red(struct rb_node *rb) { return !(rb->__rb_parent_color & 1); } static int black_path_count(struct rb_node *rb) { int count; for (count = 0; rb; rb = rb_parent(rb)) count += !is_red(rb); return count; } static void check(int nr_nodes) { struct rb_node *rb; int count = 0, blacks = 0; u32 prev_key = 0; for (rb = rb_first(&root); rb; rb = rb_next(rb)) { struct test_node *node = rb_entry(rb, struct test_node, rb); WARN_ON_ONCE(node->key < prev_key); WARN_ON_ONCE(is_red(rb) && (!rb_parent(rb) || is_red(rb_parent(rb)))); if (!count) blacks = black_path_count(rb); else WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) && blacks != black_path_count(rb)); prev_key = node->key; count++; } WARN_ON_ONCE(count != nr_nodes); WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); } static void check_augmented(int nr_nodes) { struct rb_node *rb; check(nr_nodes); for (rb = rb_first(&root); rb; rb = rb_next(rb)) { struct test_node *node = rb_entry(rb, struct test_node, rb); WARN_ON_ONCE(node->augmented != augment_recompute(node)); } } static int __init rbtree_test_init(void) { int i, j; cycles_t time1, time2, time; printk(KERN_ALERT "rbtree testing"); prandom_seed_state(&rnd, 3141592653589793238ULL); init(); time1 = get_cycles(); for (i = 0; i < PERF_LOOPS; i++) { for (j = 0; j < NODES; j++) insert(nodes + j, &root); for (j = 0; j < NODES; j++) erase(nodes + j, &root); } time2 = get_cycles(); time = time2 - time1; time = div_u64(time, PERF_LOOPS); printk(" -> %llu cycles\n", (unsigned long long)time); for (i = 0; i < CHECK_LOOPS; i++) { init(); for (j = 0; j < NODES; j++) { check(j); insert(nodes + j, &root); } for (j = 0; j < NODES; j++) { check(NODES - j); erase(nodes + j, &root); } check(0); } 
printk(KERN_ALERT "augmented rbtree testing"); init(); time1 = get_cycles(); for (i = 0; i < PERF_LOOPS; i++) { for (j = 0; j < NODES; j++) insert_augmented(nodes + j, &root); for (j = 0; j < NODES; j++) erase_augmented(nodes + j, &root); } time2 = get_cycles(); time = time2 - time1; time = div_u64(time, PERF_LOOPS); printk(" -> %llu cycles\n", (unsigned long long)time); for (i = 0; i < CHECK_LOOPS; i++) { init(); for (j = 0; j < NODES; j++) { check_augmented(j); insert_augmented(nodes + j, &root); } for (j = 0; j < NODES; j++) { check_augmented(NODES - j); erase_augmented(nodes + j, &root); } check_augmented(0); } return -EAGAIN; /* Fail will directly unload the module */ } static void __exit rbtree_test_exit(void) { printk(KERN_ALERT "test exit\n"); } module_init(rbtree_test_init) module_exit(rbtree_test_exit) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michel Lespinasse"); MODULE_DESCRIPTION("Red Black Tree test");
gpl-2.0
bmc08gt/kernel_samsung_exynos7420
drivers/mtd/onenand/omap2.c
2157
21905
/*
 *  linux/drivers/mtd/onenand/omap2.c
 *
 *  OneNAND driver for OMAP2 / OMAP3
 *
 *  Copyright © 2005-2006 Nokia Corporation
 *
 *  Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 *  IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>

#include <asm/mach/flash.h>
#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/gpio.h>

#include <linux/omap-dma.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_BUFRAM_SIZE	(1024 * 5)

/*
 * Per-device state for one OneNAND chip attached via GPMC.
 * Embeds both the mtd_info and the onenand_chip so container_of() can
 * recover this structure from the mtd pointer in the callbacks below.
 */
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;			/* GPMC chip-select number */
	unsigned long phys_base;	/* physical base of the mapped window */
	unsigned int mem_size;		/* size of the mapped window */
	int gpio_irq;			/* GPIO used as the READY interrupt; 0 = polled */
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;	/* signalled by the GPIO interrupt */
	struct completion dma_done;	/* signalled by the DMA callback */
	int dma_channel;		/* OMAP DMA channel, or -1 for PIO */
	int freq;
	int (*setup)(void __iomem *base, int *freq_ptr);
	struct regulator *regulator;
	u8 flags;			/* e.g. ONENAND_IN_OMAP34XX */
};

/* DMA completion callback: just wake up the waiter. */
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

/* GPIO READY interrupt handler: wake up omap2_onenand_wait(). */
static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

/* 16-bit register read from the memory-mapped OneNAND register area. */
static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

/* 16-bit register write to the memory-mapped OneNAND register area. */
static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

/*
 * Wait for the current OneNAND operation to finish.
 *
 * Three strategies depending on @state:
 *  - reset/erase-prepare/erase-verify: short busy-poll of the interrupt
 *    register with udelay();
 *  - anything but FL_READING: enable the chip's interrupt output and sleep
 *    on irq_done (delivered via the GPIO interrupt), retrying up to three
 *    20ms timeouts while the controller still reports ONGO;
 *  - FL_READING: interrupts off, poll the interrupt register with a 20ms
 *    jiffies timeout (also retried while ONGO is set).
 *
 * Afterwards ECC status is checked for reads and controller errors are
 * reported.  Returns 0 on success, -EIO on controller/timeout errors, or
 * -EBADMSG on an uncorrectable ECC error.
 */
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;	/* up to 20 polls (101 for erase verify) */
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (c->flags & ONENAND_IN_OMAP34XX)
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			/* NOTE(review): gpio_get_value() returning -1 is
			 * treated as a GPIO error here — confirm the arch
			 * implementation can actually return it. */
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	/* Mask of controller status bits considered "unexpected". */
	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

/*
 * Offset of @area within the currently selected BufferRAM bank:
 * for bank 1 the data/spare areas start after bank 0's copy.
 */
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

/*
 * OMAP3 BufferRAM read via DMA.  Falls back to memcpy (out_copy) for
 * small (< 384 bytes), misaligned, in-interrupt, or page-crossing-vmalloc
 * buffers, or on any DMA setup failure.  The trailing non-multiple-of-4
 * bytes are always copied by CPU.  DMA completion is busy-polled with a
 * 20ms jiffies timeout rather than slept on.
 */
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		/* vmalloc address: translate to its backing page, but only
		 * if the whole transfer stays within one page. */
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

/*
 * OMAP3 BufferRAM write via DMA; mirror of the read path above
 * (same fallbacks, DMA_TO_DEVICE direction, no trailing-bytes handling).
 */
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	return -ENOSYS;
}

static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	return -ENOSYS;
}

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

/*
 * OMAP2 BufferRAM read.  The DMA path below is compiled in but disabled
 * by the "if (1 || ...)" guard, so in practice this always does a PIO
 * memcpy — see the comment about PM requirements.
 */
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

/*
 * OMAP2 BufferRAM write; like the read path, effectively PIO-only
 * (DMA path disabled by the "if (1 || ...)" guard).
 */
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	return -ENOSYS;
}

static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	return -ENOSYS;
}

#endif

static struct platform_driver omap2_onenand_driver;

/* Re-run the board's timing setup hook for one bound device. */
static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, &c->freq);

	return ret;
}

/* Re-apply GPMC timings on all devices, e.g. after a clock change. */
int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

/* onenand_chip.enable hook: power the chip's supply on. */
static int omap2_onenand_enable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_enable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't enable regulator\n");

	return ret;
}

/* onenand_chip.disable hook: power the chip's supply off. */
static int omap2_onenand_disable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_disable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't disable regulator\n");

	return ret;
}

/*
 * Probe: map the GPMC window, run the board setup hook, claim the READY
 * GPIO/IRQ and a DMA channel (falling back to PIO on failure), wire up
 * the onenand_chip callbacks, scan the chip and register MTD partitions.
 * Errors unwind in reverse order via the labelled cleanup chain.
 */
static int omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	struct onenand_chip *this;
	int r;
	struct resource *res;
	struct mtd_part_parser_data ppdata = {};

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->flags = pdata->flags;
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		r = -EINVAL;
		dev_err(&pdev->dev, "error getting memory resource\n");
		goto err_kfree;
	}

	c->phys_base = res->start;
	c->mem_size = resource_size(res);

	if (request_mem_region(c->phys_base, c->mem_size,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
			c->phys_base, c->mem_size);
		r = -EBUSY;
		goto err_kfree;
	}
	c->onenand.base = ioremap(c->phys_base, c->mem_size);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, &c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base, c->freq);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	this = &c->onenand;
	if (c->dma_channel >= 0) {
		this->wait = omap2_onenand_wait;
		if (c->flags & ONENAND_IN_OMAP34XX) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if (pdata->regulator_can_sleep) {
		c->regulator = regulator_get(&pdev->dev, "vonenand");
		if (IS_ERR(c->regulator)) {
			dev_err(&pdev->dev,  "Failed to get regulator\n");
			r = PTR_ERR(c->regulator);
			goto err_release_dma;
		}
		c->onenand.enable = omap2_onenand_enable;
		c->onenand.disable = omap2_onenand_disable;
	}

	if (pdata->skip_initial_unlocking)
		this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_regulator;

	ppdata.of_node = pdata->of_node;
	r = mtd_device_parse_register(&c->mtd, NULL, &ppdata,
				      pdata ? pdata->parts : NULL,
				      pdata ? pdata->nr_parts : 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_regulator:
	regulator_put(c->regulator);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, c->mem_size);
err_kfree:
	kfree(c);

	return r;
}

/* Remove: release everything probe acquired, in reverse order. */
static int omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	regulator_put(c->regulator);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, c->mem_size);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= omap2_onenand_remove,
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner  = THIS_MODULE,
	},
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
gpl-2.0
klabit87/jflte_vzw_of1
lib/mpi/mpi-cmp.c
4973
1647
/* mpi-cmp.c  -  MPI functions
 * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

#include "mpi-internal.h"

/*
 * Compare the multi-precision integer @u with the unsigned machine word @v.
 *
 * Returns 0 if u == v, a positive value if u > v, and a negative value
 * if u < v.  Any negative MPI compares below any unsigned word.
 */
int mpi_cmp_ui(MPI u, unsigned long v)
{
	mpi_limb_t limb = v;

	mpi_normalize(u);
	/* Zero (no limbs) equals a zero word. */
	if (!u->nlimbs && !limb)
		return 0;
	if (u->sign)
		return -1;
	/* More than one limb means |u| exceeds any single word. */
	if (u->nlimbs > 1)
		return 1;

	if (u->d[0] == limb)
		return 0;
	else if (u->d[0] > limb)
		return 1;
	else
		return -1;
}

/*
 * Compare two multi-precision integers.
 *
 * Returns 0 if u == v, a positive value if u > v, and a negative value
 * if u < v.  Both operands are normalized first so limb counts are
 * meaningful.
 */
int mpi_cmp(MPI u, MPI v)
{
	mpi_size_t usize, vsize;
	int cmp;

	mpi_normalize(u);
	mpi_normalize(v);
	usize = u->nlimbs;
	vsize = v->nlimbs;
	/* Differing signs decide immediately. */
	if (!u->sign && v->sign)
		return 1;
	if (u->sign && !v->sign)
		return -1;
	/* Both non-negative: more limbs means larger magnitude and value. */
	if (usize != vsize && !u->sign && !v->sign)
		return usize - vsize;
	/*
	 * Both negative: more limbs means larger magnitude, hence a SMALLER
	 * value, so the comparison is reversed.  (This previously returned
	 * vsize + usize, which is always positive and thus wrong whenever
	 * u had more limbs than v; GnuPG's original code uses the
	 * difference.)
	 */
	if (usize != vsize && u->sign && v->sign)
		return vsize - usize;
	if (!usize)
		return 0;
	cmp = mpihelp_cmp(u->d, v->d, usize);
	if (!cmp)
		return 0;
	/* Equal-size magnitudes: flip the magnitude result for negatives. */
	if ((cmp < 0 ? 1 : 0) == (u->sign ? 1 : 0))
		return 1;
	return -1;
}
gpl-2.0
jcadduono/nethunter_kernel_klte
drivers/net/wireless/ath/ath9k/eeprom_4k.c
4973
33600
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <asm/unaligned.h> #include "hw.h" #include "ar9002_phy.h" static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) { return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF); } static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) { return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); } #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data = (u16 *)&ah->eeprom.map4k; int addr, eep_start_loc = 64; for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { ath_dbg(common, EEPROM, "Unable to read eeprom region\n"); return false; } eep_data++; } return true; } static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah) { u16 *eep_data = (u16 *)&ah->eeprom.map4k; ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K); return true; } static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n"); } if (common->bus_ops->ath_bus_type == ATH_USB) return 
__ath9k_hw_usb_4k_fill_eeprom(ah); else return __ath9k_hw_4k_fill_eeprom(ah); } #if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS) static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_4k_header *modal_hdr) { PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]); PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Switch Settle", modal_hdr->switchSettling); PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]); PR_EEP("Chain0 RxTxMargin", modal_hdr->rxTxMarginCh[0]); PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize); PR_EEP("PGA Desired size", modal_hdr->pgaDesiredSize); PR_EEP("Chain0 xlna Gain", modal_hdr->xlnaGainCh[0]); PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff); PR_EEP("txEndToRxOn", modal_hdr->txEndToRxOn); PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn); PR_EEP("CCA Threshold)", modal_hdr->thresh62); PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]); PR_EEP("xpdGain", modal_hdr->xpdGain); PR_EEP("External PD", modal_hdr->xpd); PR_EEP("Chain0 I Coefficient", modal_hdr->iqCalICh[0]); PR_EEP("Chain0 Q Coefficient", modal_hdr->iqCalQCh[0]); PR_EEP("pdGainOverlap", modal_hdr->pdGainOverlap); PR_EEP("O/D Bias Version", modal_hdr->version); PR_EEP("CCK OutputBias", modal_hdr->ob_0); PR_EEP("BPSK OutputBias", modal_hdr->ob_1); PR_EEP("QPSK OutputBias", modal_hdr->ob_2); PR_EEP("16QAM OutputBias", modal_hdr->ob_3); PR_EEP("64QAM OutputBias", modal_hdr->ob_4); PR_EEP("CCK Driver1_Bias", modal_hdr->db1_0); PR_EEP("BPSK Driver1_Bias", modal_hdr->db1_1); PR_EEP("QPSK Driver1_Bias", modal_hdr->db1_2); PR_EEP("16QAM Driver1_Bias", modal_hdr->db1_3); PR_EEP("64QAM Driver1_Bias", modal_hdr->db1_4); PR_EEP("CCK Driver2_Bias", modal_hdr->db2_0); PR_EEP("BPSK Driver2_Bias", modal_hdr->db2_1); PR_EEP("QPSK Driver2_Bias", modal_hdr->db2_2); PR_EEP("16QAM Driver2_Bias", modal_hdr->db2_3); PR_EEP("64QAM Driver2_Bias", 
modal_hdr->db2_4); PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl); PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart); PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn); PR_EEP("HT40 Power Inc.", modal_hdr->ht40PowerIncForPdadc); PR_EEP("Chain0 bswAtten", modal_hdr->bswAtten[0]); PR_EEP("Chain0 bswMargin", modal_hdr->bswMargin[0]); PR_EEP("HT40 Switch Settle", modal_hdr->swSettleHt40); PR_EEP("Chain0 xatten2Db", modal_hdr->xatten2Db[0]); PR_EEP("Chain0 xatten2Margin", modal_hdr->xatten2Margin[0]); PR_EEP("Ant. Diversity ctl1", modal_hdr->antdiv_ctl1); PR_EEP("Ant. Diversity ctl2", modal_hdr->antdiv_ctl2); PR_EEP("TX Diversity", modal_hdr->tx_diversity); return len; } static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, u8 *buf, u32 len, u32 size) { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct base_eep_header_4k *pBase = &eep->baseEepHeader; if (!dump_base_hdr) { len += snprintf(buf + len, size - len, "%20s :\n", "2GHz modal Header"); len += ath9k_dump_4k_modal_eeprom(buf, len, size, &eep->modalHeader); goto out; } PR_EEP("Major Version", pBase->version >> 12); PR_EEP("Minor Version", pBase->version & 0xFFF); PR_EEP("Checksum", pBase->checksum); PR_EEP("Length", pBase->length); PR_EEP("RegDomain1", pBase->regDmn[0]); PR_EEP("RegDomain2", pBase->regDmn[1]); PR_EEP("TX Mask", pBase->txMask); PR_EEP("RX Mask", pBase->rxMask); PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A)); PR_EEP("Allow 2GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11G)); PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20)); PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40)); PR_EEP("Disable 5Ghz HT20", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01)); PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); PR_EEP("Cal Bin Minor Ver", 
(pBase->binBuildNumber >> 16) & 0xFF); PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); PR_EEP("TX Gain type", pBase->txGainType); len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", pBase->macAddr); out: if (len > size) len = size; return len; } #else static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, u8 *buf, u32 len, u32 size) { return 0; } #endif #undef SIZE_EEPROM_4K static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) { #define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) struct ath_common *common = ath9k_hw_common(ah); struct ar5416_eeprom_4k *eep = (struct ar5416_eeprom_4k *) &ah->eeprom.map4k; u16 *eepdata, temp, magic, magic2; u32 sum = 0, el; bool need_swap = false; int i, addr; if (!ath9k_hw_use_flash(ah)) { if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { ath_err(common, "Reading Magic # failed\n"); return false; } ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); if (magic2 == AR5416_EEPROM_MAGIC) { need_swap = true; eepdata = (u16 *) (&ah->eeprom); for (addr = 0; addr < EEPROM_4K_SIZE; addr++) { temp = swab16(*eepdata); *eepdata = temp; eepdata++; } } else { ath_err(common, "Invalid EEPROM Magic. Endianness mismatch.\n"); return -EINVAL; } } } ath_dbg(common, EEPROM, "need_swap = %s\n", need_swap ? "True" : "False"); if (need_swap) el = swab16(ah->eeprom.map4k.baseEepHeader.length); else el = ah->eeprom.map4k.baseEepHeader.length; if (el > sizeof(struct ar5416_eeprom_4k)) el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16); else el = el / sizeof(u16); eepdata = (u16 *)(&ah->eeprom); for (i = 0; i < el; i++) sum ^= *eepdata++; if (need_swap) { u32 integer; u16 word; ath_dbg(common, EEPROM, "EEPROM Endianness is not native.. 
Changing\n"); word = swab16(eep->baseEepHeader.length); eep->baseEepHeader.length = word; word = swab16(eep->baseEepHeader.checksum); eep->baseEepHeader.checksum = word; word = swab16(eep->baseEepHeader.version); eep->baseEepHeader.version = word; word = swab16(eep->baseEepHeader.regDmn[0]); eep->baseEepHeader.regDmn[0] = word; word = swab16(eep->baseEepHeader.regDmn[1]); eep->baseEepHeader.regDmn[1] = word; word = swab16(eep->baseEepHeader.rfSilent); eep->baseEepHeader.rfSilent = word; word = swab16(eep->baseEepHeader.blueToothOptions); eep->baseEepHeader.blueToothOptions = word; word = swab16(eep->baseEepHeader.deviceCap); eep->baseEepHeader.deviceCap = word; integer = swab32(eep->modalHeader.antCtrlCommon); eep->modalHeader.antCtrlCommon = integer; for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { integer = swab32(eep->modalHeader.antCtrlChain[i]); eep->modalHeader.antCtrlChain[i] = integer; } for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { word = swab16(eep->modalHeader.spurChans[i].spurChan); eep->modalHeader.spurChans[i].spurChan = word; } } if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n", sum, ah->eep_ops->get_eeprom_ver(ah)); return -EINVAL; } return 0; #undef EEPROM_4K_SIZE } static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, enum eeprom_param param) { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &eep->modalHeader; struct base_eep_header_4k *pBase = &eep->baseEepHeader; u16 ver_minor; ver_minor = pBase->version & AR5416_EEP_VER_MINOR_MASK; switch (param) { case EEP_NFTHRESH_2: return pModal->noiseFloorThreshCh[0]; case EEP_MAC_LSW: return get_unaligned_be16(pBase->macAddr); case EEP_MAC_MID: return get_unaligned_be16(pBase->macAddr + 2); case EEP_MAC_MSW: return get_unaligned_be16(pBase->macAddr + 4); case EEP_REG_0: return pBase->regDmn[0]; case EEP_OP_CAP: return 
pBase->deviceCap; case EEP_OP_MODE: return pBase->opCapFlags; case EEP_RF_SILENT: return pBase->rfSilent; case EEP_OB_2: return pModal->ob_0; case EEP_DB_2: return pModal->db1_1; case EEP_MINOR_REV: return ver_minor; case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: return pBase->rxMask; case EEP_FRAC_N_5G: return 0; case EEP_PWR_TABLE_OFFSET: return AR5416_PWR_TABLE_OFFSET_DB; case EEP_MODAL_VER: return pModal->version; case EEP_ANT_DIV_CTL1: return pModal->antdiv_ctl1; case EEP_TXGAIN_TYPE: return pBase->txGainType; case EEP_ANTENNA_GAIN_2G: return pModal->antennaGainCh[0]; default: return 0; } } static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct cal_data_per_freq_4k *pRawDataset; u8 *pCalBChans = NULL; u16 pdGainOverlap_t2; static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK]; u16 numPiers, i, j; u16 numXpdGain, xpdMask; u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 }; u32 reg32, regOffset, regChainOffset; xpdMask = pEepData->modalHeader.xpdGain; if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_2) { pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; } else { pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), AR_PHY_TPCRG5_PD_GAIN_OVERLAP)); } pCalBChans = pEepData->calFreqPier2G; numPiers = AR5416_EEP4K_NUM_2G_CAL_PIERS; numXpdGain = 0; for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) { if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) { if (numXpdGain >= AR5416_EEP4K_NUM_PD_GAINS) break; xpdGainValues[numXpdGain] = (u16)(AR5416_PD_GAINS_IN_MASK - i); numXpdGain++; } } REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, (numXpdGain - 1) & 0x3); REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1, xpdGainValues[0]); REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2, xpdGainValues[1]); 
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0); for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { regChainOffset = i * 0x1000; if (pEepData->baseEepHeader.txMask & (1 << i)) { pRawDataset = pEepData->calPierData2G[i]; ath9k_hw_get_gain_boundaries_pdadcs(ah, chan, pRawDataset, pCalBChans, numPiers, pdGainOverlap_t2, gainBoundaries, pdadcValues, numXpdGain); ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset, SM(pdGainOverlap_t2, AR_PHY_TPCRG5_PD_GAIN_OVERLAP) | SM(gainBoundaries[0], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) | SM(gainBoundaries[1], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) | SM(gainBoundaries[2], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) | SM(gainBoundaries[3], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4)); regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset; for (j = 0; j < 32; j++) { reg32 = get_unaligned_le32(&pdadcValues[4 * j]); REG_WRITE(ah, regOffset, reg32); ath_dbg(common, EEPROM, "PDADC (%d,%4x): %4.4x %8.8x\n", i, regChainOffset, regOffset, reg32); ath_dbg(common, EEPROM, "PDADC: Chain %d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d |\n", i, 4 * j, pdadcValues[4 * j], 4 * j + 1, pdadcValues[4 * j + 1], 4 * j + 2, pdadcValues[4 * j + 2], 4 * j + 3, pdadcValues[4 * j + 3]); regOffset += 4; } REGWRITE_BUFFER_FLUSH(ah); } } } static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, struct ath9k_channel *chan, int16_t *ratesArray, u16 cfgCtl, u16 antenna_reduction, u16 powerLimit) { #define CMP_TEST_GRP \ (((cfgCtl & ~CTL_MODE_M)| (pCtlMode[ctlMode] & CTL_MODE_M)) == \ pEepData->ctlIndex[i]) \ || (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \ ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL)) int i; u16 twiceMinEdgePower; u16 twiceMaxEdgePower; u16 scaledPower = 0, minCtlPower; u16 numCtlModes; const u16 *pCtlMode; u16 ctlMode, freq; struct chan_centers centers; struct cal_ctl_data_4k *rep; struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct 
cal_target_power_leg targetPowerOfdm, targetPowerCck = { 0, { 0, 0, 0, 0} }; struct cal_target_power_leg targetPowerOfdmExt = { 0, { 0, 0, 0, 0} }, targetPowerCckExt = { 0, { 0, 0, 0, 0 } }; struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = { 0, {0, 0, 0, 0} }; static const u16 ctlModesFor11g[] = { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40 }; ath9k_hw_get_channel_centers(ah, chan, &centers); scaledPower = powerLimit - antenna_reduction; numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; pCtlMode = ctlModesFor11g; ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPowerCck, AR5416_NUM_2G_CCK_TARGET_POWERS, &targetPowerCck, 4, false); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPower2G, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerOfdm, 4, false); ath9k_hw_get_target_powers(ah, chan, pEepData->calTargetPower2GHT20, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerHt20, 8, false); if (IS_CHAN_HT40(chan)) { numCtlModes = ARRAY_SIZE(ctlModesFor11g); ath9k_hw_get_target_powers(ah, chan, pEepData->calTargetPower2GHT40, AR5416_NUM_2G_40_TARGET_POWERS, &targetPowerHt40, 8, true); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPowerCck, AR5416_NUM_2G_CCK_TARGET_POWERS, &targetPowerCckExt, 4, true); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPower2G, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerOfdmExt, 4, true); } for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || (pCtlMode[ctlMode] == CTL_2GHT40); if (isHt40CtlMode) freq = centers.synth_center; else if (pCtlMode[ctlMode] & EXT_ADDITIVE) freq = centers.ext_center; else freq = centers.ctl_center; twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { if (CMP_TEST_GRP) { rep = &(pEepData->ctlData[i]); twiceMinEdgePower = ath9k_hw_get_max_edge_power( freq, rep->ctlEdges[ 
ar5416_get_ntxchains(ah->txchainmask) - 1], IS_CHAN_2GHZ(chan), AR5416_EEP4K_NUM_BAND_EDGES); if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) { twiceMaxEdgePower = min(twiceMaxEdgePower, twiceMinEdgePower); } else { twiceMaxEdgePower = twiceMinEdgePower; break; } } } minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); switch (pCtlMode[ctlMode]) { case CTL_11B: for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) { targetPowerCck.tPow2x[i] = min((u16)targetPowerCck.tPow2x[i], minCtlPower); } break; case CTL_11G: for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) { targetPowerOfdm.tPow2x[i] = min((u16)targetPowerOfdm.tPow2x[i], minCtlPower); } break; case CTL_2GHT20: for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) { targetPowerHt20.tPow2x[i] = min((u16)targetPowerHt20.tPow2x[i], minCtlPower); } break; case CTL_11B_EXT: targetPowerCckExt.tPow2x[0] = min((u16)targetPowerCckExt.tPow2x[0], minCtlPower); break; case CTL_11G_EXT: targetPowerOfdmExt.tPow2x[0] = min((u16)targetPowerOfdmExt.tPow2x[0], minCtlPower); break; case CTL_2GHT40: for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { targetPowerHt40.tPow2x[i] = min((u16)targetPowerHt40.tPow2x[i], minCtlPower); } break; default: break; } } ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] = ratesArray[rate18mb] = ratesArray[rate24mb] = targetPowerOfdm.tPow2x[0]; ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3]; ratesArray[rateXr] = targetPowerOfdm.tPow2x[0]; for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i]; ratesArray[rate1l] = targetPowerCck.tPow2x[0]; ratesArray[rate2s] = ratesArray[rate2l] = targetPowerCck.tPow2x[1]; ratesArray[rate5_5s] = ratesArray[rate5_5l] = targetPowerCck.tPow2x[2]; ratesArray[rate11s] = ratesArray[rate11l] = targetPowerCck.tPow2x[3]; if (IS_CHAN_HT40(chan)) { for (i = 0; i < 
ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { ratesArray[rateHt40_0 + i] = targetPowerHt40.tPow2x[i]; } ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0]; ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0]; ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0]; ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0]; } #undef CMP_TEST_GRP } static void ath9k_hw_4k_set_txpower(struct ath_hw *ah, struct ath9k_channel *chan, u16 cfgCtl, u8 twiceAntennaReduction, u8 powerLimit, bool test) { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &pEepData->modalHeader; int16_t ratesArray[Ar5416RateSize]; u8 ht40PowerIncForPdadc = 2; int i; memset(ratesArray, 0, sizeof(ratesArray)); if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_2) { ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; } ath9k_hw_set_4k_power_per_rate_table(ah, chan, &ratesArray[0], cfgCtl, twiceAntennaReduction, powerLimit); ath9k_hw_set_4k_power_cal_table(ah, chan); regulatory->max_power_level = 0; for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { if (ratesArray[i] > MAX_RATE_POWER) ratesArray[i] = MAX_RATE_POWER; if (ratesArray[i] > regulatory->max_power_level) regulatory->max_power_level = ratesArray[i]; } if (test) return; for (i = 0; i < Ar5416RateSize; i++) ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2; ENABLE_REGWRITE_BUFFER(ah); /* OFDM power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, ATH9K_POW_SM(ratesArray[rate18mb], 24) | ATH9K_POW_SM(ratesArray[rate12mb], 16) | ATH9K_POW_SM(ratesArray[rate9mb], 8) | ATH9K_POW_SM(ratesArray[rate6mb], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE2, ATH9K_POW_SM(ratesArray[rate54mb], 24) | ATH9K_POW_SM(ratesArray[rate48mb], 16) | ATH9K_POW_SM(ratesArray[rate36mb], 8) | ATH9K_POW_SM(ratesArray[rate24mb], 0)); /* CCK power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, ATH9K_POW_SM(ratesArray[rate2s], 24) | 
ATH9K_POW_SM(ratesArray[rate2l], 16) | ATH9K_POW_SM(ratesArray[rateXr], 8) | ATH9K_POW_SM(ratesArray[rate1l], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE4, ATH9K_POW_SM(ratesArray[rate11s], 24) | ATH9K_POW_SM(ratesArray[rate11l], 16) | ATH9K_POW_SM(ratesArray[rate5_5s], 8) | ATH9K_POW_SM(ratesArray[rate5_5l], 0)); /* HT20 power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, ATH9K_POW_SM(ratesArray[rateHt20_3], 24) | ATH9K_POW_SM(ratesArray[rateHt20_2], 16) | ATH9K_POW_SM(ratesArray[rateHt20_1], 8) | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE6, ATH9K_POW_SM(ratesArray[rateHt20_7], 24) | ATH9K_POW_SM(ratesArray[rateHt20_6], 16) | ATH9K_POW_SM(ratesArray[rateHt20_5], 8) | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)); /* HT40 power per rate */ if (IS_CHAN_HT40(chan)) { REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, ATH9K_POW_SM(ratesArray[rateHt40_3] + ht40PowerIncForPdadc, 24) | ATH9K_POW_SM(ratesArray[rateHt40_2] + ht40PowerIncForPdadc, 16) | ATH9K_POW_SM(ratesArray[rateHt40_1] + ht40PowerIncForPdadc, 8) | ATH9K_POW_SM(ratesArray[rateHt40_0] + ht40PowerIncForPdadc, 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE8, ATH9K_POW_SM(ratesArray[rateHt40_7] + ht40PowerIncForPdadc, 24) | ATH9K_POW_SM(ratesArray[rateHt40_6] + ht40PowerIncForPdadc, 16) | ATH9K_POW_SM(ratesArray[rateHt40_5] + ht40PowerIncForPdadc, 8) | ATH9K_POW_SM(ratesArray[rateHt40_4] + ht40PowerIncForPdadc, 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) | ATH9K_POW_SM(ratesArray[rateExtCck], 16) | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); } REGWRITE_BUFFER_FLUSH(ah); } static void ath9k_hw_4k_set_gain(struct ath_hw *ah, struct modal_eep_4k_header *pModal, struct ar5416_eeprom_4k *eep, u8 txRxAttenLocal) { REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0, pModal->antCtrlChain[0]); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), (REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) & ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) 
| SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_3) { txRxAttenLocal = pModal->txRxAttenCh[0]; REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, pModal->xatten2Margin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); /* Set the block 1 value to block 0 value */ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, pModal->xatten2Margin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); } REG_RMW_FIELD(ah, AR_PHY_RXGAIN, AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); REG_RMW_FIELD(ah, AR_PHY_RXGAIN, AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); } /* * Read EEPROM header info and program the device for correct operation * given the channel value. 
*/ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { struct modal_eep_4k_header *pModal; struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct base_eep_header_4k *pBase = &eep->baseEepHeader; u8 txRxAttenLocal; u8 ob[5], db1[5], db2[5]; u8 ant_div_control1, ant_div_control2; u8 bb_desired_scale; u32 regVal; pModal = &eep->modalHeader; txRxAttenLocal = 23; REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon); /* Single chain for 4K EEPROM*/ ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal); /* Initialize Ant Diversity settings from EEPROM */ if (pModal->version >= 3) { ant_div_control1 = pModal->antdiv_ctl1; ant_div_control2 = pModal->antdiv_ctl2; regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); regVal &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL)); regVal |= SM(ant_div_control1, AR_PHY_9285_ANT_DIV_CTL); regVal |= SM(ant_div_control2, AR_PHY_9285_ANT_DIV_ALT_LNACONF); regVal |= SM((ant_div_control2 >> 2), AR_PHY_9285_ANT_DIV_MAIN_LNACONF); regVal |= SM((ant_div_control1 >> 1), AR_PHY_9285_ANT_DIV_ALT_GAINTB); regVal |= SM((ant_div_control1 >> 2), AR_PHY_9285_ANT_DIV_MAIN_GAINTB); REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal); regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); regVal = REG_READ(ah, AR_PHY_CCK_DETECT); regVal &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); regVal |= SM((ant_div_control1 >> 3), AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal); regVal = REG_READ(ah, AR_PHY_CCK_DETECT); } if (pModal->version >= 2) { ob[0] = pModal->ob_0; ob[1] = pModal->ob_1; ob[2] = pModal->ob_2; ob[3] = pModal->ob_3; ob[4] = pModal->ob_4; db1[0] = pModal->db1_0; db1[1] = pModal->db1_1; db1[2] = pModal->db1_2; db1[3] = pModal->db1_3; db1[4] = pModal->db1_4; db2[0] = pModal->db2_0; db2[1] = pModal->db2_1; db2[2] = pModal->db2_2; db2[3] = pModal->db2_3; db2[4] = pModal->db2_4; } else if (pModal->version == 1) { ob[0] = pModal->ob_0; ob[1] = ob[2] = ob[3] = ob[4] = pModal->ob_1; db1[0] = 
pModal->db1_0; db1[1] = db1[2] = db1[3] = db1[4] = pModal->db1_1; db2[0] = pModal->db2_0; db2[1] = db2[2] = db2[3] = db2[4] = pModal->db2_1; } else { int i; for (i = 0; i < 5; i++) { ob[i] = pModal->ob_0; db1[i] = pModal->db1_0; db2[i] = pModal->db1_0; } } if (AR_SREV_9271(ah)) { ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_OB_cck, AR9271_AN_RF2G3_OB_cck_S, ob[0]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_OB_psk, AR9271_AN_RF2G3_OB_psk_S, ob[1]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_OB_qam, AR9271_AN_RF2G3_OB_qam_S, ob[2]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_DB_1, AR9271_AN_RF2G3_DB_1_S, db1[0]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9271_AN_RF2G4_DB_2, AR9271_AN_RF2G4_DB_2_S, db2[0]); } else { ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_OB_0, AR9285_AN_RF2G3_OB_0_S, ob[0]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_OB_1, AR9285_AN_RF2G3_OB_1_S, ob[1]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_OB_2, AR9285_AN_RF2G3_OB_2_S, ob[2]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_OB_3, AR9285_AN_RF2G3_OB_3_S, ob[3]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_OB_4, AR9285_AN_RF2G3_OB_4_S, ob[4]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_DB1_0, AR9285_AN_RF2G3_DB1_0_S, db1[0]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_DB1_1, AR9285_AN_RF2G3_DB1_1_S, db1[1]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_DB1_2, AR9285_AN_RF2G3_DB1_2_S, db1[2]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB1_3, AR9285_AN_RF2G4_DB1_3_S, db1[3]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB1_4, AR9285_AN_RF2G4_DB1_4_S, db1[4]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB2_0, AR9285_AN_RF2G4_DB2_0_S, db2[0]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, 
AR9285_AN_RF2G4_DB2_1, AR9285_AN_RF2G4_DB2_1_S, db2[1]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB2_2, AR9285_AN_RF2G4_DB2_2_S, db2[2]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB2_3, AR9285_AN_RF2G4_DB2_3_S, db2[3]); ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4, AR9285_AN_RF2G4_DB2_4, AR9285_AN_RF2G4_DB2_4_S, db2[4]); } REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, pModal->switchSettling); REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC, pModal->adcDesiredSize); REG_WRITE(ah, AR_PHY_RF_CTL4, SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) | SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) | SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) | SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON)); REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, pModal->txEndToRxOn); if (AR_SREV_9271_10(ah)) REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, pModal->txEndToRxOn); REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, pModal->thresh62); REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62); if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_2) { REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart); REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON, pModal->txFrameToPaOn); } if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_3) { if (IS_CHAN_HT40(chan)) REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, pModal->swSettleHt40); } bb_desired_scale = (pModal->bb_scale_smrt_antenna & EEP_4K_BB_DESIRED_SCALE_MASK); if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) { u32 pwrctrl, mask, clr; mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25); pwrctrl = mask * bb_desired_scale; clr = mask * 0x1f; REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr); REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr); REG_RMW(ah, 
AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr); mask = BIT(0)|BIT(5)|BIT(15); pwrctrl = mask * bb_desired_scale; clr = mask * 0x1f; REG_RMW(ah, AR_PHY_TX_PWRCTRL9, pwrctrl, clr); mask = BIT(0)|BIT(5); pwrctrl = mask * bb_desired_scale; clr = mask * 0x1f; REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr); REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr); } } static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { #define EEP_MAP4K_SPURCHAN \ (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan) struct ath_common *common = ath9k_hw_common(ah); u16 spur_val = AR_NO_SPUR; ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n", i, is2GHz, ah->config.spurchans[i][is2GHz]); switch (ah->config.spurmode) { case SPUR_DISABLE: break; case SPUR_ENABLE_IOCTL: spur_val = ah->config.spurchans[i][is2GHz]; ath_dbg(common, ANI, "Getting spur val from new loc. %d\n", spur_val); break; case SPUR_ENABLE_EEPROM: spur_val = EEP_MAP4K_SPURCHAN; break; } return spur_val; #undef EEP_MAP4K_SPURCHAN } const struct eeprom_ops eep_4k_ops = { .check_eeprom = ath9k_hw_4k_check_eeprom, .get_eeprom = ath9k_hw_4k_get_eeprom, .fill_eeprom = ath9k_hw_4k_fill_eeprom, .dump_eeprom = ath9k_hw_4k_dump_eeprom, .get_eeprom_ver = ath9k_hw_4k_get_eeprom_ver, .get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev, .set_board_values = ath9k_hw_4k_set_board_values, .set_txpower = ath9k_hw_4k_set_txpower, .get_spur_channel = ath9k_hw_4k_get_spur_channel };
gpl-2.0
vDorst/linux
drivers/staging/vt6656/usbpipe.c
5997
20616
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: usbpipe.c * * Purpose: Handle USB control endpoint * * Author: Warren Hsu * * Date: Mar. 29, 2005 * * Functions: * CONTROLnsRequestOut - Write variable length bytes to MEM/BB/MAC/EEPROM * CONTROLnsRequestIn - Read variable length bytes from MEM/BB/MAC/EEPROM * ControlvWriteByte - Write one byte to MEM/BB/MAC/EEPROM * ControlvReadByte - Read one byte from MEM/BB/MAC/EEPROM * ControlvMaskByte - Read one byte from MEM/BB/MAC/EEPROM and clear/set some bits in the same address * * Revision History: * 04-05-2004 Jerry Chen: Initial release * 11-24-2004 Warren Hsu: Add ControlvWriteByte,ControlvReadByte,ControlvMaskByte * */ #include "int.h" #include "rxtx.h" #include "dpc.h" #include "control.h" #include "desc.h" #include "device.h" /*--------------------- Static Definitions -------------------------*/ //endpoint def //endpoint 0: control //endpoint 1: interrupt //endpoint 2: read bulk //endpoint 3: write bulk //RequestType: //#define REQUEST_OUT (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE) // 0x40 //#define REQUEST_IN (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE ) //0xc0 //static int msglevel =MSG_LEVEL_DEBUG; static int msglevel =MSG_LEVEL_INFO; #define USB_CTL_WAIT 500 
//ms #ifndef URB_ASYNC_UNLINK #define URB_ASYNC_UNLINK 0 #endif /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ static void s_nsInterruptUsbIoCompleteRead( struct urb *urb ); static void s_nsBulkInUsbIoCompleteRead( struct urb *urb ); static void s_nsBulkOutIoCompleteWrite( struct urb *urb ); static void s_nsControlInUsbIoCompleteRead( struct urb *urb ); static void s_nsControlInUsbIoCompleteWrite( struct urb *urb ); /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ int PIPEnsControlOutAsyn( PSDevice pDevice, BYTE byRequest, WORD wValue, WORD wIndex, WORD wLength, PBYTE pbyBuffer ) { int ntStatus; if (pDevice->Flags & fMP_DISCONNECTED) return STATUS_FAILURE; if (pDevice->Flags & fMP_CONTROL_WRITES) return STATUS_FAILURE; if (in_interrupt()) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"in_interrupt return ..byRequest %x\n", byRequest); return STATUS_FAILURE; } ntStatus = usb_control_msg( pDevice->usb, usb_sndctrlpipe(pDevice->usb , 0), byRequest, 0x40, // RequestType wValue, wIndex, (void *) pbyBuffer, wLength, HZ ); if (ntStatus >= 0) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"usb_sndctrlpipe ntStatus= %d\n", ntStatus); ntStatus = 0; } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"usb_sndctrlpipe fail, ntStatus= %d\n", ntStatus); } return ntStatus; } int PIPEnsControlOut( PSDevice pDevice, BYTE byRequest, WORD wValue, WORD wIndex, WORD wLength, PBYTE pbyBuffer ) { int ntStatus = 0; int ii; if (pDevice->Flags & fMP_DISCONNECTED) return STATUS_FAILURE; if (pDevice->Flags & fMP_CONTROL_WRITES) return STATUS_FAILURE; pDevice->sUsbCtlRequest.bRequestType = 0x40; pDevice->sUsbCtlRequest.bRequest = byRequest; pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); pDevice->sUsbCtlRequest.wIndex = cpu_to_le16p(&wIndex); 
pDevice->sUsbCtlRequest.wLength = cpu_to_le16p(&wLength); pDevice->pControlURB->transfer_flags |= URB_ASYNC_UNLINK; pDevice->pControlURB->actual_length = 0; // Notice, pbyBuffer limited point to variable buffer, can't be constant. usb_fill_control_urb(pDevice->pControlURB, pDevice->usb, usb_sndctrlpipe(pDevice->usb , 0), (char *) &pDevice->sUsbCtlRequest, pbyBuffer, wLength, s_nsControlInUsbIoCompleteWrite, pDevice); ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); if (ntStatus != 0) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus); return STATUS_FAILURE; } else { MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES); } spin_unlock_irq(&pDevice->lock); for (ii = 0; ii <= USB_CTL_WAIT; ii ++) { if (pDevice->Flags & fMP_CONTROL_WRITES) mdelay(1); else break; if (ii >= USB_CTL_WAIT) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "control send request submission timeout\n"); spin_lock_irq(&pDevice->lock); MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES); return STATUS_FAILURE; } } spin_lock_irq(&pDevice->lock); return STATUS_SUCCESS; } int PIPEnsControlIn( PSDevice pDevice, BYTE byRequest, WORD wValue, WORD wIndex, WORD wLength, PBYTE pbyBuffer ) { int ntStatus = 0; int ii; if (pDevice->Flags & fMP_DISCONNECTED) return STATUS_FAILURE; if (pDevice->Flags & fMP_CONTROL_READS) return STATUS_FAILURE; pDevice->sUsbCtlRequest.bRequestType = 0xC0; pDevice->sUsbCtlRequest.bRequest = byRequest; pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); pDevice->sUsbCtlRequest.wIndex = cpu_to_le16p(&wIndex); pDevice->sUsbCtlRequest.wLength = cpu_to_le16p(&wLength); pDevice->pControlURB->transfer_flags |= URB_ASYNC_UNLINK; pDevice->pControlURB->actual_length = 0; usb_fill_control_urb(pDevice->pControlURB, pDevice->usb, usb_rcvctrlpipe(pDevice->usb , 0), (char *) &pDevice->sUsbCtlRequest, pbyBuffer, wLength, s_nsControlInUsbIoCompleteRead, pDevice); ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); if (ntStatus != 0) { DBG_PRT(MSG_LEVEL_DEBUG, 
/*
 * NOTE(review): this fragment begins inside the control-read submission
 * function; its opening (including the DBG_PRT( call this first literal
 * belongs to) lies above this chunk.  The unlock/re-lock pairing below
 * suggests the function is entered and left with pDevice->lock held --
 * confirm against the caller before changing the locking.
 */
			KERN_INFO"control request submission failed: %d\n", ntStatus);
	}else {
		MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
	}

	spin_unlock_irq(&pDevice->lock);
	/* Poll (roughly 1 ms per iteration, up to USB_CTL_WAIT) for the URB
	 * completion handler to clear fMP_CONTROL_READS; give up on timeout. */
	for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
		if (pDevice->Flags & fMP_CONTROL_READS)
			mdelay(1);
		else
			break;
		if (ii >= USB_CTL_WAIT) {
			DBG_PRT(MSG_LEVEL_DEBUG,
				KERN_INFO "control rcv request submission timeout\n");
			spin_lock_irq(&pDevice->lock);
			MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
			return STATUS_FAILURE;
		}
	}
	spin_lock_irq(&pDevice->lock);
	return ntStatus;
}

/*
 * Completion handler for control-write URBs: log the URB status and clear
 * the "control write in flight" flag that the submitter busy-waits on.
 */
static
void
s_nsControlInUsbIoCompleteWrite(
     struct urb *urb
    )
{
	PSDevice pDevice;

	pDevice = urb->context;
	switch (urb->status) {
	case 0:
		break;
	case -EINPROGRESS:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl write urb status EINPROGRESS%d\n", urb->status);
		break;
	case -ENOENT:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl write urb status ENOENT %d\n", urb->status);
		break;
	default:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl write urb status %d\n", urb->status);
	}

	MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
}

/*
 * Description:
 *      Complete function of usb Control callback
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Completion handler for control-read URBs: mirrors the write completion
 * above but clears fMP_CONTROL_READS instead. */
static
void
s_nsControlInUsbIoCompleteRead(
     struct urb *urb
    )
{
	PSDevice pDevice;

	pDevice = urb->context;
	switch (urb->status) {
	case 0:
		break;
	case -EINPROGRESS:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl read urb status EINPROGRESS%d\n", urb->status);
		break;
	case -ENOENT:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl read urb status = ENOENT %d\n", urb->status);
		break;
	default:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl read urb status %d\n", urb->status);
	}

	MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
}

/*
 * Description:
 *      Allocates an usb interrupt in irp and calls USBD.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Submit the single "interrupt" URB (actually a bulk pipe on endpoint 1).
 * Refuses to double-submit: intBuf.bInUse acts as the in-flight marker and
 * is cleared either by the completion handler (on error) or by the
 * interrupt-data consumer. */
int PIPEnsInterruptRead(PSDevice pDevice)
{
	int ntStatus = STATUS_FAILURE;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartInterruptUsbRead()\n");

	if(pDevice->intBuf.bInUse == TRUE){
		return (STATUS_FAILURE);
	}
	pDevice->intBuf.bInUse = TRUE;
//	pDevice->bEventAvailable = FALSE;
	pDevice->ulIntInPosted++;

	//
	// Now that we have created the urb, we will send a
	// request to the USB device object.
	//
	pDevice->pInterruptURB->interval = pDevice->int_interval;

	usb_fill_bulk_urb(pDevice->pInterruptURB,
			  pDevice->usb,
			  usb_rcvbulkpipe(pDevice->usb, 1),
			  (void *) pDevice->intBuf.pDataBuf,
			  MAX_INTERRUPT_SIZE,
			  s_nsInterruptUsbIoCompleteRead,
			  pDevice);

	ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
	if (ntStatus != 0) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
	}

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----s_nsStartInterruptUsbRead Return(%x)\n",ntStatus);
	return ntStatus;
}

/*
 * Description:
 *      Complete function of usb interrupt in irp.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Completion handler for the interrupt URB: on success hand the data to
 * INTnsProcessData(), then (unless the polling thread is being torn down)
 * immediately resubmit the same URB so polling continues. */
static
void
s_nsInterruptUsbIoCompleteRead(
     struct urb *urb
    )
{
	PSDevice pDevice;
	int ntStatus;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptUsbIoCompleteRead\n");
	//
	// The context given to IoSetCompletionRoutine is the receive buffer object
	//
	pDevice = (PSDevice)urb->context;

	//
	// We have a number of cases:
	//    1) The USB read timed out and we received no data.
	//    2) The USB read timed out and we received some data.
	//    3) The USB read was successful and fully filled our irp buffer.
	//    4) The irp was cancelled.
	//    5) Some other failure from the USB device object.
	//
	ntStatus = urb->status;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsInterruptUsbIoCompleteRead Status %d\n", ntStatus);

	// if we were not successful, we need to free the int buffer for future use right here
	// otherwise interrupt data handler will free int buffer after it handle it.
	if (( ntStatus != STATUS_SUCCESS )) {
		pDevice->ulBulkInError++;
		pDevice->intBuf.bInUse = FALSE;

//		if (ntStatus == USBD_STATUS_CRC) {
//			pDevice->ulIntInContCRCError++;
//		}

//		if (ntStatus == STATUS_NOT_CONNECTED )
//		{
			/* NOTE(review): any URB error stops the polling loop --
			 * the surrounding commented-out code shows this was once
			 * limited to "not connected". */
			pDevice->fKillEventPollingThread = TRUE;
//		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"IntUSBIoCompleteControl STATUS = %d\n", ntStatus );
	} else {
		pDevice->ulIntInBytesRead += (unsigned long) urb->actual_length;
		pDevice->ulIntInContCRCError = 0;
		pDevice->bEventAvailable = TRUE;
		INTnsProcessData(pDevice);
	}

	STAvUpdateUSBCounter(&pDevice->scStatistic.USB_InterruptStat, ntStatus);

	if (pDevice->fKillEventPollingThread != TRUE) {
		/* Re-arm the interrupt pipe for the next event. */
		usb_fill_bulk_urb(pDevice->pInterruptURB,
				  pDevice->usb,
				  usb_rcvbulkpipe(pDevice->usb, 1),
				  (void *) pDevice->intBuf.pDataBuf,
				  MAX_INTERRUPT_SIZE,
				  s_nsInterruptUsbIoCompleteRead,
				  pDevice);

		ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
		if (ntStatus != 0) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
		}
	}
	//
	// We return STATUS_MORE_PROCESSING_REQUIRED so that the completion
	// routine (IofCompleteRequest) will stop working on the irp.
	//
	return ;
}

/*
 * Description:
 *      Allocates an usb BulkIn irp and calls USBD.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Submit a bulk-in (receive) URB for the given RCB on endpoint 2.  On
 * success the RCB is marked in-use with a single reference that the
 * completion handler drops. */
int PIPEnsBulkInUsbRead(PSDevice pDevice, PRCB pRCB)
{
	int ntStatus = 0;
	struct urb *pUrb;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartBulkInUsbRead\n");

	if (pDevice->Flags & fMP_DISCONNECTED)
		return STATUS_FAILURE;

	pDevice->ulBulkInPosted++;

	pUrb = pRCB->pUrb;

	//
	// Now that we have created the urb, we will send a
	// request to the USB device object.
	//
	if (pRCB->skb == NULL) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pRCB->skb is null \n");
		return ntStatus;
	}

	usb_fill_bulk_urb(pUrb,
			  pDevice->usb,
			  usb_rcvbulkpipe(pDevice->usb, 2),
			  (void *) (pRCB->skb->data),
			  MAX_TOTAL_SIZE_WITH_ALL_HEADERS,
			  s_nsBulkInUsbIoCompleteRead,
			  pRCB);

	ntStatus = usb_submit_urb(pUrb, GFP_ATOMIC);
	if (ntStatus != 0) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Rx URB failed %d\n", ntStatus);
		return STATUS_FAILURE ;
	}
	pRCB->Ref = 1;
	pRCB->bBoolInUse= TRUE;

	return ntStatus;
}

/*
 * Description:
 *      Complete function of usb BulkIn irp.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Completion handler for bulk-in URBs: update statistics, hand good data to
 * RXbBulkInProcessData() under the device lock, then drop this handler's
 * RCB reference and recycle the RCB when it reaches zero. */
static
void
s_nsBulkInUsbIoCompleteRead(
     struct urb *urb
    )
{
	PRCB pRCB = (PRCB)urb->context;
	PSDevice pDevice = (PSDevice)pRCB->pDevice;
	unsigned long   bytesRead;
	BOOL bIndicateReceive = FALSE;
	BOOL bReAllocSkb = FALSE;
	int status;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkInUsbIoCompleteRead\n");
	status = urb->status;
	bytesRead = urb->actual_length;

	if (status) {
		pDevice->ulBulkInError++;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK In failed %d\n", status);

		pDevice->scStatistic.RxFcsErrCnt ++;
//todo...xxxxxx
//		if (status == USBD_STATUS_CRC) {
//			pDevice->ulBulkInContCRCError++;
//		}
//		if (status == STATUS_DEVICE_NOT_CONNECTED )
//		{
//			MP_SET_FLAG(pDevice, fMP_DISCONNECTED);
//		}
	} else {
		bIndicateReceive = TRUE;
		pDevice->ulBulkInContCRCError = 0;
		pDevice->ulBulkInBytesRead += bytesRead;

		pDevice->scStatistic.RxOkCnt ++;
	}

	STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkInStat, status);

	if (bIndicateReceive) {
		spin_lock(&pDevice->lock);
		if (RXbBulkInProcessData(pDevice, pRCB, bytesRead) == TRUE)
			bReAllocSkb = TRUE;
		spin_unlock(&pDevice->lock);
	}
	pRCB->Ref--;
	if (pRCB->Ref == 0) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RxvFreeNormal %d \n",pDevice->NumRecvFreeList);
		spin_lock(&pDevice->lock);
		RXvFreeRCB(pRCB, bReAllocSkb);
		spin_unlock(&pDevice->lock);
	}

	return;
}

/*
 * Description:
 *      Allocates an usb BulkOut irp and calls USBD.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Submit a bulk-out (transmit) URB on endpoint 3 for the given send
 * context.  Returns STATUS_PENDING on successful submission (completion is
 * asynchronous), STATUS_FAILURE if usb_submit_urb() fails, and
 * STATUS_RESOURCES when the adapter is not ready for writes. */
int
PIPEnsSendBulkOut(
      PSDevice pDevice,
      PUSB_SEND_CONTEXT pContext
    )
{
	int status;
	struct urb          *pUrb;

	pDevice->bPWBitOn = FALSE;

/*
	if (pDevice->pPendingBulkOutContext != NULL) {
		pDevice->NumContextsQueued++;
		EnqueueContext(pDevice->FirstTxContextQueue, pDevice->LastTxContextQueue, pContext);
		status = STATUS_PENDING;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send pending!\n");
		return status;
	}
*/

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsSendBulkOut\n");

	if (MP_IS_READY(pDevice) && (pDevice->Flags & fMP_POST_WRITES)) {

		pUrb = pContext->pUrb;
		pDevice->ulBulkOutPosted++;
//		pDevice->pPendingBulkOutContext = pContext;
		usb_fill_bulk_urb(
			    pUrb,
			    pDevice->usb,
			    usb_sndbulkpipe(pDevice->usb, 3),
			    (void *) &(pContext->Data[0]),
			    pContext->uBufLen,
			    s_nsBulkOutIoCompleteWrite,
			    pContext);

		status = usb_submit_urb(pUrb, GFP_ATOMIC);
		if (status != 0) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Tx URB failed %d\n", status);
			return STATUS_FAILURE;
		}
		return STATUS_PENDING;
	} else {
		pContext->bBoolInUse = FALSE;
		return STATUS_RESOURCES;
	}
}

/*
 * Description: s_nsBulkOutIoCompleteWrite
 *     1a) Indicate to the protocol the status of the write.
 *     1b) Return ownership of the packet to the protocol.
 *
 *     2)  If any more packets are queue for sending, send another packet
 *         to USBD.
 *         If the attempt to send the packet to the driver fails,
 *         return ownership of the packet to the protocol and
 *         try another packet (until one succeeds).
 *
 * Parameters:
 *  In:
 *      pdoUsbDevObj  - pointer to the USB device object which
 *                      completed the irp
 *      pIrp          - the irp which was completed by the
 *                      device object
 *      pContext      - the context given to IoSetCompletionRoutine
 *                      before calling IoCallDriver on the irp
 *                      The pContext is a pointer to the USB device object.
 *  Out:
 *      none
 *
 * Return Value: STATUS_MORE_PROCESSING_REQUIRED - allows the completion routine
 *               (IofCompleteRequest) to stop working on the irp.
 *
 */
static
void
s_nsBulkOutIoCompleteWrite(
     struct urb *urb
    )
{
	PSDevice            pDevice;
	int status;
	CONTEXT_TYPE        ContextType;
	unsigned long       ulBufLen;
	PUSB_SEND_CONTEXT   pContext;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
	//
	// The context given to IoSetCompletionRoutine is an USB_CONTEXT struct
	//
	pContext = (PUSB_SEND_CONTEXT) urb->context;
	ASSERT( NULL != pContext );

	pDevice = pContext->pDevice;
	ContextType = pContext->Type;
	ulBufLen = pContext->uBufLen;

	if (!netif_device_present(pDevice->dev))
		return;

	//
	// Perform various IRP, URB, and buffer 'sanity checks'
	//

	status = urb->status;
	//we should have failed, succeeded, or cancelled, but NOT be pending
	STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkOutStat, status);

	if(status == STATUS_SUCCESS) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Write %d bytes\n",(int)ulBufLen);
		pDevice->ulBulkOutBytesWrite += ulBufLen;
		pDevice->ulBulkOutContCRCError = 0;
		pDevice->nTxDataTimeCout = 0;

	} else {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK Out failed %d\n", status);
		pDevice->ulBulkOutError++;
	}

//	pDevice->ulCheckForHangCount = 0;
//	pDevice->pPendingBulkOutContext = NULL;

	if ( CONTEXT_DATA_PACKET == ContextType ) {
		// Indicate to the protocol the status of the sent packet and return
		// ownership of the packet.
		if (pContext->pPacket != NULL) {
			dev_kfree_skb_irq(pContext->pPacket);
			pContext->pPacket = NULL;
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"tx  %d bytes\n",(int)ulBufLen);
		}

		pDevice->dev->trans_start = jiffies;

		if (status == STATUS_SUCCESS) {
			pDevice->packetsSent++;
		} else {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send USB error! [%08xh]\n", status);
			pDevice->packetsSentDropped++;
		}

	}
	if (pDevice->bLinkPass == TRUE) {
		if (netif_queue_stopped(pDevice->dev))
			netif_wake_queue(pDevice->dev);
	}
	pContext->bBoolInUse = FALSE;

	return;
}
gpl-2.0
dexter93/kernel_htc_msm8660_old
arch/m68k/kernel/asm-offsets.c
6765
3844
/*
 * This program is used to generate definitions needed by
 * assembly language modules.
 *
 * We use the technique used in the OSF Mach kernel code:
 * generate asm statements containing #defines,
 * compile this file to assembler, and then extract the
 * #defines from the assembly-language output.
 */

#define ASM_OFFSETS_C

#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/kbuild.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/amigahw.h>
#include <linux/font.h>

/*
 * Emits one DEFINE() per constant; the build extracts these from the
 * generated assembly to produce asm-offsets.h.  The return value and the
 * fact that this "main" is never executed are artifacts of the technique
 * described above.
 */
int main(void)
{
	/* offsets into the task struct */
	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
	DEFINE(TASK_STACK, offsetof(struct task_struct, stack));

	/* offsets into the thread struct */
	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
	DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
	DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
	DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
	DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
	DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));

	/* offsets into the thread_info struct */
	DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
	DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));

	/* offsets into the pt_regs */
	DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
	DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
	DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
	DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
	DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
	DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
	DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
	DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
	DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
	DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
	DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
	DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));

	/* bitfields are a bit difficult */
#ifdef CONFIG_COLDFIRE
	/* ColdFire keeps format/vector in the word before sr. */
	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
#else
	/* Classic m68k: format/vector word follows the (32-bit) pc. */
	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
#endif

	/* offsets into the irq_cpustat_t struct */
	DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));

	/* signal defines */
	DEFINE(LSIGSEGV, SIGSEGV);
	DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
	DEFINE(LSIGTRAP, SIGTRAP);
	DEFINE(LTRAP_TRACE, TRAP_TRACE);

#ifdef CONFIG_MMU
	/* offsets into the bi_record struct */
	DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
	DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
	DEFINE(BIR_DATA, offsetof(struct bi_record, data));

	/* offsets into font_desc (drivers/video/console/font.h) */
	DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
	DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
	DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
	DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
	DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
	DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));

	/* offsets into the custom struct */
	/* NOTE: these DEFINE absolute addresses, not offsets. */
	DEFINE(CUSTOMBASE, &amiga_custom);
	DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
	DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
	DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
	DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
	DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
	DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
	DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
	DEFINE(CIAABASE, &ciaa);
	DEFINE(CIABBASE, &ciab);
	DEFINE(C_PRA, offsetof(struct CIA, pra));
	DEFINE(ZTWOBASE, zTwoBase);
#endif

	return 0;
}
gpl-2.0
stargo/android_kernel_amazon_ford
drivers/xen/xen-pciback/xenbus.c
7277
18668
/*
 * PCI Backend Xenbus Setup - handles setup with frontend and xend
 *
 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
#include "pciback.h"

#define INVALID_EVTCHN_IRQ  (-1)
struct workqueue_struct *xen_pcibk_wq;

static bool __read_mostly passthrough;
module_param(passthrough, bool, S_IRUGO);
MODULE_PARM_DESC(passthrough,
	"Option to specify how to export PCI topology to guest:\n"\
	" 0 - (default) Hide the true PCI topology and makes the frontend\n"\
	"   there is a single PCI bus with only the exported devices on it.\n"\
	"   For example, a device at 03:05.0 will be re-assigned to 00:00.0\n"\
	"   while second device at 02:1a.1 will be re-assigned to 00:01.1.\n"\
	" 1 - Passthrough provides a real view of the PCI topology to the\n"\
	"   frontend (for example, a device at 06:01.b will still appear at\n"\
	"   06:01.b to the frontend). This is similar to how Xen 2.0.x\n"\
	"   exposed PCI devices to its driver domains. This may be required\n"\
	"   for drivers which depend on finding their hardward in certain\n"\
	"   bus/slot locations.");

/* Allocate and initialise the per-frontend backend device; stores it as
 * the xenbus device's drvdata.  Returns NULL on allocation or device-init
 * failure. */
static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
{
	struct xen_pcibk_device *pdev;

	pdev = kzalloc(sizeof(struct xen_pcibk_device), GFP_KERNEL);
	if (pdev == NULL)
		goto out;
	dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);

	pdev->xdev = xdev;
	dev_set_drvdata(&xdev->dev, pdev);

	mutex_init(&pdev->dev_lock);

	pdev->sh_info = NULL;
	pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
	pdev->be_watching = 0;

	INIT_WORK(&pdev->op_work, xen_pcibk_do_op);

	if (xen_pcibk_init_devices(pdev)) {
		kfree(pdev);
		pdev = NULL;
	}
out:
	return pdev;
}

/* Tear down the frontend connection: unbind the event channel IRQ, drain
 * any in-flight op work, and unmap the shared ring page. */
static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
{
	mutex_lock(&pdev->dev_lock);
	/* Ensure the guest can't trigger our handler before removing devices */
	if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
		unbind_from_irqhandler(pdev->evtchn_irq, pdev);
		pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
	}

	/* If the driver domain started an op, make sure we complete it
	 * before releasing the shared memory */

	/* Note, the workqueue does not use spinlocks at all.*/
	flush_workqueue(xen_pcibk_wq);

	if (pdev->sh_info != NULL) {
		xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
		pdev->sh_info = NULL;
	}
	mutex_unlock(&pdev->dev_lock);
}

/* Full teardown of a backend device: stop the xenstore watch, disconnect
 * from the frontend, release exported PCI devices and free the structure. */
static void free_pdev(struct xen_pcibk_device *pdev)
{
	if (pdev->be_watching) {
		unregister_xenbus_watch(&pdev->be_watch);
		pdev->be_watching = 0;
	}

	xen_pcibk_disconnect(pdev);

	xen_pcibk_release_devices(pdev);

	dev_set_drvdata(&pdev->xdev->dev, NULL);
	pdev->xdev = NULL;

	kfree(pdev);
}

/* Map the frontend's shared ring (grant reference) and bind its event
 * channel to xen_pcibk_handle_event().  On success pdev->evtchn_irq holds
 * the bound IRQ. */
static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
			     int remote_evtchn)
{
	int err = 0;
	void *vaddr;

	dev_dbg(&pdev->xdev->dev,
		"Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
		gnt_ref, remote_evtchn);

	err = xenbus_map_ring_valloc(pdev->xdev, gnt_ref, &vaddr);
	if (err < 0) {
		xenbus_dev_fatal(pdev->xdev, err,
				"Error mapping other domain page in ours.");
		goto out;
	}

	pdev->sh_info = vaddr;

	err = bind_interdomain_evtchn_to_irqhandler(
		pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
		0, DRV_NAME, pdev);
	if (err < 0) {
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error binding event channel to IRQ");
		goto out;
	}
	pdev->evtchn_irq = err;
	err = 0;

	dev_dbg(&pdev->xdev->dev, "Attached!\n");
out:
	return err;
}

/* Read the frontend's published ring/event-channel configuration from
 * xenstore (once both ends are Initialised), validate the protocol magic,
 * attach, and move this end to Connected. */
static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
{
	int err = 0;
	int gnt_ref, remote_evtchn;
	char *magic = NULL;


	mutex_lock(&pdev->dev_lock);
	/* Make sure we only do this setup once */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateInitialised)
		goto out;

	/* Wait for frontend to state that it has published the configuration */
	if (xenbus_read_driver_state(pdev->xdev->otherend) !=
	    XenbusStateInitialised)
		goto out;

	dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");

	err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
			    "pci-op-ref", "%u", &gnt_ref,
			    "event-channel", "%u", &remote_evtchn,
			    "magic", NULL, &magic, NULL);
	if (err) {
		/* If configuration didn't get read correctly, wait longer */
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading configuration from frontend");
		goto out;
	}

	if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
		xenbus_dev_fatal(pdev->xdev, -EFAULT,
				 "version mismatch (%s/%s) with pcifront - "
				 "halting " DRV_NAME,
				 magic, XEN_PCI_MAGIC);
		goto out;
	}

	err = xen_pcibk_do_attach(pdev, gnt_ref, remote_evtchn);
	if (err)
		goto out;

	dev_dbg(&pdev->xdev->dev, "Connecting...\n");

	err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error switching to connected state!");

	dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
out:
	mutex_unlock(&pdev->dev_lock);

	kfree(magic);

	return err;
}

/* Publish a "vdev-N = DDDD:BB:SS.F" node for an exported device so the
 * frontend can discover its (virtual) location. */
static int xen_pcibk_publish_pci_dev(struct xen_pcibk_device *pdev,
				   unsigned int domain, unsigned int bus,
				   unsigned int devfn, unsigned int devid)
{
	int err;
	int len;
	char str[64];

	len = snprintf(str, sizeof(str), "vdev-%d", devid);
	if (unlikely(len >= (sizeof(str) - 1))) {
		err = -ENOMEM;
		goto out;
	}

	/* Note: The PV protocol uses %02x, don't change it */
	err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
			    "%04x:%02x:%02x.%02x", domain, bus,
			    PCI_SLOT(devfn), PCI_FUNC(devfn));

out:
	return err;
}

/* Claim a device from pcistub, register it with this backend (publishing
 * its vdev node) and take ownership for the frontend domain.  Ownership is
 * forcibly stolen (with a warning) if another domain already held it. */
static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
				 int domain, int bus, int slot, int func,
				 int devid)
{
	struct pci_dev *dev;
	int err = 0;

	dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
		domain, bus, slot, func);

	dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func);
	if (!dev) {
		err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Couldn't locate PCI device "
				 "(%04x:%02x:%02x.%d)! "
				 "perhaps already in-use?",
				 domain, bus, slot, func);
		goto out;
	}

	err = xen_pcibk_add_pci_dev(pdev, dev, devid,
				    xen_pcibk_publish_pci_dev);
	if (err)
		goto out;

	dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
	if (xen_register_device_domain_owner(dev,
					     pdev->xdev->otherend_id) != 0) {
		dev_err(&dev->dev, "Stealing ownership from dom%d.\n",
			xen_find_device_domain_owner(dev));
		xen_unregister_device_domain_owner(dev);
		xen_register_device_domain_owner(dev, pdev->xdev->otherend_id);
	}

	/* TODO: It'd be nice to export a bridge and have all of its children
	 * get exported with it. This may be best done in xend (which will
	 * have to calculate resource usage anyway) but we probably want to
	 * put something in here to ensure that if a bridge gets given to a
	 * driver domain, that all devices under that bridge are not given
	 * to other driver domains (as he who controls the bridge can disable
	 * it and stop the other devices from working).
	 */
out:
	return err;
}

/* Reverse of xen_pcibk_export_device: release domain ownership and detach
 * the device from this backend. */
static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
				 int domain, int bus, int slot, int func)
{
	int err = 0;
	struct pci_dev *dev;

	dev_dbg(&pdev->xdev->dev, "removing dom %x bus %x slot %x func %x\n",
		domain, bus, slot, func);

	dev = xen_pcibk_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func));
	if (!dev) {
		err = -EINVAL;
		dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device "
			"(%04x:%02x:%02x.%d)! not owned by this domain\n",
			domain, bus, slot, func);
		goto out;
	}

	dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
	xen_unregister_device_domain_owner(dev);

	xen_pcibk_release_pci_dev(pdev, dev);

out:
	return err;
}

/* Record a PCI root bus ("root-N = DDDD:BB") in xenstore, de-duplicating
 * against roots already published, and bump "root_num". */
static int xen_pcibk_publish_pci_root(struct xen_pcibk_device *pdev,
				    unsigned int domain, unsigned int bus)
{
	unsigned int d, b;
	int i, root_num, len, err;
	char str[64];

	dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");

	err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
			   "root_num", "%d", &root_num);
	if (err == 0 || err == -ENOENT)
		root_num = 0;
	else if (err < 0)
		goto out;

	/* Verify that we haven't already published this pci root */
	for (i = 0; i < root_num; i++) {
		len = snprintf(str, sizeof(str), "root-%d", i);
		if (unlikely(len >= (sizeof(str) - 1))) {
			err = -ENOMEM;
			goto out;
		}

		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
				   str, "%x:%x", &d, &b);
		if (err < 0)
			goto out;
		if (err != 2) {
			err = -EINVAL;
			goto out;
		}

		if (d == domain && b == bus) {
			err = 0;
			goto out;
		}
	}

	len = snprintf(str, sizeof(str), "root-%d", root_num);
	if (unlikely(len >= (sizeof(str) - 1))) {
		err = -ENOMEM;
		goto out;
	}

	dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
		root_num, domain, bus);

	err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
			    "%04x:%02x", domain, bus);
	if (err)
		goto out;

	err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
			    "root_num", "%d", (root_num + 1));

out:
	return err;
}

/* Handle a frontend-initiated reconfiguration: walk every "state-N" node
 * and hot-add (Initialising) or hot-remove (Closing) the corresponding
 * device, then switch this end to Reconfigured.
 * NOTE(review): always returns 0 -- errors recorded in 'err' are reported
 * via xenbus_dev_fatal() but discarded; the only caller ignores the return
 * value anyway. */
static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
{
	int err = 0;
	int num_devs;
	int domain, bus, slot, func;
	int substate;
	int i, len;
	char state_str[64];
	char dev_str[64];


	dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");

	mutex_lock(&pdev->dev_lock);
	/* Make sure we only reconfigure once */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateReconfiguring)
		goto out;

	err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
			   &num_devs);
	if (err != 1) {
		if (err >= 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of devices");
		goto out;
	}

	for (i = 0; i < num_devs; i++) {
		len = snprintf(state_str, sizeof(state_str), "state-%d", i);
		if (unlikely(len >= (sizeof(state_str) - 1))) {
			err = -ENOMEM;
			xenbus_dev_fatal(pdev->xdev, err,
					 "String overflow while reading "
					 "configuration");
			goto out;
		}
		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
				   "%d", &substate);
		if (err != 1)
			substate = XenbusStateUnknown;

		switch (substate) {
		case XenbusStateInitialising:
			dev_dbg(&pdev->xdev->dev, "Attaching dev-%d ...\n", i);

			len = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
			if (unlikely(len >= (sizeof(dev_str) - 1))) {
				err = -ENOMEM;
				xenbus_dev_fatal(pdev->xdev, err,
						 "String overflow while "
						 "reading configuration");
				goto out;
			}
			err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
					   dev_str, "%x:%x:%x.%x",
					   &domain, &bus, &slot, &func);
			if (err < 0) {
				xenbus_dev_fatal(pdev->xdev, err,
						 "Error reading device "
						 "configuration");
				goto out;
			}
			if (err != 4) {
				err = -EINVAL;
				xenbus_dev_fatal(pdev->xdev, err,
						 "Error parsing pci device "
						 "configuration");
				goto out;
			}

			err = xen_pcibk_export_device(pdev, domain, bus, slot,
						    func, i);
			if (err)
				goto out;

			/* Publish pci roots. */
			err = xen_pcibk_publish_pci_roots(pdev,
						xen_pcibk_publish_pci_root);
			if (err) {
				xenbus_dev_fatal(pdev->xdev, err,
						 "Error while publish PCI root"
						 "buses for frontend");
				goto out;
			}

			err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
					    state_str, "%d",
					    XenbusStateInitialised);
			if (err) {
				xenbus_dev_fatal(pdev->xdev, err,
						 "Error switching substate of "
						 "dev-%d\n", i);
				goto out;
			}
			break;

		case XenbusStateClosing:
			dev_dbg(&pdev->xdev->dev, "Detaching dev-%d ...\n", i);

			len = snprintf(dev_str, sizeof(dev_str), "vdev-%d", i);
			if (unlikely(len >= (sizeof(dev_str) - 1))) {
				err = -ENOMEM;
				xenbus_dev_fatal(pdev->xdev, err,
						 "String overflow while "
						 "reading configuration");
				goto out;
			}
			err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
					   dev_str, "%x:%x:%x.%x",
					   &domain, &bus, &slot, &func);
			if (err < 0) {
				xenbus_dev_fatal(pdev->xdev, err,
						 "Error reading device "
						 "configuration");
				goto out;
			}
			if (err != 4) {
				err = -EINVAL;
				xenbus_dev_fatal(pdev->xdev, err,
						 "Error parsing pci device "
						 "configuration");
				goto out;
			}

			err = xen_pcibk_remove_device(pdev, domain, bus, slot,
						    func);
			if (err)
				goto out;

			/* TODO: If at some point we implement support for pci
			 * root hot-remove on pcifront side, we'll need to
			 * remove unnecessary xenstore nodes of pci roots here.
			 */

			break;

		default:
			break;
		}
	}

	err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
	if (err) {
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error switching to reconfigured state!");
		goto out;
	}

out:
	mutex_unlock(&pdev->dev_lock);
	return 0;
}

/* xenbus otherend_changed callback: drive this backend's state machine in
 * response to frontend state transitions. */
static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
				     enum xenbus_state fe_state)
{
	struct xen_pcibk_device *pdev = dev_get_drvdata(&xdev->dev);

	dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);

	switch (fe_state) {
	case XenbusStateInitialised:
		xen_pcibk_attach(pdev);
		break;

	case XenbusStateReconfiguring:
		xen_pcibk_reconfigure(pdev);
		break;

	case XenbusStateConnected:
		/* pcifront switched its state from reconfiguring to connected.
		 * Then switch to connected state.
		 */
		xenbus_switch_state(xdev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		xen_pcibk_disconnect(pdev);
		xenbus_switch_state(xdev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_pcibk_disconnect(pdev);
		xenbus_switch_state(xdev, XenbusStateClosed);
		if (xenbus_dev_is_online(xdev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
		device_unregister(&xdev->dev);
		break;

	default:
		break;
	}
}

/* Initial backend setup driven by the toolstack's configuration in
 * xenstore: export each "dev-N" device, publish root buses, switch to
 * Initialised, and finally try an attach in case the frontend already
 * published its half. */
static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)
{
	/* Get configuration from xend (if available now) */
	int domain, bus, slot, func;
	int err = 0;
	int i, num_devs;
	char dev_str[64];
	char state_str[64];

	mutex_lock(&pdev->dev_lock);
	/* It's possible we could get the call to setup twice, so make sure
	 * we're not already connected.
	 */
	if (xenbus_read_driver_state(pdev->xdev->nodename) !=
	    XenbusStateInitWait)
		goto out;

	dev_dbg(&pdev->xdev->dev, "getting be setup\n");

	err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
			   &num_devs);
	if (err != 1) {
		if (err >= 0)
			err = -EINVAL;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error reading number of devices");
		goto out;
	}

	for (i = 0; i < num_devs; i++) {
		int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
		if (unlikely(l >= (sizeof(dev_str) - 1))) {
			err = -ENOMEM;
			xenbus_dev_fatal(pdev->xdev, err,
					 "String overflow while reading "
					 "configuration");
			goto out;
		}

		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, dev_str,
				   "%x:%x:%x.%x", &domain, &bus, &slot, &func);
		if (err < 0) {
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error reading device configuration");
			goto out;
		}
		if (err != 4) {
			err = -EINVAL;
			xenbus_dev_fatal(pdev->xdev, err,
					 "Error parsing pci device "
					 "configuration");
			goto out;
		}

		err = xen_pcibk_export_device(pdev, domain, bus, slot, func, i);
		if (err)
			goto out;

		/* Switch substate of this device. */
		l = snprintf(state_str, sizeof(state_str), "state-%d", i);
		if (unlikely(l >= (sizeof(state_str) - 1))) {
			err = -ENOMEM;
			xenbus_dev_fatal(pdev->xdev, err,
					 "String overflow while reading "
					 "configuration");
			goto out;
		}
		err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, state_str,
				    "%d", XenbusStateInitialised);
		if (err) {
			xenbus_dev_fatal(pdev->xdev, err, "Error switching "
					 "substate of dev-%d\n", i);
			goto out;
		}
	}

	err = xen_pcibk_publish_pci_roots(pdev, xen_pcibk_publish_pci_root);
	if (err) {
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error while publish PCI root buses "
				 "for frontend");
		goto out;
	}

	err = xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
	if (err)
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error switching to initialised state!");

out:
	mutex_unlock(&pdev->dev_lock);
	if (!err)
		/* see if pcifront is already configured (if not, we'll wait) */
		xen_pcibk_attach(pdev);
	return err;
}

/* Watch callback on our own xenstore node: (re)runs backend setup whenever
 * the node changes while we are still in InitWait. */
static void xen_pcibk_be_watch(struct xenbus_watch *watch,
			     const char **vec, unsigned int len)
{
	struct xen_pcibk_device *pdev =
	    container_of(watch, struct xen_pcibk_device, be_watch);

	switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
	case XenbusStateInitWait:
		xen_pcibk_setup_backend(pdev);
		break;

	default:
		break;
	}
}

/* xenbus probe: allocate the backend device, announce InitWait, and watch
 * our own node for toolstack-written configuration. */
static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
				const struct xenbus_device_id *id)
{
	int err = 0;
	struct xen_pcibk_device *pdev = alloc_pdev(dev);

	if (pdev == NULL) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err,
				 "Error allocating xen_pcibk_device struct");
		goto out;
	}

	/* wait for xend to configure us */
	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto out;

	/* watch the backend node for backend configuration information */
	err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
				xen_pcibk_be_watch);
	if (err)
		goto out;

	pdev->be_watching = 1;

	/* We need to force a call to our callback here in case
	 * xend already configured us!
	 */
	xen_pcibk_be_watch(&pdev->be_watch, NULL, 0);

out:
	return err;
}

/* xenbus remove: free everything alloc_pdev()/probe set up. */
static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
{
	struct xen_pcibk_device *pdev = dev_get_drvdata(&dev->dev);

	if (pdev != NULL)
		free_pdev(pdev);

	return 0;
}

static const struct xenbus_device_id xen_pcibk_ids[] = {
	{"pci"},
	{""},
};

static DEFINE_XENBUS_DRIVER(xen_pcibk, DRV_NAME,
	.probe			= xen_pcibk_xenbus_probe,
	.remove			= xen_pcibk_xenbus_remove,
	.otherend_changed	= xen_pcibk_frontend_changed,
);

const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;

/* Module-init helper: create the op workqueue, pick the vpci or
 * passthrough topology backend per the module parameter, and register with
 * xenbus. */
int __init xen_pcibk_xenbus_register(void)
{
	xen_pcibk_wq = create_workqueue("xen_pciback_workqueue");
	if (!xen_pcibk_wq) {
		/* NOTE(review): adjacent literals render as
		 * "createxen_pciback_workqueue" -- missing space. */
		printk(KERN_ERR "%s: create"
			"xen_pciback_workqueue failed\n", __func__);
		return -EFAULT;
	}
	xen_pcibk_backend = &xen_pcibk_vpci_backend;
	if (passthrough)
		xen_pcibk_backend = &xen_pcibk_passthrough_backend;
	pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name);
	return xenbus_register_backend(&xen_pcibk_driver);
}

/* Module-exit helper: tear down the workqueue and the xenbus driver. */
void __exit xen_pcibk_xenbus_unregister(void)
{
	destroy_workqueue(xen_pcibk_wq);
	xenbus_unregister_driver(&xen_pcibk_driver);
}
gpl-2.0
SkrilaxCZ/android_kernel_moto_asanti_c
arch/xtensa/kernel/pci-dma.c
8045
2359
/* * arch/xtensa/kernel/pci-dma.c * * DMA coherent memory allocation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Copyright (C) 2002 - 2005 Tensilica Inc. * * Based on version for i386. * * Chris Zankel <chris@zankel.net> * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> */ #include <linux/types.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/gfp.h> #include <asm/io.h> #include <asm/cacheflush.h> /* * Note: We assume that the full memory space is always mapped to 'kseg' * Otherwise we have to use page attributes (not implemented). */ void * dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) { unsigned long ret; unsigned long uncached = 0; /* ignore region speicifiers */ flag &= ~(__GFP_DMA | __GFP_HIGHMEM); if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) flag |= GFP_DMA; ret = (unsigned long)__get_free_pages(flag, get_order(size)); if (ret == 0) return NULL; /* We currently don't support coherent memory outside KSEG */ if (ret < XCHAL_KSEG_CACHED_VADDR || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE) BUG(); if (ret != 0) { memset((void*) ret, 0, size); uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR; *handle = virt_to_bus((void*)ret); __flush_invalidate_dcache_range(ret, size); } return (void*)uncached; } void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR; if (addr < 0 || addr >= XCHAL_KSEG_SIZE) BUG(); free_pages(addr, get_order(size)); } void consistent_sync(void *vaddr, size_t size, int direction) { switch (direction) { case PCI_DMA_NONE: BUG(); case PCI_DMA_FROMDEVICE: /* invalidate only */ __invalidate_dcache_range((unsigned long)vaddr, 
(unsigned long)size); break; case PCI_DMA_TODEVICE: /* writeback only */ case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ __flush_invalidate_dcache_range((unsigned long)vaddr, (unsigned long)size); break; } }
gpl-2.0
varunchitre15/android_kernel_xperiaL
drivers/net/hamradio/baycom_par.c
9325
16824
/*****************************************************************************/ /* * baycom_par.c -- baycom par96 and picpar radio modem driver. * * Copyright (C) 1996-2000 Thomas Sailer (sailer@ife.ee.ethz.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Please note that the GPL allows you to use the driver, NOT the radio. * In order to use the radio, you need a license from the communications * authority of your country. * * * Supported modems * * par96: This is a modem for 9600 baud FSK compatible to the G3RUH standard. * The modem does all the filtering and regenerates the receiver clock. * Data is transferred from and to the PC via a shift register. * The shift register is filled with 16 bits and an interrupt is * signalled. The PC then empties the shift register in a burst. This * modem connects to the parallel port, hence the name. The modem * leaves the implementation of the HDLC protocol and the scrambler * polynomial to the PC. This modem is no longer available (at least * from Baycom) and has been replaced by the PICPAR modem (see below). * You may however still build one from the schematics published in * cq-DL :-). * * picpar: This is a redesign of the par96 modem by Henning Rech, DF9IC. 
The * modem is protocol compatible to par96, but uses only three low * power ICs and can therefore be fed from the parallel port and * does not require an additional power supply. It features * built in DCD circuitry. The driver should therefore be configured * for hardware DCD. * * * Command line options (insmod command line) * * mode driver mode string. Valid choices are par96 and picpar. * iobase base address of the port; common values are 0x378, 0x278, 0x3bc * * * History: * 0.1 26.06.1996 Adapted from baycom.c and made network driver interface * 18.10.1996 Changed to new user space access routines (copy_{to,from}_user) * 0.3 26.04.1997 init code/data tagged * 0.4 08.07.1997 alternative ser12 decoding algorithm (uses delta CTS ints) * 0.5 11.11.1997 split into separate files for ser12/par96 * 0.6 03.08.1999 adapt to Linus' new __setup/__initcall * removed some pre-2.2 kernel compatibility cruft * 0.7 10.08.1999 Check if parport can do SPP and is safe to access during interrupt contexts * 0.8 12.02.2000 adapted to softnet driver interface * removed direct parport access, uses parport driver methods * 0.9 03.07.2000 fix interface name handling */ /*****************************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/hdlcdrv.h> #include <linux/baycom.h> #include <linux/parport.h> #include <linux/bitops.h> #include <linux/jiffies.h> #include <asm/uaccess.h> /* --------------------------------------------------------------------- */ #define BAYCOM_DEBUG /* * modem options; bit mask */ #define BAYCOM_OPTIONS_SOFTDCD 1 /* --------------------------------------------------------------------- */ static const char bc_drvname[] = "baycom_par"; 
static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" "baycom_par: version 0.9\n"; /* --------------------------------------------------------------------- */ #define NR_PORTS 4 static struct net_device *baycom_device[NR_PORTS]; /* --------------------------------------------------------------------- */ #define PAR96_BURSTBITS 16 #define PAR96_BURST 4 #define PAR96_PTT 2 #define PAR96_TXBIT 1 #define PAR96_ACK 0x40 #define PAR96_RXBIT 0x20 #define PAR96_DCD 0x10 #define PAR97_POWER 0xf8 /* ---------------------------------------------------------------------- */ /* * Information that need to be kept for each board. */ struct baycom_state { struct hdlcdrv_state hdrv; struct pardevice *pdev; unsigned int options; struct modem_state { short arb_divider; unsigned char flags; unsigned int shreg; struct modem_state_par96 { int dcd_count; unsigned int dcd_shreg; unsigned long descram; unsigned long scram; } par96; } modem; #ifdef BAYCOM_DEBUG struct debug_vals { unsigned long last_jiffies; unsigned cur_intcnt; unsigned last_intcnt; int cur_pllcorr; int last_pllcorr; } debug_vals; #endif /* BAYCOM_DEBUG */ }; /* --------------------------------------------------------------------- */ static void __inline__ baycom_int_freq(struct baycom_state *bc) { #ifdef BAYCOM_DEBUG unsigned long cur_jiffies = jiffies; /* * measure the interrupt frequency */ bc->debug_vals.cur_intcnt++; if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) { bc->debug_vals.last_jiffies = cur_jiffies; bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; bc->debug_vals.cur_intcnt = 0; bc->debug_vals.last_pllcorr = bc->debug_vals.cur_pllcorr; bc->debug_vals.cur_pllcorr = 0; } #endif /* BAYCOM_DEBUG */ } /* --------------------------------------------------------------------- */ /* * ===================== PAR96 specific routines ========================= */ #define PAR96_DESCRAM_TAP1 0x20000 #define PAR96_DESCRAM_TAP2 0x01000 #define 
PAR96_DESCRAM_TAP3 0x00001 #define PAR96_DESCRAM_TAPSH1 17 #define PAR96_DESCRAM_TAPSH2 12 #define PAR96_DESCRAM_TAPSH3 0 #define PAR96_SCRAM_TAP1 0x20000 /* X^17 */ #define PAR96_SCRAM_TAPN 0x00021 /* X^0+X^5 */ /* --------------------------------------------------------------------- */ static __inline__ void par96_tx(struct net_device *dev, struct baycom_state *bc) { int i; unsigned int data = hdlcdrv_getbits(&bc->hdrv); struct parport *pp = bc->pdev->port; for(i = 0; i < PAR96_BURSTBITS; i++, data >>= 1) { unsigned char val = PAR97_POWER; bc->modem.par96.scram = ((bc->modem.par96.scram << 1) | (bc->modem.par96.scram & 1)); if (!(data & 1)) bc->modem.par96.scram ^= 1; if (bc->modem.par96.scram & (PAR96_SCRAM_TAP1 << 1)) bc->modem.par96.scram ^= (PAR96_SCRAM_TAPN << 1); if (bc->modem.par96.scram & (PAR96_SCRAM_TAP1 << 2)) val |= PAR96_TXBIT; pp->ops->write_data(pp, val); pp->ops->write_data(pp, val | PAR96_BURST); } } /* --------------------------------------------------------------------- */ static __inline__ void par96_rx(struct net_device *dev, struct baycom_state *bc) { int i; unsigned int data, mask, mask2, descx; struct parport *pp = bc->pdev->port; /* * do receiver; differential decode and descramble on the fly */ for(data = i = 0; i < PAR96_BURSTBITS; i++) { bc->modem.par96.descram = (bc->modem.par96.descram << 1); if (pp->ops->read_status(pp) & PAR96_RXBIT) bc->modem.par96.descram |= 1; descx = bc->modem.par96.descram ^ (bc->modem.par96.descram >> 1); /* now the diff decoded data is inverted in descram */ pp->ops->write_data(pp, PAR97_POWER | PAR96_PTT); descx ^= ((descx >> PAR96_DESCRAM_TAPSH1) ^ (descx >> PAR96_DESCRAM_TAPSH2)); data >>= 1; if (!(descx & 1)) data |= 0x8000; pp->ops->write_data(pp, PAR97_POWER | PAR96_PTT | PAR96_BURST); } hdlcdrv_putbits(&bc->hdrv, data); /* * do DCD algorithm */ if (bc->options & BAYCOM_OPTIONS_SOFTDCD) { bc->modem.par96.dcd_shreg = (bc->modem.par96.dcd_shreg >> 16) | (data << 16); /* search for flags and set the dcd 
counter appropriately */ for(mask = 0x1fe00, mask2 = 0xfc00, i = 0; i < PAR96_BURSTBITS; i++, mask <<= 1, mask2 <<= 1) if ((bc->modem.par96.dcd_shreg & mask) == mask2) bc->modem.par96.dcd_count = HDLCDRV_MAXFLEN+4; /* check for abort/noise sequences */ for(mask = 0x1fe00, mask2 = 0x1fe00, i = 0; i < PAR96_BURSTBITS; i++, mask <<= 1, mask2 <<= 1) if (((bc->modem.par96.dcd_shreg & mask) == mask2) && (bc->modem.par96.dcd_count >= 0)) bc->modem.par96.dcd_count -= HDLCDRV_MAXFLEN-10; /* decrement and set the dcd variable */ if (bc->modem.par96.dcd_count >= 0) bc->modem.par96.dcd_count -= 2; hdlcdrv_setdcd(&bc->hdrv, bc->modem.par96.dcd_count > 0); } else { hdlcdrv_setdcd(&bc->hdrv, !!(pp->ops->read_status(pp) & PAR96_DCD)); } } /* --------------------------------------------------------------------- */ static void par96_interrupt(void *dev_id) { struct net_device *dev = dev_id; struct baycom_state *bc = netdev_priv(dev); baycom_int_freq(bc); /* * check if transmitter active */ if (hdlcdrv_ptt(&bc->hdrv)) par96_tx(dev, bc); else { par96_rx(dev, bc); if (--bc->modem.arb_divider <= 0) { bc->modem.arb_divider = 6; local_irq_enable(); hdlcdrv_arbitrate(dev, &bc->hdrv); } } local_irq_enable(); hdlcdrv_transmitter(dev, &bc->hdrv); hdlcdrv_receiver(dev, &bc->hdrv); local_irq_disable(); } /* --------------------------------------------------------------------- */ static void par96_wakeup(void *handle) { struct net_device *dev = (struct net_device *)handle; struct baycom_state *bc = netdev_priv(dev); printk(KERN_DEBUG "baycom_par: %s: why am I being woken up?\n", dev->name); if (!parport_claim(bc->pdev)) printk(KERN_DEBUG "baycom_par: %s: I'm broken.\n", dev->name); } /* --------------------------------------------------------------------- */ static int par96_open(struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); struct parport *pp; if (!dev || !bc) return -ENXIO; pp = parport_find_base(dev->base_addr); if (!pp) { printk(KERN_ERR "baycom_par: parport at 0x%lx 
unknown\n", dev->base_addr); return -ENXIO; } if (pp->irq < 0) { printk(KERN_ERR "baycom_par: parport at 0x%lx has no irq\n", pp->base); parport_put_port(pp); return -ENXIO; } if ((~pp->modes) & (PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT)) { printk(KERN_ERR "baycom_par: parport at 0x%lx cannot be used\n", pp->base); parport_put_port(pp); return -ENXIO; } memset(&bc->modem, 0, sizeof(bc->modem)); bc->hdrv.par.bitrate = 9600; bc->pdev = parport_register_device(pp, dev->name, NULL, par96_wakeup, par96_interrupt, PARPORT_DEV_EXCL, dev); parport_put_port(pp); if (!bc->pdev) { printk(KERN_ERR "baycom_par: cannot register parport at 0x%lx\n", dev->base_addr); return -ENXIO; } if (parport_claim(bc->pdev)) { printk(KERN_ERR "baycom_par: parport at 0x%lx busy\n", pp->base); parport_unregister_device(bc->pdev); return -EBUSY; } pp = bc->pdev->port; dev->irq = pp->irq; pp->ops->data_forward(pp); bc->hdrv.par.bitrate = 9600; pp->ops->write_data(pp, PAR96_PTT | PAR97_POWER); /* switch off PTT */ pp->ops->enable_irq(pp); printk(KERN_INFO "%s: par96 at iobase 0x%lx irq %u options 0x%x\n", bc_drvname, dev->base_addr, dev->irq, bc->options); return 0; } /* --------------------------------------------------------------------- */ static int par96_close(struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); struct parport *pp; if (!dev || !bc) return -EINVAL; pp = bc->pdev->port; /* disable interrupt */ pp->ops->disable_irq(pp); /* switch off PTT */ pp->ops->write_data(pp, PAR96_PTT | PAR97_POWER); parport_release(bc->pdev); parport_unregister_device(bc->pdev); printk(KERN_INFO "%s: close par96 at iobase 0x%lx irq %u\n", bc_drvname, dev->base_addr, dev->irq); return 0; } /* --------------------------------------------------------------------- */ /* * ===================== hdlcdrv driver interface ========================= */ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, struct hdlcdrv_ioctl *hi, int cmd); /* 
--------------------------------------------------------------------- */ static struct hdlcdrv_ops par96_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = par96_open, .close = par96_close, .ioctl = baycom_ioctl }; /* --------------------------------------------------------------------- */ static int baycom_setmode(struct baycom_state *bc, const char *modestr) { if (!strncmp(modestr, "picpar", 6)) bc->options = 0; else if (!strncmp(modestr, "par96", 5)) bc->options = BAYCOM_OPTIONS_SOFTDCD; else bc->options = !!strchr(modestr, '*'); return 0; } /* --------------------------------------------------------------------- */ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, struct hdlcdrv_ioctl *hi, int cmd) { struct baycom_state *bc; struct baycom_ioctl bi; if (!dev) return -EINVAL; bc = netdev_priv(dev); BUG_ON(bc->hdrv.magic != HDLCDRV_MAGIC); if (cmd != SIOCDEVPRIVATE) return -ENOIOCTLCMD; switch (hi->cmd) { default: break; case HDLCDRVCTL_GETMODE: strcpy(hi->data.modename, bc->options ? 
"par96" : "picpar"); if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; case HDLCDRVCTL_SETMODE: if (netif_running(dev) || !capable(CAP_NET_ADMIN)) return -EACCES; hi->data.modename[sizeof(hi->data.modename)-1] = '\0'; return baycom_setmode(bc, hi->data.modename); case HDLCDRVCTL_MODELIST: strcpy(hi->data.modename, "par96,picpar"); if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; case HDLCDRVCTL_MODEMPARMASK: return HDLCDRV_PARMASK_IOBASE; } if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi))) return -EFAULT; switch (bi.cmd) { default: return -ENOIOCTLCMD; #ifdef BAYCOM_DEBUG case BAYCOMCTL_GETDEBUG: bi.data.dbg.debug1 = bc->hdrv.ptt_keyed; bi.data.dbg.debug2 = bc->debug_vals.last_intcnt; bi.data.dbg.debug3 = bc->debug_vals.last_pllcorr; break; #endif /* BAYCOM_DEBUG */ } if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi))) return -EFAULT; return 0; } /* --------------------------------------------------------------------- */ /* * command line settable parameters */ static char *mode[NR_PORTS] = { "picpar", }; static int iobase[NR_PORTS] = { 0x378, }; module_param_array(mode, charp, NULL, 0); MODULE_PARM_DESC(mode, "baycom operating mode; eg. par96 or picpar"); module_param_array(iobase, int, NULL, 0); MODULE_PARM_DESC(iobase, "baycom io base address"); MODULE_AUTHOR("Thomas M. 
Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu"); MODULE_DESCRIPTION("Baycom par96 and picpar amateur radio modem driver"); MODULE_LICENSE("GPL"); /* --------------------------------------------------------------------- */ static int __init init_baycompar(void) { int i, found = 0; char set_hw = 1; printk(bc_drvinfo); /* * register net devices */ for (i = 0; i < NR_PORTS; i++) { struct net_device *dev; struct baycom_state *bc; char ifname[IFNAMSIZ]; sprintf(ifname, "bcp%d", i); if (!mode[i]) set_hw = 0; if (!set_hw) iobase[i] = 0; dev = hdlcdrv_register(&par96_ops, sizeof(struct baycom_state), ifname, iobase[i], 0, 0); if (IS_ERR(dev)) break; bc = netdev_priv(dev); if (set_hw && baycom_setmode(bc, mode[i])) set_hw = 0; found++; baycom_device[i] = dev; } if (!found) return -ENXIO; return 0; } static void __exit cleanup_baycompar(void) { int i; for(i = 0; i < NR_PORTS; i++) { struct net_device *dev = baycom_device[i]; if (dev) hdlcdrv_unregister(dev); } } module_init(init_baycompar); module_exit(cleanup_baycompar); /* --------------------------------------------------------------------- */ #ifndef MODULE /* * format: baycom_par=io,mode * mode: par96,picpar */ static int __init baycom_par_setup(char *str) { static unsigned nr_dev; int ints[2]; if (nr_dev >= NR_PORTS) return 0; str = get_options(str, 2, ints); if (ints[0] < 1) return 0; mode[nr_dev] = str; iobase[nr_dev] = ints[1]; nr_dev++; return 1; } __setup("baycom_par=", baycom_par_setup); #endif /* MODULE */ /* --------------------------------------------------------------------- */
gpl-2.0
BrandonSchaefer/xbmc
xbmc/platform/win32/WindowHelper.cpp
110
1973
/* * Copyright (C) 2005-2013 Team XBMC * http://xbmc.org * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "WindowHelper.h" extern HWND g_hWnd; CWHelper g_windowHelper; CWHelper::CWHelper(void) : CThread("WindowHelper") { m_hwnd = NULL; m_hProcess = NULL; } CWHelper::~CWHelper(void) { StopThread(); m_hwnd = NULL; if(m_hProcess != NULL) { CloseHandle(m_hProcess); m_hProcess = NULL; } } void CWHelper::OnStartup() { if((m_hwnd == NULL) && (m_hProcess == NULL)) return; // Minimize XBMC if not already ShowWindow(g_hWnd,SW_MINIMIZE); if(m_hwnd != NULL) ShowWindow(m_hwnd,SW_RESTORE); OutputDebugString("WindowHelper thread started\n"); } void CWHelper::OnExit() { // Bring back XBMC window ShowWindow(g_hWnd,SW_RESTORE); SetForegroundWindow(g_hWnd); m_hwnd = NULL; if(m_hProcess != NULL) { CloseHandle(m_hProcess); m_hProcess = NULL; } LockSetForegroundWindow(LSFW_LOCK); OutputDebugString("WindowHelper thread ended\n"); } void CWHelper::Process() { while (( !m_bStop )) { if(WaitForSingleObject(m_hProcess,500) != WAIT_TIMEOUT) break; /*if((m_hwnd != NULL) && (IsIconic(m_hwnd) == TRUE)) break;*/ } } void CWHelper::SetHWND(HWND hwnd) { m_hwnd = hwnd; } void CWHelper::SetHANDLE(HANDLE hProcess) { m_hProcess = hProcess; }
gpl-2.0
BenHuiHui/linux
net/sunrpc/auth.c
622
21408
/* * linux/net/sunrpc/auth.c * * Generic RPC client authentication API. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/hash.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/gss_api.h> #include <linux/spinlock.h> #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define RPCDBG_FACILITY RPCDBG_AUTH #endif #define RPC_CREDCACHE_DEFAULT_HASHBITS (4) struct rpc_cred_cache { struct hlist_head *hashtable; unsigned int hashbits; spinlock_t lock; }; static unsigned int auth_hashbits = RPC_CREDCACHE_DEFAULT_HASHBITS; static DEFINE_SPINLOCK(rpc_authflavor_lock); static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { &authnull_ops, /* AUTH_NULL */ &authunix_ops, /* AUTH_UNIX */ NULL, /* others can be loadable modules */ }; static LIST_HEAD(cred_unused); static unsigned long number_cred_unused; #define MAX_HASHTABLE_BITS (14) static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp) { unsigned long num; unsigned int nbits; int ret; if (!val) goto out_inval; ret = kstrtoul(val, 0, &num); if (ret == -EINVAL) goto out_inval; nbits = fls(num); if (num > (1U << nbits)) nbits++; if (nbits > MAX_HASHTABLE_BITS || nbits < 2) goto out_inval; *(unsigned int *)kp->arg = nbits; return 0; out_inval: return -EINVAL; } static int param_get_hashtbl_sz(char *buffer, const struct kernel_param *kp) { unsigned int nbits; nbits = *(unsigned int *)kp->arg; return sprintf(buffer, "%u", 1U << nbits); } #define param_check_hashtbl_sz(name, p) __param_check(name, p, unsigned int); static const struct kernel_param_ops param_ops_hashtbl_sz = { .set = param_set_hashtbl_sz, .get = param_get_hashtbl_sz, }; module_param_named(auth_hashtable_size, auth_hashbits, hashtbl_sz, 0644); MODULE_PARM_DESC(auth_hashtable_size, "RPC credential cache hashtable size"); static unsigned long auth_max_cred_cachesize = ULONG_MAX; 
module_param(auth_max_cred_cachesize, ulong, 0644); MODULE_PARM_DESC(auth_max_cred_cachesize, "RPC credential maximum total cache size"); static u32 pseudoflavor_to_flavor(u32 flavor) { if (flavor > RPC_AUTH_MAXFLAVOR) return RPC_AUTH_GSS; return flavor; } int rpcauth_register(const struct rpc_authops *ops) { rpc_authflavor_t flavor; int ret = -EPERM; if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) return -EINVAL; spin_lock(&rpc_authflavor_lock); if (auth_flavors[flavor] == NULL) { auth_flavors[flavor] = ops; ret = 0; } spin_unlock(&rpc_authflavor_lock); return ret; } EXPORT_SYMBOL_GPL(rpcauth_register); int rpcauth_unregister(const struct rpc_authops *ops) { rpc_authflavor_t flavor; int ret = -EPERM; if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) return -EINVAL; spin_lock(&rpc_authflavor_lock); if (auth_flavors[flavor] == ops) { auth_flavors[flavor] = NULL; ret = 0; } spin_unlock(&rpc_authflavor_lock); return ret; } EXPORT_SYMBOL_GPL(rpcauth_unregister); /** * rpcauth_get_pseudoflavor - check if security flavor is supported * @flavor: a security flavor * @info: a GSS mech OID, quality of protection, and service value * * Verifies that an appropriate kernel module is available or already loaded. * Returns an equivalent pseudoflavor, or RPC_AUTH_MAXFLAVOR if "flavor" is * not supported locally. 
*/ rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t flavor, struct rpcsec_gss_info *info) { const struct rpc_authops *ops; rpc_authflavor_t pseudoflavor; ops = auth_flavors[flavor]; if (ops == NULL) request_module("rpc-auth-%u", flavor); spin_lock(&rpc_authflavor_lock); ops = auth_flavors[flavor]; if (ops == NULL || !try_module_get(ops->owner)) { spin_unlock(&rpc_authflavor_lock); return RPC_AUTH_MAXFLAVOR; } spin_unlock(&rpc_authflavor_lock); pseudoflavor = flavor; if (ops->info2flavor != NULL) pseudoflavor = ops->info2flavor(info); module_put(ops->owner); return pseudoflavor; } EXPORT_SYMBOL_GPL(rpcauth_get_pseudoflavor); /** * rpcauth_get_gssinfo - find GSS tuple matching a GSS pseudoflavor * @pseudoflavor: GSS pseudoflavor to match * @info: rpcsec_gss_info structure to fill in * * Returns zero and fills in "info" if pseudoflavor matches a * supported mechanism. */ int rpcauth_get_gssinfo(rpc_authflavor_t pseudoflavor, struct rpcsec_gss_info *info) { rpc_authflavor_t flavor = pseudoflavor_to_flavor(pseudoflavor); const struct rpc_authops *ops; int result; if (flavor >= RPC_AUTH_MAXFLAVOR) return -EINVAL; ops = auth_flavors[flavor]; if (ops == NULL) request_module("rpc-auth-%u", flavor); spin_lock(&rpc_authflavor_lock); ops = auth_flavors[flavor]; if (ops == NULL || !try_module_get(ops->owner)) { spin_unlock(&rpc_authflavor_lock); return -ENOENT; } spin_unlock(&rpc_authflavor_lock); result = -ENOENT; if (ops->flavor2info != NULL) result = ops->flavor2info(pseudoflavor, info); module_put(ops->owner); return result; } EXPORT_SYMBOL_GPL(rpcauth_get_gssinfo); /** * rpcauth_list_flavors - discover registered flavors and pseudoflavors * @array: array to fill in * @size: size of "array" * * Returns the number of array items filled in, or a negative errno. * * The returned array is not sorted by any policy. Callers should not * rely on the order of the items in the returned array. 
*/ int rpcauth_list_flavors(rpc_authflavor_t *array, int size) { rpc_authflavor_t flavor; int result = 0; spin_lock(&rpc_authflavor_lock); for (flavor = 0; flavor < RPC_AUTH_MAXFLAVOR; flavor++) { const struct rpc_authops *ops = auth_flavors[flavor]; rpc_authflavor_t pseudos[4]; int i, len; if (result >= size) { result = -ENOMEM; break; } if (ops == NULL) continue; if (ops->list_pseudoflavors == NULL) { array[result++] = ops->au_flavor; continue; } len = ops->list_pseudoflavors(pseudos, ARRAY_SIZE(pseudos)); if (len < 0) { result = len; break; } for (i = 0; i < len; i++) { if (result >= size) { result = -ENOMEM; break; } array[result++] = pseudos[i]; } } spin_unlock(&rpc_authflavor_lock); dprintk("RPC: %s returns %d\n", __func__, result); return result; } EXPORT_SYMBOL_GPL(rpcauth_list_flavors); struct rpc_auth * rpcauth_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) { struct rpc_auth *auth; const struct rpc_authops *ops; u32 flavor = pseudoflavor_to_flavor(args->pseudoflavor); auth = ERR_PTR(-EINVAL); if (flavor >= RPC_AUTH_MAXFLAVOR) goto out; if ((ops = auth_flavors[flavor]) == NULL) request_module("rpc-auth-%u", flavor); spin_lock(&rpc_authflavor_lock); ops = auth_flavors[flavor]; if (ops == NULL || !try_module_get(ops->owner)) { spin_unlock(&rpc_authflavor_lock); goto out; } spin_unlock(&rpc_authflavor_lock); auth = ops->create(args, clnt); module_put(ops->owner); if (IS_ERR(auth)) return auth; if (clnt->cl_auth) rpcauth_release(clnt->cl_auth); clnt->cl_auth = auth; out: return auth; } EXPORT_SYMBOL_GPL(rpcauth_create); void rpcauth_release(struct rpc_auth *auth) { if (!atomic_dec_and_test(&auth->au_count)) return; auth->au_ops->destroy(auth); } static DEFINE_SPINLOCK(rpc_credcache_lock); static void rpcauth_unhash_cred_locked(struct rpc_cred *cred) { hlist_del_rcu(&cred->cr_hash); smp_mb__before_atomic(); clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); } static int rpcauth_unhash_cred(struct rpc_cred *cred) { spinlock_t *cache_lock; int 
ret; cache_lock = &cred->cr_auth->au_credcache->lock; spin_lock(cache_lock); ret = atomic_read(&cred->cr_count) == 0; if (ret) rpcauth_unhash_cred_locked(cred); spin_unlock(cache_lock); return ret; } /* * Initialize RPC credential cache */ int rpcauth_init_credcache(struct rpc_auth *auth) { struct rpc_cred_cache *new; unsigned int hashsize; new = kmalloc(sizeof(*new), GFP_KERNEL); if (!new) goto out_nocache; new->hashbits = auth_hashbits; hashsize = 1U << new->hashbits; new->hashtable = kcalloc(hashsize, sizeof(new->hashtable[0]), GFP_KERNEL); if (!new->hashtable) goto out_nohashtbl; spin_lock_init(&new->lock); auth->au_credcache = new; return 0; out_nohashtbl: kfree(new); out_nocache: return -ENOMEM; } EXPORT_SYMBOL_GPL(rpcauth_init_credcache); /* * Setup a credential key lifetime timeout notification */ int rpcauth_key_timeout_notify(struct rpc_auth *auth, struct rpc_cred *cred) { if (!cred->cr_auth->au_ops->key_timeout) return 0; return cred->cr_auth->au_ops->key_timeout(auth, cred); } EXPORT_SYMBOL_GPL(rpcauth_key_timeout_notify); bool rpcauth_cred_key_to_expire(struct rpc_cred *cred) { if (!cred->cr_ops->crkey_to_expire) return false; return cred->cr_ops->crkey_to_expire(cred); } EXPORT_SYMBOL_GPL(rpcauth_cred_key_to_expire); char * rpcauth_stringify_acceptor(struct rpc_cred *cred) { if (!cred->cr_ops->crstringify_acceptor) return NULL; return cred->cr_ops->crstringify_acceptor(cred); } EXPORT_SYMBOL_GPL(rpcauth_stringify_acceptor); /* * Destroy a list of credentials */ static inline void rpcauth_destroy_credlist(struct list_head *head) { struct rpc_cred *cred; while (!list_empty(head)) { cred = list_entry(head->next, struct rpc_cred, cr_lru); list_del_init(&cred->cr_lru); put_rpccred(cred); } } /* * Clear the RPC credential cache, and delete those credentials * that are not referenced. 
*/ void rpcauth_clear_credcache(struct rpc_cred_cache *cache) { LIST_HEAD(free); struct hlist_head *head; struct rpc_cred *cred; unsigned int hashsize = 1U << cache->hashbits; int i; spin_lock(&rpc_credcache_lock); spin_lock(&cache->lock); for (i = 0; i < hashsize; i++) { head = &cache->hashtable[i]; while (!hlist_empty(head)) { cred = hlist_entry(head->first, struct rpc_cred, cr_hash); get_rpccred(cred); if (!list_empty(&cred->cr_lru)) { list_del(&cred->cr_lru); number_cred_unused--; } list_add_tail(&cred->cr_lru, &free); rpcauth_unhash_cred_locked(cred); } } spin_unlock(&cache->lock); spin_unlock(&rpc_credcache_lock); rpcauth_destroy_credlist(&free); } /* * Destroy the RPC credential cache */ void rpcauth_destroy_credcache(struct rpc_auth *auth) { struct rpc_cred_cache *cache = auth->au_credcache; if (cache) { auth->au_credcache = NULL; rpcauth_clear_credcache(cache); kfree(cache->hashtable); kfree(cache); } } EXPORT_SYMBOL_GPL(rpcauth_destroy_credcache); #define RPC_AUTH_EXPIRY_MORATORIUM (60 * HZ) /* * Remove stale credentials. Avoid sleeping inside the loop. */ static long rpcauth_prune_expired(struct list_head *free, int nr_to_scan) { spinlock_t *cache_lock; struct rpc_cred *cred, *next; unsigned long expired = jiffies - RPC_AUTH_EXPIRY_MORATORIUM; long freed = 0; list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { if (nr_to_scan-- == 0) break; /* * Enforce a 60 second garbage collection moratorium * Note that the cred_unused list must be time-ordered. 
*/ if (time_in_range(cred->cr_expire, expired, jiffies) && test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) break; list_del_init(&cred->cr_lru); number_cred_unused--; freed++; if (atomic_read(&cred->cr_count) != 0) continue; cache_lock = &cred->cr_auth->au_credcache->lock; spin_lock(cache_lock); if (atomic_read(&cred->cr_count) == 0) { get_rpccred(cred); list_add_tail(&cred->cr_lru, free); rpcauth_unhash_cred_locked(cred); } spin_unlock(cache_lock); } return freed; } static unsigned long rpcauth_cache_do_shrink(int nr_to_scan) { LIST_HEAD(free); unsigned long freed; spin_lock(&rpc_credcache_lock); freed = rpcauth_prune_expired(&free, nr_to_scan); spin_unlock(&rpc_credcache_lock); rpcauth_destroy_credlist(&free); return freed; } /* * Run memory cache shrinker. */ static unsigned long rpcauth_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL) return SHRINK_STOP; /* nothing left, don't come back */ if (list_empty(&cred_unused)) return SHRINK_STOP; return rpcauth_cache_do_shrink(sc->nr_to_scan); } static unsigned long rpcauth_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { return (number_cred_unused / 100) * sysctl_vfs_cache_pressure; } static void rpcauth_cache_enforce_limit(void) { unsigned long diff; unsigned int nr_to_scan; if (number_cred_unused <= auth_max_cred_cachesize) return; diff = number_cred_unused - auth_max_cred_cachesize; nr_to_scan = 100; if (diff < nr_to_scan) nr_to_scan = diff; rpcauth_cache_do_shrink(nr_to_scan); } /* * Look up a process' credentials in the authentication cache */ struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, int flags) { LIST_HEAD(free); struct rpc_cred_cache *cache = auth->au_credcache; struct rpc_cred *cred = NULL, *entry, *new; unsigned int nr; nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits); rcu_read_lock(); hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], 
cr_hash) { if (!entry->cr_ops->crmatch(acred, entry, flags)) continue; if (flags & RPCAUTH_LOOKUP_RCU) { if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) && !test_bit(RPCAUTH_CRED_NEW, &entry->cr_flags)) cred = entry; break; } spin_lock(&cache->lock); if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) == 0) { spin_unlock(&cache->lock); continue; } cred = get_rpccred(entry); spin_unlock(&cache->lock); break; } rcu_read_unlock(); if (cred != NULL) goto found; if (flags & RPCAUTH_LOOKUP_RCU) return ERR_PTR(-ECHILD); new = auth->au_ops->crcreate(auth, acred, flags); if (IS_ERR(new)) { cred = new; goto out; } spin_lock(&cache->lock); hlist_for_each_entry(entry, &cache->hashtable[nr], cr_hash) { if (!entry->cr_ops->crmatch(acred, entry, flags)) continue; cred = get_rpccred(entry); break; } if (cred == NULL) { cred = new; set_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); hlist_add_head_rcu(&cred->cr_hash, &cache->hashtable[nr]); } else list_add_tail(&new->cr_lru, &free); spin_unlock(&cache->lock); rpcauth_cache_enforce_limit(); found: if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) && cred->cr_ops->cr_init != NULL && !(flags & RPCAUTH_LOOKUP_NEW)) { int res = cred->cr_ops->cr_init(auth, cred); if (res < 0) { put_rpccred(cred); cred = ERR_PTR(res); } } rpcauth_destroy_credlist(&free); out: return cred; } EXPORT_SYMBOL_GPL(rpcauth_lookup_credcache); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *auth, int flags) { struct auth_cred acred; struct rpc_cred *ret; const struct cred *cred = current_cred(); dprintk("RPC: looking up %s cred\n", auth->au_ops->au_name); memset(&acred, 0, sizeof(acred)); acred.uid = cred->fsuid; acred.gid = cred->fsgid; acred.group_info = cred->group_info; ret = auth->au_ops->lookup_cred(auth, &acred, flags); return ret; } EXPORT_SYMBOL_GPL(rpcauth_lookupcred); void rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, struct rpc_auth *auth, const struct rpc_credops *ops) { INIT_HLIST_NODE(&cred->cr_hash); 
INIT_LIST_HEAD(&cred->cr_lru); atomic_set(&cred->cr_count, 1); cred->cr_auth = auth; cred->cr_ops = ops; cred->cr_expire = jiffies; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) cred->cr_magic = RPCAUTH_CRED_MAGIC; #endif cred->cr_uid = acred->uid; } EXPORT_SYMBOL_GPL(rpcauth_init_cred); struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags) { dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid, cred->cr_auth->au_ops->au_name, cred); return get_rpccred(cred); } EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred); static struct rpc_cred * rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags) { struct rpc_auth *auth = task->tk_client->cl_auth; struct auth_cred acred = { .uid = GLOBAL_ROOT_UID, .gid = GLOBAL_ROOT_GID, }; dprintk("RPC: %5u looking up %s cred\n", task->tk_pid, task->tk_client->cl_auth->au_ops->au_name); return auth->au_ops->lookup_cred(auth, &acred, lookupflags); } static struct rpc_cred * rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags) { struct rpc_auth *auth = task->tk_client->cl_auth; dprintk("RPC: %5u looking up %s cred\n", task->tk_pid, auth->au_ops->au_name); return rpcauth_lookupcred(auth, lookupflags); } static int rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_cred *new; int lookupflags = 0; if (flags & RPC_TASK_ASYNC) lookupflags |= RPCAUTH_LOOKUP_NEW; if (cred != NULL) new = cred->cr_ops->crbind(task, cred, lookupflags); else if (flags & RPC_TASK_ROOTCREDS) new = rpcauth_bind_root_cred(task, lookupflags); else new = rpcauth_bind_new_cred(task, lookupflags); if (IS_ERR(new)) return PTR_ERR(new); if (req->rq_cred != NULL) put_rpccred(req->rq_cred); req->rq_cred = new; return 0; } void put_rpccred(struct rpc_cred *cred) { /* Fast path for unhashed credentials */ if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) { if (atomic_dec_and_test(&cred->cr_count)) cred->cr_ops->crdestroy(cred); return; } if 
(!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock)) return; if (!list_empty(&cred->cr_lru)) { number_cred_unused--; list_del_init(&cred->cr_lru); } if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) { cred->cr_expire = jiffies; list_add_tail(&cred->cr_lru, &cred_unused); number_cred_unused++; goto out_nodestroy; } if (!rpcauth_unhash_cred(cred)) { /* We were hashed and someone looked us up... */ goto out_nodestroy; } } spin_unlock(&rpc_credcache_lock); cred->cr_ops->crdestroy(cred); return; out_nodestroy: spin_unlock(&rpc_credcache_lock); } EXPORT_SYMBOL_GPL(put_rpccred); __be32 * rpcauth_marshcred(struct rpc_task *task, __be32 *p) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; dprintk("RPC: %5u marshaling %s cred %p\n", task->tk_pid, cred->cr_auth->au_ops->au_name, cred); return cred->cr_ops->crmarshal(task, p); } __be32 * rpcauth_checkverf(struct rpc_task *task, __be32 *p) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; dprintk("RPC: %5u validating %s cred %p\n", task->tk_pid, cred->cr_auth->au_ops->au_name, cred); return cred->cr_ops->crvalidate(task, p); } static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp, __be32 *data, void *obj) { struct xdr_stream xdr; xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data); encode(rqstp, &xdr, obj); } int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", task->tk_pid, cred->cr_ops->cr_name, cred); if (cred->cr_ops->crwrap_req) return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); /* By default, we encode the arguments normally. 
*/ rpcauth_wrap_req_encode(encode, rqstp, data, obj); return 0; } static int rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp, __be32 *data, void *obj) { struct xdr_stream xdr; xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data); return decode(rqstp, &xdr, obj); } int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", task->tk_pid, cred->cr_ops->cr_name, cred); if (cred->cr_ops->crunwrap_resp) return cred->cr_ops->crunwrap_resp(task, decode, rqstp, data, obj); /* By default, we decode the arguments normally. */ return rpcauth_unwrap_req_decode(decode, rqstp, data, obj); } int rpcauth_refreshcred(struct rpc_task *task) { struct rpc_cred *cred; int err; cred = task->tk_rqstp->rq_cred; if (cred == NULL) { err = rpcauth_bindcred(task, task->tk_msg.rpc_cred, task->tk_flags); if (err < 0) goto out; cred = task->tk_rqstp->rq_cred; } dprintk("RPC: %5u refreshing %s cred %p\n", task->tk_pid, cred->cr_auth->au_ops->au_name, cred); err = cred->cr_ops->crrefresh(task); out: if (err < 0) task->tk_status = err; return err; } void rpcauth_invalcred(struct rpc_task *task) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; dprintk("RPC: %5u invalidating %s cred %p\n", task->tk_pid, cred->cr_auth->au_ops->au_name, cred); if (cred) clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); } int rpcauth_uptodatecred(struct rpc_task *task) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; return cred == NULL || test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0; } static struct shrinker rpc_cred_shrinker = { .count_objects = rpcauth_cache_shrink_count, .scan_objects = rpcauth_cache_shrink_scan, .seeks = DEFAULT_SEEKS, }; int __init rpcauth_init_module(void) { int err; err = rpc_init_authunix(); if (err < 0) goto out1; err = rpc_init_generic_auth(); if (err < 0) goto out2; register_shrinker(&rpc_cred_shrinker); return 
0; out2: rpc_destroy_authunix(); out1: return err; } void rpcauth_remove_module(void) { rpc_destroy_authunix(); rpc_destroy_generic_auth(); unregister_shrinker(&rpc_cred_shrinker); }
gpl-2.0
imoseyon/leanKernel-note3
fs/cifs/readdir.c
878
24265
/* * fs/cifs/readdir.c * * Directory search handling * * Copyright (C) International Business Machines Corp., 2004, 2008 * Copyright (C) Red Hat, Inc., 2011 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/stat.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifsfs.h" /* * To be safe - for UCS to UTF-8 with strings loaded with the rare long * characters alloc more to account for such multibyte target UTF-8 * characters. */ #define UNICODE_NAME_MAX ((4 * NAME_MAX) + 2) #ifdef CONFIG_CIFS_DEBUG2 static void dump_cifs_file_struct(struct file *file, char *label) { struct cifsFileInfo *cf; if (file) { cf = file->private_data; if (cf == NULL) { cFYI(1, "empty cifs private file data"); return; } if (cf->invalidHandle) cFYI(1, "invalid handle"); if (cf->srch_inf.endOfSearch) cFYI(1, "end of search"); if (cf->srch_inf.emptyDir) cFYI(1, "empty dir"); } } #else static inline void dump_cifs_file_struct(struct file *file, char *label) { } #endif /* DEBUG2 */ /* * Find the dentry that matches "name". If there isn't one, create one. 
If it's * a negative dentry or the uniqueid changed, then drop it and recreate it. */ static struct dentry * cifs_readdir_lookup(struct dentry *parent, struct qstr *name, struct cifs_fattr *fattr) { struct dentry *dentry, *alias; struct inode *inode; struct super_block *sb = parent->d_inode->i_sb; cFYI(1, "For %s", name->name); if (parent->d_op && parent->d_op->d_hash) parent->d_op->d_hash(parent, parent->d_inode, name); else name->hash = full_name_hash(name->name, name->len); dentry = d_lookup(parent, name); if (dentry) { inode = dentry->d_inode; /* update inode in place if i_ino didn't change */ if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) { cifs_fattr_to_inode(inode, fattr); return dentry; } d_drop(dentry); dput(dentry); } /* * If we know that the inode will need to be revalidated immediately, * then don't create a new dentry for it. We'll end up doing an on * the wire call either way and this spares us an invalidation. */ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) return NULL; dentry = d_alloc(parent, name); if (dentry == NULL) return NULL; inode = cifs_iget(sb, fattr); if (!inode) { dput(dentry); return NULL; } alias = d_materialise_unique(dentry, inode); if (alias != NULL) { dput(dentry); if (IS_ERR(alias)) return NULL; dentry = alias; } return dentry; } static void cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) { fattr->cf_uid = cifs_sb->mnt_uid; fattr->cf_gid = cifs_sb->mnt_gid; if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; fattr->cf_dtype = DT_DIR; } else { fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode; fattr->cf_dtype = DT_REG; } if (fattr->cf_cifsattrs & ATTR_READONLY) fattr->cf_mode &= ~S_IWUGO; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL && fattr->cf_cifsattrs & ATTR_SYSTEM) { if (fattr->cf_eof == 0) { fattr->cf_mode &= ~S_IFMT; fattr->cf_mode |= S_IFIFO; fattr->cf_dtype = DT_FIFO; } else { /* * trying to get the type and mode via SFU can be slow, 
* so just call those regular files for now, and mark * for reval */ fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; } } } static void cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info, struct cifs_sb_info *cifs_sb) { memset(fattr, 0, sizeof(*fattr)); fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes); fattr->cf_eof = le64_to_cpu(info->EndOfFile); fattr->cf_bytes = le64_to_cpu(info->AllocationSize); fattr->cf_createtime = le64_to_cpu(info->CreationTime); fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime); fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); cifs_fill_common_info(fattr, cifs_sb); } static void cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info, struct cifs_sb_info *cifs_sb) { int offset = cifs_sb_master_tcon(cifs_sb)->ses->server->timeAdj; memset(fattr, 0, sizeof(*fattr)); fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate, info->LastAccessTime, offset); fattr->cf_ctime = cnvrtDosUnixTm(info->LastWriteDate, info->LastWriteTime, offset); fattr->cf_mtime = cnvrtDosUnixTm(info->LastWriteDate, info->LastWriteTime, offset); fattr->cf_cifsattrs = le16_to_cpu(info->Attributes); fattr->cf_bytes = le32_to_cpu(info->AllocationSize); fattr->cf_eof = le32_to_cpu(info->DataSize); cifs_fill_common_info(fattr, cifs_sb); } /* BB eventually need to add the following helper function to resolve NT_STATUS_STOPPED_ON_SYMLINK return code when we try to do FindFirst on (NTFS) directory symlinks */ /* int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb, int xid) { __u16 fid; int len; int oplock = 0; int rc; struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb); char *tmpbuffer; rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ, OPEN_REPARSE_POINT, &fid, &oplock, NULL, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (!rc) { tmpbuffer = kmalloc(maxpath); rc = CIFSSMBQueryReparseLinkInfo(xid, 
ptcon, full_path, tmpbuffer, maxpath -1, fid, cifs_sb->local_nls); if (CIFSSMBClose(xid, ptcon, fid)) { cFYI(1, "Error closing temporary reparsepoint open"); } } } */ static int initiate_cifs_search(const int xid, struct file *file) { __u16 search_flags; int rc = 0; char *full_path = NULL; struct cifsFileInfo *cifsFile; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); struct tcon_link *tlink = NULL; struct cifs_tcon *pTcon; if (file->private_data == NULL) { tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); if (cifsFile == NULL) { rc = -ENOMEM; goto error_exit; } file->private_data = cifsFile; cifsFile->tlink = cifs_get_tlink(tlink); pTcon = tlink_tcon(tlink); } else { cifsFile = file->private_data; pTcon = tlink_tcon(cifsFile->tlink); } cifsFile->invalidHandle = true; cifsFile->srch_inf.endOfSearch = false; full_path = build_path_from_dentry(file->f_path.dentry); if (full_path == NULL) { rc = -ENOMEM; goto error_exit; } cFYI(1, "Full path: %s start at: %lld", full_path, file->f_pos); ffirst_retry: /* test for Unix extensions */ /* but now check for them on the share/mount not on the SMB session */ /* if (pTcon->ses->capabilities & CAP_UNIX) { */ if (pTcon->unix_ext) cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX; else if ((pTcon->ses->capabilities & (CAP_NT_SMBS | CAP_NT_FIND)) == 0) { cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD; } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO; } else /* not srvinos - BB fixme add check for backlevel? 
*/ { cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO; } search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME; if (backup_cred(cifs_sb)) search_flags |= CIFS_SEARCH_BACKUP_SEARCH; rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls, &cifsFile->netfid, search_flags, &cifsFile->srch_inf, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb)); if (rc == 0) cifsFile->invalidHandle = false; /* BB add following call to handle readdir on new NTFS symlink errors else if STATUS_STOPPED_ON_SYMLINK call get_symlink_reparse_path and retry with new path */ else if ((rc == -EOPNOTSUPP) && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; goto ffirst_retry; } error_exit: kfree(full_path); cifs_put_tlink(tlink); return rc; } /* return length of unicode string in bytes */ static int cifs_unicode_bytelen(const char *str) { int len; const __le16 *ustr = (const __le16 *)str; for (len = 0; len <= PATH_MAX; len++) { if (ustr[len] == 0) return len << 1; } cFYI(1, "Unicode string longer than PATH_MAX found"); return len << 1; } static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level) { char *new_entry; FILE_DIRECTORY_INFO *pDirInfo = (FILE_DIRECTORY_INFO *)old_entry; if (level == SMB_FIND_FILE_INFO_STANDARD) { FIND_FILE_STANDARD_INFO *pfData; pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo; new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + pfData->FileNameLength; } else new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); cFYI(1, "new entry %p old entry %p", new_entry, old_entry); /* validate that new_entry is not past end of SMB */ if (new_entry >= end_of_smb) { cERROR(1, "search entry %p began after end of SMB %p old entry %p", new_entry, end_of_smb, old_entry); return NULL; } else if (((level == SMB_FIND_FILE_INFO_STANDARD) && (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb)) || ((level != SMB_FIND_FILE_INFO_STANDARD) && (new_entry 
+ sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) { cERROR(1, "search entry %p extends after end of SMB %p", new_entry, end_of_smb); return NULL; } else return new_entry; } struct cifs_dirent { const char *name; size_t namelen; u32 resume_key; u64 ino; }; static void cifs_fill_dirent_unix(struct cifs_dirent *de, const FILE_UNIX_INFO *info, bool is_unicode) { de->name = &info->FileName[0]; if (is_unicode) de->namelen = cifs_unicode_bytelen(de->name); else de->namelen = strnlen(de->name, PATH_MAX); de->resume_key = info->ResumeKey; de->ino = le64_to_cpu(info->basic.UniqueId); } static void cifs_fill_dirent_dir(struct cifs_dirent *de, const FILE_DIRECTORY_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; } static void cifs_fill_dirent_full(struct cifs_dirent *de, const FILE_FULL_DIRECTORY_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; } static void cifs_fill_dirent_search(struct cifs_dirent *de, const SEARCH_ID_FULL_DIR_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; de->ino = le64_to_cpu(info->UniqueId); } static void cifs_fill_dirent_both(struct cifs_dirent *de, const FILE_BOTH_DIRECTORY_INFO *info) { de->name = &info->FileName[0]; de->namelen = le32_to_cpu(info->FileNameLength); de->resume_key = info->FileIndex; } static void cifs_fill_dirent_std(struct cifs_dirent *de, const FIND_FILE_STANDARD_INFO *info) { de->name = &info->FileName[0]; /* one byte length, no endianess conversion */ de->namelen = info->FileNameLength; de->resume_key = info->ResumeKey; } static int cifs_fill_dirent(struct cifs_dirent *de, const void *info, u16 level, bool is_unicode) { memset(de, 0, sizeof(*de)); switch (level) { case SMB_FIND_FILE_UNIX: cifs_fill_dirent_unix(de, info, is_unicode); break; case SMB_FIND_FILE_DIRECTORY_INFO: cifs_fill_dirent_dir(de, 
info); break; case SMB_FIND_FILE_FULL_DIRECTORY_INFO: cifs_fill_dirent_full(de, info); break; case SMB_FIND_FILE_ID_FULL_DIR_INFO: cifs_fill_dirent_search(de, info); break; case SMB_FIND_FILE_BOTH_DIRECTORY_INFO: cifs_fill_dirent_both(de, info); break; case SMB_FIND_FILE_INFO_STANDARD: cifs_fill_dirent_std(de, info); break; default: cFYI(1, "Unknown findfirst level %d", level); return -EINVAL; } return 0; } #define UNICODE_DOT cpu_to_le16(0x2e) /* return 0 if no match and 1 for . (current directory) and 2 for .. (parent) */ static int cifs_entry_is_dot(struct cifs_dirent *de, bool is_unicode) { int rc = 0; if (!de->name) return 0; if (is_unicode) { __le16 *ufilename = (__le16 *)de->name; if (de->namelen == 2) { /* check for . */ if (ufilename[0] == UNICODE_DOT) rc = 1; } else if (de->namelen == 4) { /* check for .. */ if (ufilename[0] == UNICODE_DOT && ufilename[1] == UNICODE_DOT) rc = 2; } } else /* ASCII */ { if (de->namelen == 1) { if (de->name[0] == '.') rc = 1; } else if (de->namelen == 2) { if (de->name[0] == '.' && de->name[1] == '.') rc = 2; } } return rc; } /* Check if directory that we are searching has changed so we can decide whether we can use the cached search results from the previous search */ static int is_dir_changed(struct file *file) { struct inode *inode = file->f_path.dentry->d_inode; struct cifsInodeInfo *cifsInfo = CIFS_I(inode); if (cifsInfo->time == 0) return 1; /* directory was changed, perhaps due to unlink */ else return 0; } static int cifs_save_resume_key(const char *current_entry, struct cifsFileInfo *file_info) { struct cifs_dirent de; int rc; rc = cifs_fill_dirent(&de, current_entry, file_info->srch_inf.info_level, file_info->srch_inf.unicode); if (!rc) { file_info->srch_inf.presume_name = de.name; file_info->srch_inf.resume_name_len = de.namelen; file_info->srch_inf.resume_key = de.resume_key; } return rc; } /* find the corresponding entry in the search */ /* Note that the SMB server returns search entries for . and .. 
which complicates logic here if we choose to parse for them and we do not assume that they are located in the findfirst return buffer.*/ /* We start counting in the buffer with entry 2 and increment for every entry (do not increment for . or .. entry) */ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon, struct file *file, char **ppCurrentEntry, int *num_to_ret) { __u16 search_flags; int rc = 0; int pos_in_buf = 0; loff_t first_entry_in_buffer; loff_t index_to_find = file->f_pos; struct cifsFileInfo *cifsFile = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); /* check if index in the buffer */ if ((cifsFile == NULL) || (ppCurrentEntry == NULL) || (num_to_ret == NULL)) return -ENOENT; *ppCurrentEntry = NULL; first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry - cifsFile->srch_inf.entries_in_buffer; /* if first entry in buf is zero then is first buffer in search response data which means it is likely . and .. will be in this buffer, although some servers do not return . and .. for the root of a drive and for those we need to start two entries earlier */ dump_cifs_file_struct(file, "In fce "); if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) && is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) { /* close and restart search */ cFYI(1, "search backing up - close and restart search"); spin_lock(&cifs_file_list_lock); if (!cifsFile->srch_inf.endOfSearch && !cifsFile->invalidHandle) { cifsFile->invalidHandle = true; spin_unlock(&cifs_file_list_lock); CIFSFindClose(xid, pTcon, cifsFile->netfid); } else spin_unlock(&cifs_file_list_lock); if (cifsFile->srch_inf.ntwrk_buf_start) { cFYI(1, "freeing SMB ff cache buf on search rewind"); if (cifsFile->srch_inf.smallBuf) cifs_small_buf_release(cifsFile->srch_inf. ntwrk_buf_start); else cifs_buf_release(cifsFile->srch_inf. 
ntwrk_buf_start); cifsFile->srch_inf.ntwrk_buf_start = NULL; } rc = initiate_cifs_search(xid, file); if (rc) { cFYI(1, "error %d reinitiating a search on rewind", rc); return rc; } /* FindFirst/Next set last_entry to NULL on malformed reply */ if (cifsFile->srch_inf.last_entry) cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); } search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME; if (backup_cred(cifs_sb)) search_flags |= CIFS_SEARCH_BACKUP_SEARCH; while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && (rc == 0) && !cifsFile->srch_inf.endOfSearch) { cFYI(1, "calling findnext2"); rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, search_flags, &cifsFile->srch_inf); /* FindFirst/Next set last_entry to NULL on malformed reply */ if (cifsFile->srch_inf.last_entry) cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); if (rc) return -ENOENT; } if (index_to_find < cifsFile->srch_inf.index_of_last_entry) { /* we found the buffer that contains the entry */ /* scan and find it */ int i; char *current_entry; char *end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + smbCalcSize((struct smb_hdr *) cifsFile->srch_inf.ntwrk_buf_start); current_entry = cifsFile->srch_inf.srch_entries_start; first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry - cifsFile->srch_inf.entries_in_buffer; pos_in_buf = index_to_find - first_entry_in_buffer; cFYI(1, "found entry - pos_in_buf %d", pos_in_buf); for (i = 0; (i < (pos_in_buf)) && (current_entry != NULL); i++) { /* go entry by entry figuring out which is first */ current_entry = nxt_dir_entry(current_entry, end_of_smb, cifsFile->srch_inf.info_level); } if ((current_entry == NULL) && (i < pos_in_buf)) { /* BB fixme - check if we should flag this error */ cERROR(1, "reached end of buf searching for pos in buf" " %d index to find %lld rc %d", pos_in_buf, index_to_find, rc); } rc = 0; *ppCurrentEntry = current_entry; } else { cFYI(1, "index not in buffer - could not findnext into it"); return 
0; } if (pos_in_buf >= cifsFile->srch_inf.entries_in_buffer) { cFYI(1, "can not return entries pos_in_buf beyond last"); *num_to_ret = 0; } else *num_to_ret = cifsFile->srch_inf.entries_in_buffer - pos_in_buf; return rc; } static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir, void *dirent, char *scratch_buf, unsigned int max_len) { struct cifsFileInfo *file_info = file->private_data; struct super_block *sb = file->f_path.dentry->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_dirent de = { NULL, }; struct cifs_fattr fattr; struct dentry *dentry; struct qstr name; int rc = 0; ino_t ino; rc = cifs_fill_dirent(&de, find_entry, file_info->srch_inf.info_level, file_info->srch_inf.unicode); if (rc) return rc; if (de.namelen > max_len) { cERROR(1, "bad search response length %zd past smb end", de.namelen); return -EINVAL; } /* skip . and .. since we added them first */ if (cifs_entry_is_dot(&de, file_info->srch_inf.unicode)) return 0; if (file_info->srch_inf.unicode) { struct nls_table *nlt = cifs_sb->local_nls; name.name = scratch_buf; name.len = cifs_from_utf16((char *)name.name, (__le16 *)de.name, UNICODE_NAME_MAX, min_t(size_t, de.namelen, (size_t)max_len), nlt, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); name.len -= nls_nullsize(nlt); } else { name.name = de.name; name.len = de.namelen; } switch (file_info->srch_inf.info_level) { case SMB_FIND_FILE_UNIX: cifs_unix_basic_to_fattr(&fattr, &((FILE_UNIX_INFO *)find_entry)->basic, cifs_sb); break; case SMB_FIND_FILE_INFO_STANDARD: cifs_std_info_to_fattr(&fattr, (FIND_FILE_STANDARD_INFO *)find_entry, cifs_sb); break; default: cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *)find_entry, cifs_sb); break; } if (de.ino && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { fattr.cf_uniqueid = de.ino; } else { fattr.cf_uniqueid = iunique(sb, ROOT_I); cifs_autodisable_serverino(cifs_sb); } if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) && 
CIFSCouldBeMFSymlink(&fattr)) /* * trying to get the type and mode can be slow, * so just call those regular files for now, and mark * for reval */ fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); dentry = cifs_readdir_lookup(file->f_dentry, &name, &fattr); rc = filldir(dirent, name.name, name.len, file->f_pos, ino, fattr.cf_dtype); dput(dentry); return rc; } int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) { int rc = 0; int xid, i; struct cifs_tcon *pTcon; struct cifsFileInfo *cifsFile = NULL; char *current_entry; int num_to_fill = 0; char *tmp_buf = NULL; char *end_of_smb; unsigned int max_len; xid = GetXid(); /* * Ensure FindFirst doesn't fail before doing filldir() for '.' and * '..'. Otherwise we won't be able to notify VFS in case of failure. */ if (file->private_data == NULL) { rc = initiate_cifs_search(xid, file); cFYI(1, "initiate cifs search rc %d", rc); if (rc) goto rddir2_exit; } switch ((int) file->f_pos) { case 0: if (filldir(direntry, ".", 1, file->f_pos, file->f_path.dentry->d_inode->i_ino, DT_DIR) < 0) { cERROR(1, "Filldir for current dir failed"); rc = -ENOMEM; break; } file->f_pos++; case 1: if (filldir(direntry, "..", 2, file->f_pos, parent_ino(file->f_path.dentry), DT_DIR) < 0) { cERROR(1, "Filldir for parent dir failed"); rc = -ENOMEM; break; } file->f_pos++; default: /* 1) If search is active, is in current search buffer? 
if it before then restart search if after then keep searching till find it */ if (file->private_data == NULL) { rc = -EINVAL; FreeXid(xid); return rc; } cifsFile = file->private_data; if (cifsFile->srch_inf.endOfSearch) { if (cifsFile->srch_inf.emptyDir) { cFYI(1, "End of search, empty dir"); rc = 0; break; } } /* else { cifsFile->invalidHandle = true; CIFSFindClose(xid, pTcon, cifsFile->netfid); } */ pTcon = tlink_tcon(cifsFile->tlink); rc = find_cifs_entry(xid, pTcon, file, &current_entry, &num_to_fill); if (rc) { cFYI(1, "fce error %d", rc); goto rddir2_exit; } else if (current_entry != NULL) { cFYI(1, "entry %lld found", file->f_pos); } else { cFYI(1, "could not find entry"); goto rddir2_exit; } cFYI(1, "loop through %d times filling dir for net buf %p", num_to_fill, cifsFile->srch_inf.ntwrk_buf_start); max_len = smbCalcSize((struct smb_hdr *) cifsFile->srch_inf.ntwrk_buf_start); end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); if (tmp_buf == NULL) { rc = -ENOMEM; break; } for (i = 0; (i < num_to_fill) && (rc == 0); i++) { if (current_entry == NULL) { /* evaluate whether this case is an error */ cERROR(1, "past SMB end, num to fill %d i %d", num_to_fill, i); break; } /* if buggy server returns . and .. late do we want to check for that here? */ rc = cifs_filldir(current_entry, file, filldir, direntry, tmp_buf, max_len); if (rc == -EOVERFLOW) { rc = 0; break; } file->f_pos++; if (file->f_pos == cifsFile->srch_inf.index_of_last_entry) { cFYI(1, "last entry in buf at pos %lld %s", file->f_pos, tmp_buf); cifs_save_resume_key(current_entry, cifsFile); break; } else current_entry = nxt_dir_entry(current_entry, end_of_smb, cifsFile->srch_inf.info_level); } kfree(tmp_buf); break; } /* end switch */ rddir2_exit: FreeXid(xid); return rc; }
gpl-2.0
siddhartha100/Kernel
drivers/media/usb/airspy/airspy.c
878
28226
/* * AirSpy SDR driver * * Copyright (C) 2014 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/videobuf2-vmalloc.h> /* AirSpy USB API commands (from AirSpy Library) */ enum { CMD_INVALID = 0x00, CMD_RECEIVER_MODE = 0x01, CMD_SI5351C_WRITE = 0x02, CMD_SI5351C_READ = 0x03, CMD_R820T_WRITE = 0x04, CMD_R820T_READ = 0x05, CMD_SPIFLASH_ERASE = 0x06, CMD_SPIFLASH_WRITE = 0x07, CMD_SPIFLASH_READ = 0x08, CMD_BOARD_ID_READ = 0x09, CMD_VERSION_STRING_READ = 0x0a, CMD_BOARD_PARTID_SERIALNO_READ = 0x0b, CMD_SET_SAMPLE_RATE = 0x0c, CMD_SET_FREQ = 0x0d, CMD_SET_LNA_GAIN = 0x0e, CMD_SET_MIXER_GAIN = 0x0f, CMD_SET_VGA_GAIN = 0x10, CMD_SET_LNA_AGC = 0x11, CMD_SET_MIXER_AGC = 0x12, CMD_SET_PACKING = 0x13, }; /* * bEndpointAddress 0x81 EP 1 IN * Transfer Type Bulk * wMaxPacketSize 0x0200 1x 512 bytes */ #define MAX_BULK_BUFS (6) #define BULK_BUFFER_SIZE (128 * 512) static const struct v4l2_frequency_band bands[] = { { .tuner = 0, .type = V4L2_TUNER_ADC, .index = 0, .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = 20000000, .rangehigh = 20000000, }, }; static const struct v4l2_frequency_band bands_rf[] = { { .tuner = 1, .type = V4L2_TUNER_RF, .index = 0, .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = 24000000, .rangehigh = 1750000000, }, }; /* stream formats 
*/ struct airspy_format { char *name; u32 pixelformat; u32 buffersize; }; /* format descriptions for capture and preview */ static struct airspy_format formats[] = { { .name = "Real U12LE", .pixelformat = V4L2_SDR_FMT_RU12LE, .buffersize = BULK_BUFFER_SIZE, }, }; static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats); /* intermediate buffers with raw data from the USB device */ struct airspy_frame_buf { struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */ struct list_head list; }; struct airspy { #define POWER_ON (1 << 1) #define URB_BUF (1 << 2) #define USB_STATE_URB_BUF (1 << 3) unsigned long flags; struct device *dev; struct usb_device *udev; struct video_device vdev; struct v4l2_device v4l2_dev; /* videobuf2 queue and queued buffers list */ struct vb2_queue vb_queue; struct list_head queued_bufs; spinlock_t queued_bufs_lock; /* Protects queued_bufs */ unsigned sequence; /* Buffer sequence counter */ unsigned int vb_full; /* vb is full and packets dropped */ /* Note if taking both locks v4l2_lock must always be locked first! 
*/ struct mutex v4l2_lock; /* Protects everything else */ struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */ struct urb *urb_list[MAX_BULK_BUFS]; int buf_num; unsigned long buf_size; u8 *buf_list[MAX_BULK_BUFS]; dma_addr_t dma_addr[MAX_BULK_BUFS]; int urbs_initialized; int urbs_submitted; /* USB control message buffer */ #define BUF_SIZE 24 u8 buf[BUF_SIZE]; /* Current configuration */ unsigned int f_adc; unsigned int f_rf; u32 pixelformat; u32 buffersize; /* Controls */ struct v4l2_ctrl_handler hdl; struct v4l2_ctrl *lna_gain_auto; struct v4l2_ctrl *lna_gain; struct v4l2_ctrl *mixer_gain_auto; struct v4l2_ctrl *mixer_gain; struct v4l2_ctrl *if_gain; /* Sample rate calc */ unsigned long jiffies_next; unsigned int sample; unsigned int sample_measured; }; #define airspy_dbg_usb_control_msg(_dev, _r, _t, _v, _i, _b, _l) { \ char *_direction; \ if (_t & USB_DIR_IN) \ _direction = "<<<"; \ else \ _direction = ">>>"; \ dev_dbg(_dev, "%02x %02x %02x %02x %02x %02x %02x %02x %s %*ph\n", \ _t, _r, _v & 0xff, _v >> 8, _i & 0xff, _i >> 8, \ _l & 0xff, _l >> 8, _direction, _l, _b); \ } /* execute firmware command */ static int airspy_ctrl_msg(struct airspy *s, u8 request, u16 value, u16 index, u8 *data, u16 size) { int ret; unsigned int pipe; u8 requesttype; switch (request) { case CMD_RECEIVER_MODE: case CMD_SET_FREQ: pipe = usb_sndctrlpipe(s->udev, 0); requesttype = (USB_TYPE_VENDOR | USB_DIR_OUT); break; case CMD_BOARD_ID_READ: case CMD_VERSION_STRING_READ: case CMD_BOARD_PARTID_SERIALNO_READ: case CMD_SET_LNA_GAIN: case CMD_SET_MIXER_GAIN: case CMD_SET_VGA_GAIN: case CMD_SET_LNA_AGC: case CMD_SET_MIXER_AGC: pipe = usb_rcvctrlpipe(s->udev, 0); requesttype = (USB_TYPE_VENDOR | USB_DIR_IN); break; default: dev_err(s->dev, "Unknown command %02x\n", request); ret = -EINVAL; goto err; } /* write request */ if (!(requesttype & USB_DIR_IN)) memcpy(s->buf, data, size); ret = usb_control_msg(s->udev, pipe, request, requesttype, value, index, s->buf, size, 1000); 
airspy_dbg_usb_control_msg(s->dev, request, requesttype, value, index, s->buf, size); if (ret < 0) { dev_err(s->dev, "usb_control_msg() failed %d request %02x\n", ret, request); goto err; } /* read request */ if (requesttype & USB_DIR_IN) memcpy(data, s->buf, size); return 0; err: return ret; } /* Private functions */ static struct airspy_frame_buf *airspy_get_next_fill_buf(struct airspy *s) { unsigned long flags; struct airspy_frame_buf *buf = NULL; spin_lock_irqsave(&s->queued_bufs_lock, flags); if (list_empty(&s->queued_bufs)) goto leave; buf = list_entry(s->queued_bufs.next, struct airspy_frame_buf, list); list_del(&buf->list); leave: spin_unlock_irqrestore(&s->queued_bufs_lock, flags); return buf; } static unsigned int airspy_convert_stream(struct airspy *s, void *dst, void *src, unsigned int src_len) { unsigned int dst_len; if (s->pixelformat == V4L2_SDR_FMT_RU12LE) { memcpy(dst, src, src_len); dst_len = src_len; } else { dst_len = 0; } /* calculate sample rate and output it in 10 seconds intervals */ if (unlikely(time_is_before_jiffies(s->jiffies_next))) { #define MSECS 10000UL unsigned int msecs = jiffies_to_msecs(jiffies - s->jiffies_next + msecs_to_jiffies(MSECS)); unsigned int samples = s->sample - s->sample_measured; s->jiffies_next = jiffies + msecs_to_jiffies(MSECS); s->sample_measured = s->sample; dev_dbg(s->dev, "slen=%u samples=%u msecs=%u sample rate=%lu\n", src_len, samples, msecs, samples * 1000UL / msecs); } /* total number of samples */ s->sample += src_len / 2; return dst_len; } /* * This gets called for the bulk stream pipe. This is done in interrupt * time, so it has to be fast, not crash, and not stall. Neat. 
*/ static void airspy_urb_complete(struct urb *urb) { struct airspy *s = urb->context; struct airspy_frame_buf *fbuf; dev_dbg_ratelimited(s->dev, "status=%d length=%d/%d errors=%d\n", urb->status, urb->actual_length, urb->transfer_buffer_length, urb->error_count); switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dev_err_ratelimited(s->dev, "URB failed %d\n", urb->status); break; } if (likely(urb->actual_length > 0)) { void *ptr; unsigned int len; /* get free framebuffer */ fbuf = airspy_get_next_fill_buf(s); if (unlikely(fbuf == NULL)) { s->vb_full++; dev_notice_ratelimited(s->dev, "videobuf is full, %d packets dropped\n", s->vb_full); goto skip; } /* fill framebuffer */ ptr = vb2_plane_vaddr(&fbuf->vb, 0); len = airspy_convert_stream(s, ptr, urb->transfer_buffer, urb->actual_length); vb2_set_plane_payload(&fbuf->vb, 0, len); v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp); fbuf->vb.v4l2_buf.sequence = s->sequence++; vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE); } skip: usb_submit_urb(urb, GFP_ATOMIC); } static int airspy_kill_urbs(struct airspy *s) { int i; for (i = s->urbs_submitted - 1; i >= 0; i--) { dev_dbg(s->dev, "kill urb=%d\n", i); /* stop the URB */ usb_kill_urb(s->urb_list[i]); } s->urbs_submitted = 0; return 0; } static int airspy_submit_urbs(struct airspy *s) { int i, ret; for (i = 0; i < s->urbs_initialized; i++) { dev_dbg(s->dev, "submit urb=%d\n", i); ret = usb_submit_urb(s->urb_list[i], GFP_ATOMIC); if (ret) { dev_err(s->dev, "Could not submit URB no. 
%d - get them all back\n", i); airspy_kill_urbs(s); return ret; } s->urbs_submitted++; } return 0; } static int airspy_free_stream_bufs(struct airspy *s) { if (s->flags & USB_STATE_URB_BUF) { while (s->buf_num) { s->buf_num--; dev_dbg(s->dev, "free buf=%d\n", s->buf_num); usb_free_coherent(s->udev, s->buf_size, s->buf_list[s->buf_num], s->dma_addr[s->buf_num]); } } s->flags &= ~USB_STATE_URB_BUF; return 0; } static int airspy_alloc_stream_bufs(struct airspy *s) { s->buf_num = 0; s->buf_size = BULK_BUFFER_SIZE; dev_dbg(s->dev, "all in all I will use %u bytes for streaming\n", MAX_BULK_BUFS * BULK_BUFFER_SIZE); for (s->buf_num = 0; s->buf_num < MAX_BULK_BUFS; s->buf_num++) { s->buf_list[s->buf_num] = usb_alloc_coherent(s->udev, BULK_BUFFER_SIZE, GFP_ATOMIC, &s->dma_addr[s->buf_num]); if (!s->buf_list[s->buf_num]) { dev_dbg(s->dev, "alloc buf=%d failed\n", s->buf_num); airspy_free_stream_bufs(s); return -ENOMEM; } dev_dbg(s->dev, "alloc buf=%d %p (dma %llu)\n", s->buf_num, s->buf_list[s->buf_num], (long long)s->dma_addr[s->buf_num]); s->flags |= USB_STATE_URB_BUF; } return 0; } static int airspy_free_urbs(struct airspy *s) { int i; airspy_kill_urbs(s); for (i = s->urbs_initialized - 1; i >= 0; i--) { if (s->urb_list[i]) { dev_dbg(s->dev, "free urb=%d\n", i); /* free the URBs */ usb_free_urb(s->urb_list[i]); } } s->urbs_initialized = 0; return 0; } static int airspy_alloc_urbs(struct airspy *s) { int i, j; /* allocate the URBs */ for (i = 0; i < MAX_BULK_BUFS; i++) { dev_dbg(s->dev, "alloc urb=%d\n", i); s->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC); if (!s->urb_list[i]) { dev_dbg(s->dev, "failed\n"); for (j = 0; j < i; j++) usb_free_urb(s->urb_list[j]); return -ENOMEM; } usb_fill_bulk_urb(s->urb_list[i], s->udev, usb_rcvbulkpipe(s->udev, 0x81), s->buf_list[i], BULK_BUFFER_SIZE, airspy_urb_complete, s); s->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; s->urb_list[i]->transfer_dma = s->dma_addr[i]; s->urbs_initialized++; } return 0; } /* Must be called with 
vb_queue_lock hold */ static void airspy_cleanup_queued_bufs(struct airspy *s) { unsigned long flags; dev_dbg(s->dev, "\n"); spin_lock_irqsave(&s->queued_bufs_lock, flags); while (!list_empty(&s->queued_bufs)) { struct airspy_frame_buf *buf; buf = list_entry(s->queued_bufs.next, struct airspy_frame_buf, list); list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); } spin_unlock_irqrestore(&s->queued_bufs_lock, flags); } /* The user yanked out the cable... */ static void airspy_disconnect(struct usb_interface *intf) { struct v4l2_device *v = usb_get_intfdata(intf); struct airspy *s = container_of(v, struct airspy, v4l2_dev); dev_dbg(s->dev, "\n"); mutex_lock(&s->vb_queue_lock); mutex_lock(&s->v4l2_lock); /* No need to keep the urbs around after disconnection */ s->udev = NULL; v4l2_device_disconnect(&s->v4l2_dev); video_unregister_device(&s->vdev); mutex_unlock(&s->v4l2_lock); mutex_unlock(&s->vb_queue_lock); v4l2_device_put(&s->v4l2_dev); } /* Videobuf2 operations */ static int airspy_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct airspy *s = vb2_get_drv_priv(vq); dev_dbg(s->dev, "nbuffers=%d\n", *nbuffers); /* Need at least 8 buffers */ if (vq->num_buffers + *nbuffers < 8) *nbuffers = 8 - vq->num_buffers; *nplanes = 1; sizes[0] = PAGE_ALIGN(s->buffersize); dev_dbg(s->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]); return 0; } static void airspy_buf_queue(struct vb2_buffer *vb) { struct airspy *s = vb2_get_drv_priv(vb->vb2_queue); struct airspy_frame_buf *buf = container_of(vb, struct airspy_frame_buf, vb); unsigned long flags; /* Check the device has not disconnected between prep and queuing */ if (unlikely(!s->udev)) { vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); return; } spin_lock_irqsave(&s->queued_bufs_lock, flags); list_add_tail(&buf->list, &s->queued_bufs); spin_unlock_irqrestore(&s->queued_bufs_lock, flags); } static 
int airspy_start_streaming(struct vb2_queue *vq, unsigned int count) { struct airspy *s = vb2_get_drv_priv(vq); int ret; dev_dbg(s->dev, "\n"); if (!s->udev) return -ENODEV; mutex_lock(&s->v4l2_lock); s->sequence = 0; set_bit(POWER_ON, &s->flags); ret = airspy_alloc_stream_bufs(s); if (ret) goto err_clear_bit; ret = airspy_alloc_urbs(s); if (ret) goto err_free_stream_bufs; ret = airspy_submit_urbs(s); if (ret) goto err_free_urbs; /* start hardware streaming */ ret = airspy_ctrl_msg(s, CMD_RECEIVER_MODE, 1, 0, NULL, 0); if (ret) goto err_kill_urbs; goto exit_mutex_unlock; err_kill_urbs: airspy_kill_urbs(s); err_free_urbs: airspy_free_urbs(s); err_free_stream_bufs: airspy_free_stream_bufs(s); err_clear_bit: clear_bit(POWER_ON, &s->flags); /* return all queued buffers to vb2 */ { struct airspy_frame_buf *buf, *tmp; list_for_each_entry_safe(buf, tmp, &s->queued_bufs, list) { list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); } } exit_mutex_unlock: mutex_unlock(&s->v4l2_lock); return ret; } static void airspy_stop_streaming(struct vb2_queue *vq) { struct airspy *s = vb2_get_drv_priv(vq); dev_dbg(s->dev, "\n"); mutex_lock(&s->v4l2_lock); /* stop hardware streaming */ airspy_ctrl_msg(s, CMD_RECEIVER_MODE, 0, 0, NULL, 0); airspy_kill_urbs(s); airspy_free_urbs(s); airspy_free_stream_bufs(s); airspy_cleanup_queued_bufs(s); clear_bit(POWER_ON, &s->flags); mutex_unlock(&s->v4l2_lock); } static struct vb2_ops airspy_vb2_ops = { .queue_setup = airspy_queue_setup, .buf_queue = airspy_buf_queue, .start_streaming = airspy_start_streaming, .stop_streaming = airspy_stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; static int airspy_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct airspy *s = video_drvdata(file); strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver)); strlcpy(cap->card, s->vdev.name, sizeof(cap->card)); usb_make_path(s->udev, cap->bus_info, sizeof(cap->bus_info)); 
cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | V4L2_CAP_TUNER; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } static int airspy_enum_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index >= NUM_FORMATS) return -EINVAL; strlcpy(f->description, formats[f->index].name, sizeof(f->description)); f->pixelformat = formats[f->index].pixelformat; return 0; } static int airspy_g_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f) { struct airspy *s = video_drvdata(file); f->fmt.sdr.pixelformat = s->pixelformat; f->fmt.sdr.buffersize = s->buffersize; memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved)); return 0; } static int airspy_s_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f) { struct airspy *s = video_drvdata(file); struct vb2_queue *q = &s->vb_queue; int i; if (vb2_is_busy(q)) return -EBUSY; memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved)); for (i = 0; i < NUM_FORMATS; i++) { if (formats[i].pixelformat == f->fmt.sdr.pixelformat) { s->pixelformat = formats[i].pixelformat; s->buffersize = formats[i].buffersize; f->fmt.sdr.buffersize = formats[i].buffersize; return 0; } } s->pixelformat = formats[0].pixelformat; s->buffersize = formats[0].buffersize; f->fmt.sdr.pixelformat = formats[0].pixelformat; f->fmt.sdr.buffersize = formats[0].buffersize; return 0; } static int airspy_try_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f) { int i; memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved)); for (i = 0; i < NUM_FORMATS; i++) { if (formats[i].pixelformat == f->fmt.sdr.pixelformat) { f->fmt.sdr.buffersize = formats[i].buffersize; return 0; } } f->fmt.sdr.pixelformat = formats[0].pixelformat; f->fmt.sdr.buffersize = formats[0].buffersize; return 0; } static int airspy_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *v) { int ret; if (v->index == 0) ret = 0; else if (v->index == 1) ret = 0; else ret = 
-EINVAL; return ret; } static int airspy_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { int ret; if (v->index == 0) { strlcpy(v->name, "AirSpy ADC", sizeof(v->name)); v->type = V4L2_TUNER_ADC; v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; v->rangelow = bands[0].rangelow; v->rangehigh = bands[0].rangehigh; ret = 0; } else if (v->index == 1) { strlcpy(v->name, "AirSpy RF", sizeof(v->name)); v->type = V4L2_TUNER_RF; v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; v->rangelow = bands_rf[0].rangelow; v->rangehigh = bands_rf[0].rangehigh; ret = 0; } else { ret = -EINVAL; } return ret; } static int airspy_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct airspy *s = video_drvdata(file); int ret; if (f->tuner == 0) { f->type = V4L2_TUNER_ADC; f->frequency = s->f_adc; dev_dbg(s->dev, "ADC frequency=%u Hz\n", s->f_adc); ret = 0; } else if (f->tuner == 1) { f->type = V4L2_TUNER_RF; f->frequency = s->f_rf; dev_dbg(s->dev, "RF frequency=%u Hz\n", s->f_rf); ret = 0; } else { ret = -EINVAL; } return ret; } static int airspy_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f) { struct airspy *s = video_drvdata(file); int ret; u8 buf[4]; if (f->tuner == 0) { s->f_adc = clamp_t(unsigned int, f->frequency, bands[0].rangelow, bands[0].rangehigh); dev_dbg(s->dev, "ADC frequency=%u Hz\n", s->f_adc); ret = 0; } else if (f->tuner == 1) { s->f_rf = clamp_t(unsigned int, f->frequency, bands_rf[0].rangelow, bands_rf[0].rangehigh); dev_dbg(s->dev, "RF frequency=%u Hz\n", s->f_rf); buf[0] = (s->f_rf >> 0) & 0xff; buf[1] = (s->f_rf >> 8) & 0xff; buf[2] = (s->f_rf >> 16) & 0xff; buf[3] = (s->f_rf >> 24) & 0xff; ret = airspy_ctrl_msg(s, CMD_SET_FREQ, 0, 0, buf, 4); } else { ret = -EINVAL; } return ret; } static int airspy_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band) { int ret; if (band->tuner == 0) { if (band->index >= ARRAY_SIZE(bands)) { ret = -EINVAL; } else 
{ *band = bands[band->index]; ret = 0; } } else if (band->tuner == 1) { if (band->index >= ARRAY_SIZE(bands_rf)) { ret = -EINVAL; } else { *band = bands_rf[band->index]; ret = 0; } } else { ret = -EINVAL; } return ret; } static const struct v4l2_ioctl_ops airspy_ioctl_ops = { .vidioc_querycap = airspy_querycap, .vidioc_enum_fmt_sdr_cap = airspy_enum_fmt_sdr_cap, .vidioc_g_fmt_sdr_cap = airspy_g_fmt_sdr_cap, .vidioc_s_fmt_sdr_cap = airspy_s_fmt_sdr_cap, .vidioc_try_fmt_sdr_cap = airspy_try_fmt_sdr_cap, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_g_tuner = airspy_g_tuner, .vidioc_s_tuner = airspy_s_tuner, .vidioc_g_frequency = airspy_g_frequency, .vidioc_s_frequency = airspy_s_frequency, .vidioc_enum_freq_bands = airspy_enum_freq_bands, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, .vidioc_log_status = v4l2_ctrl_log_status, }; static const struct v4l2_file_operations airspy_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .read = vb2_fop_read, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, .unlocked_ioctl = video_ioctl2, }; static struct video_device airspy_template = { .name = "AirSpy SDR", .release = video_device_release_empty, .fops = &airspy_fops, .ioctl_ops = &airspy_ioctl_ops, }; static void airspy_video_release(struct v4l2_device *v) { struct airspy *s = container_of(v, struct airspy, v4l2_dev); v4l2_ctrl_handler_free(&s->hdl); v4l2_device_unregister(&s->v4l2_dev); kfree(s); } static int airspy_set_lna_gain(struct airspy *s) { int ret; u8 u8tmp; dev_dbg(s->dev, "lna auto=%d->%d val=%d->%d\n", s->lna_gain_auto->cur.val, s->lna_gain_auto->val, s->lna_gain->cur.val, s->lna_gain->val); ret = 
airspy_ctrl_msg(s, CMD_SET_LNA_AGC, 0, s->lna_gain_auto->val, &u8tmp, 1); if (ret) goto err; if (s->lna_gain_auto->val == false) { ret = airspy_ctrl_msg(s, CMD_SET_LNA_GAIN, 0, s->lna_gain->val, &u8tmp, 1); if (ret) goto err; } err: if (ret) dev_dbg(s->dev, "failed=%d\n", ret); return ret; } static int airspy_set_mixer_gain(struct airspy *s) { int ret; u8 u8tmp; dev_dbg(s->dev, "mixer auto=%d->%d val=%d->%d\n", s->mixer_gain_auto->cur.val, s->mixer_gain_auto->val, s->mixer_gain->cur.val, s->mixer_gain->val); ret = airspy_ctrl_msg(s, CMD_SET_MIXER_AGC, 0, s->mixer_gain_auto->val, &u8tmp, 1); if (ret) goto err; if (s->mixer_gain_auto->val == false) { ret = airspy_ctrl_msg(s, CMD_SET_MIXER_GAIN, 0, s->mixer_gain->val, &u8tmp, 1); if (ret) goto err; } err: if (ret) dev_dbg(s->dev, "failed=%d\n", ret); return ret; } static int airspy_set_if_gain(struct airspy *s) { int ret; u8 u8tmp; dev_dbg(s->dev, "val=%d->%d\n", s->if_gain->cur.val, s->if_gain->val); ret = airspy_ctrl_msg(s, CMD_SET_VGA_GAIN, 0, s->if_gain->val, &u8tmp, 1); if (ret) goto err; err: if (ret) dev_dbg(s->dev, "failed=%d\n", ret); return ret; } static int airspy_s_ctrl(struct v4l2_ctrl *ctrl) { struct airspy *s = container_of(ctrl->handler, struct airspy, hdl); int ret; switch (ctrl->id) { case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: case V4L2_CID_RF_TUNER_LNA_GAIN: ret = airspy_set_lna_gain(s); break; case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: case V4L2_CID_RF_TUNER_MIXER_GAIN: ret = airspy_set_mixer_gain(s); break; case V4L2_CID_RF_TUNER_IF_GAIN: ret = airspy_set_if_gain(s); break; default: dev_dbg(s->dev, "unknown ctrl: id=%d name=%s\n", ctrl->id, ctrl->name); ret = -EINVAL; } return ret; } static const struct v4l2_ctrl_ops airspy_ctrl_ops = { .s_ctrl = airspy_s_ctrl, }; static int airspy_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct airspy *s; int ret; u8 u8tmp, buf[BUF_SIZE]; s = kzalloc(sizeof(struct airspy), GFP_KERNEL); if (s == NULL) { dev_err(&intf->dev, "Could not allocate 
memory for state\n"); return -ENOMEM; } mutex_init(&s->v4l2_lock); mutex_init(&s->vb_queue_lock); spin_lock_init(&s->queued_bufs_lock); INIT_LIST_HEAD(&s->queued_bufs); s->dev = &intf->dev; s->udev = interface_to_usbdev(intf); s->f_adc = bands[0].rangelow; s->f_rf = bands_rf[0].rangelow; s->pixelformat = formats[0].pixelformat; s->buffersize = formats[0].buffersize; /* Detect device */ ret = airspy_ctrl_msg(s, CMD_BOARD_ID_READ, 0, 0, &u8tmp, 1); if (ret == 0) ret = airspy_ctrl_msg(s, CMD_VERSION_STRING_READ, 0, 0, buf, BUF_SIZE); if (ret) { dev_err(s->dev, "Could not detect board\n"); goto err_free_mem; } buf[BUF_SIZE - 1] = '\0'; dev_info(s->dev, "Board ID: %02x\n", u8tmp); dev_info(s->dev, "Firmware version: %s\n", buf); /* Init videobuf2 queue structure */ s->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE; s->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ; s->vb_queue.drv_priv = s; s->vb_queue.buf_struct_size = sizeof(struct airspy_frame_buf); s->vb_queue.ops = &airspy_vb2_ops; s->vb_queue.mem_ops = &vb2_vmalloc_memops; s->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; ret = vb2_queue_init(&s->vb_queue); if (ret) { dev_err(s->dev, "Could not initialize vb2 queue\n"); goto err_free_mem; } /* Init video_device structure */ s->vdev = airspy_template; s->vdev.queue = &s->vb_queue; s->vdev.queue->lock = &s->vb_queue_lock; video_set_drvdata(&s->vdev, s); /* Register the v4l2_device structure */ s->v4l2_dev.release = airspy_video_release; ret = v4l2_device_register(&intf->dev, &s->v4l2_dev); if (ret) { dev_err(s->dev, "Failed to register v4l2-device (%d)\n", ret); goto err_free_mem; } /* Register controls */ v4l2_ctrl_handler_init(&s->hdl, 5); s->lna_gain_auto = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops, V4L2_CID_RF_TUNER_LNA_GAIN_AUTO, 0, 1, 1, 0); s->lna_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 14, 1, 8); v4l2_ctrl_auto_cluster(2, &s->lna_gain_auto, 0, false); s->mixer_gain_auto = 
v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops, V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO, 0, 1, 1, 0); s->mixer_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops, V4L2_CID_RF_TUNER_MIXER_GAIN, 0, 15, 1, 8); v4l2_ctrl_auto_cluster(2, &s->mixer_gain_auto, 0, false); s->if_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops, V4L2_CID_RF_TUNER_IF_GAIN, 0, 15, 1, 0); if (s->hdl.error) { ret = s->hdl.error; dev_err(s->dev, "Could not initialize controls\n"); goto err_free_controls; } v4l2_ctrl_handler_setup(&s->hdl); s->v4l2_dev.ctrl_handler = &s->hdl; s->vdev.v4l2_dev = &s->v4l2_dev; s->vdev.lock = &s->v4l2_lock; ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1); if (ret) { dev_err(s->dev, "Failed to register as video device (%d)\n", ret); goto err_unregister_v4l2_dev; } dev_info(s->dev, "Registered as %s\n", video_device_node_name(&s->vdev)); dev_notice(s->dev, "SDR API is still slightly experimental and functionality changes may follow\n"); return 0; err_free_controls: v4l2_ctrl_handler_free(&s->hdl); err_unregister_v4l2_dev: v4l2_device_unregister(&s->v4l2_dev); err_free_mem: kfree(s); return ret; } /* USB device ID list */ static struct usb_device_id airspy_id_table[] = { { USB_DEVICE(0x1d50, 0x60a1) }, /* AirSpy */ { } }; MODULE_DEVICE_TABLE(usb, airspy_id_table); /* USB subsystem interface */ static struct usb_driver airspy_driver = { .name = KBUILD_MODNAME, .probe = airspy_probe, .disconnect = airspy_disconnect, .id_table = airspy_id_table, }; module_usb_driver(airspy_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("AirSpy SDR"); MODULE_LICENSE("GPL");
gpl-2.0
CandyDevices/kernel_samsung_espresso10
drivers/mmc/host/sdhci-esdhc-imx.c
1390
9366
/* * Freescale eSDHC i.MX controller driver for the platform bus. * * derived from the OF-version. * * Copyright (c) 2010 Pengutronix e.K. * Author: Wolfram Sang <w.sang@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. */ #include <linux/io.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/mmc/host.h> #include <linux/mmc/sdhci-pltfm.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sdio.h> #include <mach/hardware.h> #include <mach/esdhc.h> #include "sdhci.h" #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" /* VENDOR SPEC register */ #define SDHCI_VENDOR_SPEC 0xC0 #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 #define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0) /* * The CMDTYPE of the CMD register (offset 0xE) should be set to * "11" when the STOP CMD12 is issued on imx53 to abort one * open ended multi-blk IO. Otherwise the TC INT wouldn't * be generated. * In exact block transfer, the controller doesn't complete the * operations automatically as required at the end of the * transfer and remains on hold if the abort command is not sent. * As a result, the TC flag is not asserted and SW received timeout * exeception. Bit1 of Vendor Spec registor is used to fix it. 
*/ #define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) struct pltfm_imx_data { int flags; u32 scratchpad; }; static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) { void __iomem *base = host->ioaddr + (reg & ~0x3); u32 shift = (reg & 0x3) * 8; writel(((readl(base) & ~(mask << shift)) | (val << shift)), base); } static u32 esdhc_readl_le(struct sdhci_host *host, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; /* fake CARD_PRESENT flag on mx25/35 */ u32 val = readl(host->ioaddr + reg); if (unlikely((reg == SDHCI_PRESENT_STATE) && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; if (boarddata && gpio_is_valid(boarddata->cd_gpio) && gpio_get_value(boarddata->cd_gpio)) /* no card, if a valid gpio says so... */ val &= ~SDHCI_CARD_PRESENT; else /* ... in all other cases assume card is present */ val |= SDHCI_CARD_PRESENT; } return val; } static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) /* * these interrupts won't work with a custom card_detect gpio * (only applied to mx25/35) */ val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (reg == SDHCI_INT_STATUS) && (val & SDHCI_INT_DATA_END))) { u32 v; v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); } writel(val, host->ioaddr + reg); } static u16 esdhc_readw_le(struct sdhci_host *host, int reg) { if (unlikely(reg == SDHCI_HOST_VERSION)) reg ^= 2; return readw(host->ioaddr + reg); } static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) { struct 
sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; switch (reg) { case SDHCI_TRANSFER_MODE: /* * Postpone this write, we must do it together with a * command write that is down below. */ if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (host->cmd->opcode == SD_IO_RW_EXTENDED) && (host->cmd->data->blocks > 1) && (host->cmd->data->flags & MMC_DATA_READ)) { u32 v; v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); } imx_data->scratchpad = val; return; case SDHCI_COMMAND: if ((host->cmd->opcode == MMC_STOP_TRANSMISSION || host->cmd->opcode == MMC_SET_BLOCK_COUNT) && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) val |= SDHCI_CMD_ABORTCMD; writel(val << 16 | imx_data->scratchpad, host->ioaddr + SDHCI_TRANSFER_MODE); return; case SDHCI_BLOCK_SIZE: val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); break; } esdhc_clrset_le(host, 0xffff, val, reg); } static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) { u32 new_val; switch (reg) { case SDHCI_POWER_CONTROL: /* * FSL put some DMA bits here * If your board has a regulator, code should be here */ return; case SDHCI_HOST_CONTROL: /* FSL messed up here, so we can just keep those two */ new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); /* ensure the endianess */ new_val |= ESDHC_HOST_CONTROL_LE; /* DMA mode bits are shifted */ new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; esdhc_clrset_le(host, 0xffff, new_val, reg); return; } esdhc_clrset_le(host, 0xff, val, reg); } static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); return clk_get_rate(pltfm_host->clk); } static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); return clk_get_rate(pltfm_host->clk) / 256 / 16; } static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) { struct 
esdhc_platform_data *boarddata = host->mmc->parent->platform_data; if (boarddata && gpio_is_valid(boarddata->wp_gpio)) return gpio_get_value(boarddata->wp_gpio); else return -ENOSYS; } static struct sdhci_ops sdhci_esdhc_ops = { .read_l = esdhc_readl_le, .read_w = esdhc_readw_le, .write_l = esdhc_writel_le, .write_w = esdhc_writew_le, .write_b = esdhc_writeb_le, .set_clock = esdhc_set_clock, .get_max_clock = esdhc_pltfm_get_max_clock, .get_min_clock = esdhc_pltfm_get_min_clock, }; static irqreturn_t cd_irq(int irq, void *data) { struct sdhci_host *sdhost = (struct sdhci_host *)data; tasklet_schedule(&sdhost->card_tasklet); return IRQ_HANDLED; }; static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; struct clk *clk; int err; struct pltfm_imx_data *imx_data; clk = clk_get(mmc_dev(host->mmc), NULL); if (IS_ERR(clk)) { dev_err(mmc_dev(host->mmc), "clk err\n"); return PTR_ERR(clk); } clk_enable(clk); pltfm_host->clk = clk; imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); if (!imx_data) { clk_disable(pltfm_host->clk); clk_put(pltfm_host->clk); return -ENOMEM; } pltfm_host->priv = imx_data; host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; if (cpu_is_mx25() || cpu_is_mx35()) { /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; /* write_protect can't be routed to controller, use gpio */ sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; } if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; if (boarddata) { err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); if (err) { dev_warn(mmc_dev(host->mmc), "no write-protect pin available!\n"); boarddata->wp_gpio = err; } err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD"); if (err) { dev_warn(mmc_dev(host->mmc), "no card-detect pin 
available!\n"); goto no_card_detect_pin; } /* i.MX5x has issues to be researched */ if (!cpu_is_mx25() && !cpu_is_mx35()) goto not_supported; err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, mmc_hostname(host->mmc), host); if (err) { dev_warn(mmc_dev(host->mmc), "request irq error\n"); goto no_card_detect_irq; } imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; /* Now we have a working card_detect again */ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; } return 0; no_card_detect_irq: gpio_free(boarddata->cd_gpio); no_card_detect_pin: boarddata->cd_gpio = err; not_supported: kfree(imx_data); return 0; } static void esdhc_pltfm_exit(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; struct pltfm_imx_data *imx_data = pltfm_host->priv; if (boarddata && gpio_is_valid(boarddata->wp_gpio)) gpio_free(boarddata->wp_gpio); if (boarddata && gpio_is_valid(boarddata->cd_gpio)) { gpio_free(boarddata->cd_gpio); if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) free_irq(gpio_to_irq(boarddata->cd_gpio), host); } clk_disable(pltfm_host->clk); clk_put(pltfm_host->clk); kfree(imx_data); } struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_CARD_DETECTION, /* ADMA has issues. Might be fixable */ .ops = &sdhci_esdhc_ops, .init = esdhc_pltfm_init, .exit = esdhc_pltfm_exit, };
gpl-2.0
DirtyUnicorns/android_kernel_asus_Z00A
drivers/hid/hid-roccat-pyra.c
1646
16900
/* * Roccat Pyra driver for Linux * * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net> */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ /* * Roccat Pyra is a mobile gamer mouse which comes in wired and wireless * variant. Wireless variant is not tested. * Userland tools can be found at http://sourceforge.net/projects/roccat */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/hid-roccat.h> #include "hid-ids.h" #include "hid-roccat-common.h" #include "hid-roccat-pyra.h" static uint profile_numbers[5] = {0, 1, 2, 3, 4}; /* pyra_class is used for creating sysfs attributes via roccat char device */ static struct class *pyra_class; static void profile_activated(struct pyra_device *pyra, unsigned int new_profile) { pyra->actual_profile = new_profile; pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi; } static int pyra_send_control(struct usb_device *usb_dev, int value, enum pyra_control_requests request) { struct roccat_common2_control control; if ((request == PYRA_CONTROL_REQUEST_PROFILE_SETTINGS || request == PYRA_CONTROL_REQUEST_PROFILE_BUTTONS) && (value < 0 || value > 4)) return -EINVAL; control.command = ROCCAT_COMMON_COMMAND_CONTROL; control.value = value; control.request = request; return roccat_common2_send(usb_dev, ROCCAT_COMMON_COMMAND_CONTROL, &control, sizeof(struct roccat_common2_control)); } static int pyra_get_profile_settings(struct usb_device *usb_dev, struct pyra_profile_settings *buf, int number) { int retval; retval = pyra_send_control(usb_dev, number, PYRA_CONTROL_REQUEST_PROFILE_SETTINGS); if (retval) return retval; return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS, buf, PYRA_SIZE_PROFILE_SETTINGS); 
} static int pyra_get_settings(struct usb_device *usb_dev, struct pyra_settings *buf) { return roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS, buf, PYRA_SIZE_SETTINGS); } static int pyra_set_settings(struct usb_device *usb_dev, struct pyra_settings const *settings) { return roccat_common2_send_with_status(usb_dev, PYRA_COMMAND_SETTINGS, settings, PYRA_SIZE_SETTINGS); } static ssize_t pyra_sysfs_read(struct file *fp, struct kobject *kobj, char *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = container_of(kobj, struct device, kobj)->parent->parent; struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off >= real_size) return 0; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&pyra->pyra_lock); retval = roccat_common2_receive(usb_dev, command, buf, real_size); mutex_unlock(&pyra->pyra_lock); if (retval) return retval; return real_size; } static ssize_t pyra_sysfs_write(struct file *fp, struct kobject *kobj, void const *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = container_of(kobj, struct device, kobj)->parent->parent; struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&pyra->pyra_lock); retval = roccat_common2_send_with_status(usb_dev, command, (void *)buf, real_size); mutex_unlock(&pyra->pyra_lock); if (retval) return retval; return real_size; } #define PYRA_SYSFS_W(thingy, THINGY) \ static ssize_t pyra_sysfs_write_ ## thingy(struct file *fp, \ struct kobject *kobj, struct bin_attribute *attr, char *buf, \ loff_t off, size_t count) \ { \ return pyra_sysfs_write(fp, kobj, buf, off, count, \ PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \ } #define PYRA_SYSFS_R(thingy, THINGY) \ static ssize_t 
pyra_sysfs_read_ ## thingy(struct file *fp, \ struct kobject *kobj, struct bin_attribute *attr, char *buf, \ loff_t off, size_t count) \ { \ return pyra_sysfs_read(fp, kobj, buf, off, count, \ PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \ } #define PYRA_SYSFS_RW(thingy, THINGY) \ PYRA_SYSFS_W(thingy, THINGY) \ PYRA_SYSFS_R(thingy, THINGY) #define PYRA_BIN_ATTRIBUTE_RW(thingy, THINGY) \ { \ .attr = { .name = #thingy, .mode = 0660 }, \ .size = PYRA_SIZE_ ## THINGY, \ .read = pyra_sysfs_read_ ## thingy, \ .write = pyra_sysfs_write_ ## thingy \ } #define PYRA_BIN_ATTRIBUTE_R(thingy, THINGY) \ { \ .attr = { .name = #thingy, .mode = 0440 }, \ .size = PYRA_SIZE_ ## THINGY, \ .read = pyra_sysfs_read_ ## thingy, \ } #define PYRA_BIN_ATTRIBUTE_W(thingy, THINGY) \ { \ .attr = { .name = #thingy, .mode = 0220 }, \ .size = PYRA_SIZE_ ## THINGY, \ .write = pyra_sysfs_write_ ## thingy \ } PYRA_SYSFS_W(control, CONTROL) PYRA_SYSFS_RW(info, INFO) PYRA_SYSFS_RW(profile_settings, PROFILE_SETTINGS) PYRA_SYSFS_RW(profile_buttons, PROFILE_BUTTONS) PYRA_SYSFS_R(settings, SETTINGS) static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj)->parent->parent; struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); ssize_t retval; retval = pyra_send_control(usb_dev, *(uint *)(attr->private), PYRA_CONTROL_REQUEST_PROFILE_SETTINGS); if (retval) return retval; return pyra_sysfs_read(fp, kobj, buf, off, count, PYRA_SIZE_PROFILE_SETTINGS, PYRA_COMMAND_PROFILE_SETTINGS); } static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj)->parent->parent; struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); ssize_t retval; retval = pyra_send_control(usb_dev, 
*(uint *)(attr->private), PYRA_CONTROL_REQUEST_PROFILE_BUTTONS); if (retval) return retval; return pyra_sysfs_read(fp, kobj, buf, off, count, PYRA_SIZE_PROFILE_BUTTONS, PYRA_COMMAND_PROFILE_BUTTONS); } static ssize_t pyra_sysfs_write_settings(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj)->parent->parent; struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval = 0; struct pyra_roccat_report roccat_report; struct pyra_settings const *settings; if (off != 0 || count != PYRA_SIZE_SETTINGS) return -EINVAL; mutex_lock(&pyra->pyra_lock); settings = (struct pyra_settings const *)buf; retval = pyra_set_settings(usb_dev, settings); if (retval) { mutex_unlock(&pyra->pyra_lock); return retval; } profile_activated(pyra, settings->startup_profile); roccat_report.type = PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2; roccat_report.value = settings->startup_profile + 1; roccat_report.key = 0; roccat_report_event(pyra->chrdev_minor, (uint8_t const *)&roccat_report); mutex_unlock(&pyra->pyra_lock); return PYRA_SIZE_SETTINGS; } static ssize_t pyra_sysfs_show_actual_cpi(struct device *dev, struct device_attribute *attr, char *buf) { struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_cpi); } static ssize_t pyra_sysfs_show_actual_profile(struct device *dev, struct device_attribute *attr, char *buf) { struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); struct pyra_settings settings; mutex_lock(&pyra->pyra_lock); roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS, &settings, PYRA_SIZE_SETTINGS); mutex_unlock(&pyra->pyra_lock); return snprintf(buf, PAGE_SIZE, "%d\n", settings.startup_profile); } 
static ssize_t pyra_sysfs_show_firmware_version(struct device *dev, struct device_attribute *attr, char *buf) { struct pyra_device *pyra; struct usb_device *usb_dev; struct pyra_info info; dev = dev->parent->parent; pyra = hid_get_drvdata(dev_get_drvdata(dev)); usb_dev = interface_to_usbdev(to_usb_interface(dev)); mutex_lock(&pyra->pyra_lock); roccat_common2_receive(usb_dev, PYRA_COMMAND_INFO, &info, PYRA_SIZE_INFO); mutex_unlock(&pyra->pyra_lock); return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version); } static struct device_attribute pyra_attributes[] = { __ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL), __ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL), __ATTR(firmware_version, 0440, pyra_sysfs_show_firmware_version, NULL), __ATTR(startup_profile, 0440, pyra_sysfs_show_actual_profile, NULL), __ATTR_NULL }; static struct bin_attribute pyra_bin_attributes[] = { PYRA_BIN_ATTRIBUTE_W(control, CONTROL), PYRA_BIN_ATTRIBUTE_RW(info, INFO), PYRA_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS), PYRA_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS), PYRA_BIN_ATTRIBUTE_RW(settings, SETTINGS), { .attr = { .name = "profile1_settings", .mode = 0440 }, .size = PYRA_SIZE_PROFILE_SETTINGS, .read = pyra_sysfs_read_profilex_settings, .private = &profile_numbers[0] }, { .attr = { .name = "profile2_settings", .mode = 0440 }, .size = PYRA_SIZE_PROFILE_SETTINGS, .read = pyra_sysfs_read_profilex_settings, .private = &profile_numbers[1] }, { .attr = { .name = "profile3_settings", .mode = 0440 }, .size = PYRA_SIZE_PROFILE_SETTINGS, .read = pyra_sysfs_read_profilex_settings, .private = &profile_numbers[2] }, { .attr = { .name = "profile4_settings", .mode = 0440 }, .size = PYRA_SIZE_PROFILE_SETTINGS, .read = pyra_sysfs_read_profilex_settings, .private = &profile_numbers[3] }, { .attr = { .name = "profile5_settings", .mode = 0440 }, .size = PYRA_SIZE_PROFILE_SETTINGS, .read = pyra_sysfs_read_profilex_settings, .private = &profile_numbers[4] }, { 
.attr = { .name = "profile1_buttons", .mode = 0440 }, .size = PYRA_SIZE_PROFILE_BUTTONS, .read = pyra_sysfs_read_profilex_buttons, .private = &profile_numbers[0] }, { .attr = { .name = "profile2_buttons", .mode = 0440 }, .size = PYRA_SIZE_PROFILE_BUTTONS, .read = pyra_sysfs_read_profilex_buttons, .private = &profile_numbers[1] }, { .attr = { .name = "profile3_buttons", .mode = 0440 }, .size = PYRA_SIZE_PROFILE_BUTTONS, .read = pyra_sysfs_read_profilex_buttons, .private = &profile_numbers[2] }, { .attr = { .name = "profile4_buttons", .mode = 0440 }, .size = PYRA_SIZE_PROFILE_BUTTONS, .read = pyra_sysfs_read_profilex_buttons, .private = &profile_numbers[3] }, { .attr = { .name = "profile5_buttons", .mode = 0440 }, .size = PYRA_SIZE_PROFILE_BUTTONS, .read = pyra_sysfs_read_profilex_buttons, .private = &profile_numbers[4] }, __ATTR_NULL }; static int pyra_init_pyra_device_struct(struct usb_device *usb_dev, struct pyra_device *pyra) { struct pyra_settings settings; int retval, i; mutex_init(&pyra->pyra_lock); retval = pyra_get_settings(usb_dev, &settings); if (retval) return retval; for (i = 0; i < 5; ++i) { retval = pyra_get_profile_settings(usb_dev, &pyra->profile_settings[i], i); if (retval) return retval; } profile_activated(pyra, settings.startup_profile); return 0; } static int pyra_init_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *usb_dev = interface_to_usbdev(intf); struct pyra_device *pyra; int retval; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE) { pyra = kzalloc(sizeof(*pyra), GFP_KERNEL); if (!pyra) { hid_err(hdev, "can't alloc device descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, pyra); retval = pyra_init_pyra_device_struct(usb_dev, pyra); if (retval) { hid_err(hdev, "couldn't init struct pyra_device\n"); goto exit_free; } retval = roccat_connect(pyra_class, hdev, sizeof(struct pyra_roccat_report)); if (retval < 0) { hid_err(hdev, 
"couldn't init char dev\n"); } else { pyra->chrdev_minor = retval; pyra->roccat_claimed = 1; } } else { hid_set_drvdata(hdev, NULL); } return 0; exit_free: kfree(pyra); return retval; } static void pyra_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct pyra_device *pyra; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE) { pyra = hid_get_drvdata(hdev); if (pyra->roccat_claimed) roccat_disconnect(pyra->chrdev_minor); kfree(hid_get_drvdata(hdev)); } } static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); goto exit; } retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (retval) { hid_err(hdev, "hw start failed\n"); goto exit; } retval = pyra_init_specials(hdev); if (retval) { hid_err(hdev, "couldn't install mouse\n"); goto exit_stop; } return 0; exit_stop: hid_hw_stop(hdev); exit: return retval; } static void pyra_remove(struct hid_device *hdev) { pyra_remove_specials(hdev); hid_hw_stop(hdev); } static void pyra_keep_values_up_to_date(struct pyra_device *pyra, u8 const *data) { struct pyra_mouse_event_button const *button_event; switch (data[0]) { case PYRA_MOUSE_REPORT_NUMBER_BUTTON: button_event = (struct pyra_mouse_event_button const *)data; switch (button_event->type) { case PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2: profile_activated(pyra, button_event->data1 - 1); break; case PYRA_MOUSE_EVENT_BUTTON_TYPE_CPI: pyra->actual_cpi = button_event->data1; break; } break; } } static void pyra_report_to_chrdev(struct pyra_device const *pyra, u8 const *data) { struct pyra_roccat_report roccat_report; struct pyra_mouse_event_button const *button_event; if (data[0] != PYRA_MOUSE_REPORT_NUMBER_BUTTON) return; button_event = (struct pyra_mouse_event_button const *)data; switch (button_event->type) { case PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2: case 
PYRA_MOUSE_EVENT_BUTTON_TYPE_CPI: roccat_report.type = button_event->type; roccat_report.value = button_event->data1; roccat_report.key = 0; roccat_report_event(pyra->chrdev_minor, (uint8_t const *)&roccat_report); break; case PYRA_MOUSE_EVENT_BUTTON_TYPE_MACRO: case PYRA_MOUSE_EVENT_BUTTON_TYPE_SHORTCUT: case PYRA_MOUSE_EVENT_BUTTON_TYPE_QUICKLAUNCH: if (button_event->data2 == PYRA_MOUSE_EVENT_BUTTON_PRESS) { roccat_report.type = button_event->type; roccat_report.key = button_event->data1; /* * pyra reports profile numbers with range 1-5. * Keeping this behaviour. */ roccat_report.value = pyra->actual_profile + 1; roccat_report_event(pyra->chrdev_minor, (uint8_t const *)&roccat_report); } break; } } static int pyra_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct pyra_device *pyra = hid_get_drvdata(hdev); if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) return 0; if (pyra == NULL) return 0; pyra_keep_values_up_to_date(pyra, data); if (pyra->roccat_claimed) pyra_report_to_chrdev(pyra, data); return 0; } static const struct hid_device_id pyra_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) }, { } }; MODULE_DEVICE_TABLE(hid, pyra_devices); static struct hid_driver pyra_driver = { .name = "pyra", .id_table = pyra_devices, .probe = pyra_probe, .remove = pyra_remove, .raw_event = pyra_raw_event }; static int __init pyra_init(void) { int retval; /* class name has to be same as driver name */ pyra_class = class_create(THIS_MODULE, "pyra"); if (IS_ERR(pyra_class)) return PTR_ERR(pyra_class); pyra_class->dev_attrs = pyra_attributes; pyra_class->dev_bin_attrs = pyra_bin_attributes; retval = hid_register_driver(&pyra_driver); if (retval) class_destroy(pyra_class); return retval; } static void __exit pyra_exit(void) { 
hid_unregister_driver(&pyra_driver); class_destroy(pyra_class); } module_init(pyra_init); module_exit(pyra_exit); MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat Pyra driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
Radium-Devices/Radium_yu
drivers/iio/common/hid-sensors/hid-sensor-attributes.c
2670
6062
/*
 * HID Sensors Driver
 * Copyright (c) 2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>

/* Integer 10^power; used throughout for int+micro fixed-point conversions. */
static int pow_10(unsigned power)
{
	int i;
	int ret = 1;

	for (i = 0; i < power; ++i)
		ret = ret * 10;

	return ret;
}

/*
 * Integer division producing the whole part in *whole and the fractional
 * part scaled to micro units (1e-6) in *micro_frac.
 * A zero divisor yields 0/0 instead of a division fault.
 */
static void simple_div(int dividend, int divisor, int *whole,
				int *micro_frac)
{
	int rem;
	int exp = 0;

	*micro_frac = 0;
	if (divisor == 0) {
		*whole = 0;
		return;
	}
	*whole = dividend/divisor;
	rem = dividend % divisor;
	if (rem) {
		/* Scale the remainder up until it exceeds the divisor, then
		 * shift the quotient back to a micro (1e-6) fraction. */
		while (rem <= divisor) {
			rem *= 10;
			exp++;
		}
		*micro_frac = (rem / divisor) * pow_10(6-exp);
	}
}

/* Split "no" (interpreted as value * 10^-exp) into an integer part (*val1)
 * and a micro-unit fraction (*val2). */
static void split_micro_fraction(unsigned int no, int exp, int *val1, int *val2)
{
	*val1 = no/pow_10(exp);
	*val2 = no%pow_10(exp) * pow_10(6-exp);
}

/*
VTF format uses exponent and variable size format. For example if the size is 2
bytes 0x0067 with VTF16E14 format -> +1.03, To convert just change to 0x67 to
decimal and use two decimal as E14 stands for 10^-2. Negative numbers are 2's
complement
*/
static void convert_from_vtf_format(u32 value, int size, int exp,
					int *val1, int *val2)
{
	int sign = 1;

	/* Top bit of the "size"-byte field set => negative, 2's complement. */
	if (value & BIT(size*8 - 1)) {
		value =  ((1LL << (size * 8)) - value);
		sign = -1;
	}
	exp = hid_sensor_convert_exponent(exp);
	if (exp >= 0) {
		*val1 = sign * value * pow_10(exp);
		*val2 = 0;
	} else {
		split_micro_fraction(value, -exp, val1, val2);
		/* Carry the sign on whichever part is non-zero. */
		if (*val1)
			*val1 = sign * (*val1);
		else
			*val2 = sign * (*val2);
	}
}

/* Inverse of convert_from_vtf_format(): pack int+micro into a VTF field. */
static u32 convert_to_vtf_format(int size, int exp, int val1, int val2)
{
	u32 value;
	int sign = 1;

	if (val1 < 0 || val2 < 0)
		sign = -1;
	exp = hid_sensor_convert_exponent(exp);
	if (exp < 0) {
		value = abs(val1) * pow_10(-exp);
		value += abs(val2) / pow_10(6+exp);
	} else
		value = abs(val1) / pow_10(exp);
	if (sign < 0)
		value =  ((1LL << (size * 8)) - value);

	return value;
}

/*
 * Read the sensor's report interval and return it as a sampling frequency
 * in Hz, split into integer (*val1) and micro (*val2) parts.
 * Returns IIO_VAL_INT_PLUS_MICRO on success, -EINVAL on failure or if the
 * interval unit is not milliseconds/seconds.
 */
int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
				int *val1, int *val2)
{
	s32 value;
	int ret;

	ret = sensor_hub_get_feature(st->hsdev,
		st->poll.report_id,
		st->poll.index, &value);
	if (ret < 0 || value < 0) {
		*val1 = *val2 = 0;
		return -EINVAL;
	} else {
		/* Frequency (Hz) is the reciprocal of the report interval. */
		if (st->poll.units == HID_USAGE_SENSOR_UNITS_MILLISECOND)
			simple_div(1000, value, val1, val2);
		else if (st->poll.units == HID_USAGE_SENSOR_UNITS_SECOND)
			simple_div(1, value, val1, val2);
		else {
			*val1 = *val2 = 0;
			return -EINVAL;
		}
	}

	return IIO_VAL_INT_PLUS_MICRO;
}
EXPORT_SYMBOL(hid_sensor_read_samp_freq_value);

/*
 * Set the sensor's report interval from a sampling frequency given as
 * integer (val1) and micro (val2) parts in Hz.
 * Returns 0 on success or -EINVAL on bad input / feature-write failure.
 */
int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
				int val1, int val2)
{
	s32 value;
	int ret;

	/* Reject negative frequencies up front; previously this only set a
	 * local that was immediately overwritten, so bad input fell through
	 * to the feature write. */
	if (val1 < 0 || val2 < 0)
		return -EINVAL;

	value = val1 * pow_10(6) + val2;
	if (value) {
		/* Convert Hz (scaled by 1e6) back to the interval unit the
		 * device reports in; a frequency of 0 disables reporting. */
		if (st->poll.units == HID_USAGE_SENSOR_UNITS_MILLISECOND)
			value = pow_10(9)/value;
		else if (st->poll.units == HID_USAGE_SENSOR_UNITS_SECOND)
			value = pow_10(6)/value;
		else
			value = 0;
	}
	ret = sensor_hub_set_feature(st->hsdev,
		st->poll.report_id,
		st->poll.index, value);
	if (ret < 0 || value < 0)
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);

/*
 * Read the sensitivity (hysteresis) feature and decode it from VTF format
 * into integer (*val1) and micro (*val2) parts.
 * Returns IIO_VAL_INT_PLUS_MICRO on success, -EINVAL on failure.
 */
int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
				int *val1, int *val2)
{
	s32 value;
	int ret;

	ret = sensor_hub_get_feature(st->hsdev,
		st->sensitivity.report_id,
		st->sensitivity.index, &value);
	if (ret < 0 || value < 0) {
		*val1 = *val2 = 0;
		return -EINVAL;
	} else {
		convert_from_vtf_format(value, st->sensitivity.size,
					st->sensitivity.unit_expo,
					val1, val2);
	}

	return IIO_VAL_INT_PLUS_MICRO;
}
EXPORT_SYMBOL(hid_sensor_read_raw_hyst_value);

/*
 * Encode integer (val1) + micro (val2) into VTF format and write it to the
 * sensitivity (hysteresis) feature report.
 * Returns 0 on success or -EINVAL on failure.
 */
int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
					int val1, int val2)
{
	s32 value;
	int ret;

	value = convert_to_vtf_format(st->sensitivity.size,
				st->sensitivity.unit_expo,
				val1, val2);
	ret = sensor_hub_set_feature(st->hsdev,
		st->sensitivity.report_id,
		st->sensitivity.index, value);
	if (ret < 0 || value < 0)
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);

/*
 * Look up the attribute-report info (index/report-id/units/exponent) for the
 * common sensor properties of usage_id: report interval, reporting state,
 * power state and sensitivity.  Always returns 0; a property a device lacks
 * simply leaves its attribute info unset.
 */
int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
					u32 usage_id,
					struct hid_sensor_common *st)
{
	sensor_hub_input_get_attribute_info(hsdev,
					HID_FEATURE_REPORT, usage_id,
					HID_USAGE_SENSOR_PROP_REPORT_INTERVAL,
					&st->poll);

	sensor_hub_input_get_attribute_info(hsdev,
					HID_FEATURE_REPORT, usage_id,
					HID_USAGE_SENSOR_PROP_REPORT_STATE,
					&st->report_state);

	/* "PROY" is a historical typo carried by the hid-sensor-hub header;
	 * the usage is the sensor power-state property. */
	sensor_hub_input_get_attribute_info(hsdev,
					HID_FEATURE_REPORT, usage_id,
					HID_USAGE_SENSOR_PROY_POWER_STATE,
					&st->power_state);

	sensor_hub_input_get_attribute_info(hsdev,
			HID_FEATURE_REPORT, usage_id,
			HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS,
			 &st->sensitivity);

	hid_dbg(hsdev->hdev, "common attributes: %x:%x, %x:%x, %x:%x %x:%x\n",
		st->poll.index, st->poll.report_id,
		st->report_state.index, st->report_state.report_id,
		st->power_state.index, st->power_state.report_id,
		st->sensitivity.index, st->sensitivity.report_id);

	return 0;
}
EXPORT_SYMBOL(hid_sensor_parse_common_attributes);

MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
MODULE_DESCRIPTION("HID Sensor common attribute processing");
MODULE_LICENSE("GPL");
gpl-2.0
gandalf-3d/mordorKernel-note3
drivers/net/wireless/b43/wa.c
8558
18347
/* Broadcom B43 wireless driver PHY workarounds. Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it> Copyright (c) 2005-2007 Michael Buesch <m@bues.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "main.h" #include "tables.h" #include "phy_common.h" #include "wa.h" static void b43_wa_papd(struct b43_wldev *dev) { u16 backup; backup = b43_ofdmtab_read16(dev, B43_OFDMTAB_PWRDYN2, 0); b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, 7); b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 0, 0); b43_dummy_transmission(dev, true, true); b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, backup); } static void b43_wa_auxclipthr(struct b43_wldev *dev) { b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x3800); } static void b43_wa_afcdac(struct b43_wldev *dev) { b43_phy_write(dev, 0x0035, 0x03FF); b43_phy_write(dev, 0x0036, 0x0400); } static void b43_wa_txdc_offset(struct b43_wldev *dev) { b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 0, 0x0051); } void b43_wa_initgains(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; b43_phy_write(dev, B43_PHY_LNAHPFCTL, 0x1FF9); b43_phy_mask(dev, B43_PHY_LPFGAINCTL, 0xFF0F); if (phy->rev <= 2) b43_ofdmtab_write16(dev, B43_OFDMTAB_LPFGAIN, 0, 0x1FBF); b43_radio_write16(dev, 0x0002, 0x1FBF); b43_phy_write(dev, 0x0024, 0x4680); b43_phy_write(dev, 0x0020, 0x0003); 
b43_phy_write(dev, 0x001D, 0x0F40); b43_phy_write(dev, 0x001F, 0x1C00); if (phy->rev <= 3) b43_phy_maskset(dev, 0x002A, 0x00FF, 0x0400); else if (phy->rev == 5) { b43_phy_maskset(dev, 0x002A, 0x00FF, 0x1A00); b43_phy_write(dev, 0x00CC, 0x2121); } if (phy->rev >= 3) b43_phy_write(dev, 0x00BA, 0x3ED5); } static void b43_wa_divider(struct b43_wldev *dev) { b43_phy_mask(dev, 0x002B, ~0x0100); b43_phy_write(dev, 0x008E, 0x58C1); } static void b43_wa_gt(struct b43_wldev *dev) /* Gain table. */ { if (dev->phy.rev <= 2) { b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 0, 15); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 1, 31); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 2, 42); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 3, 48); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 4, 58); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 0, 3); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 1, 3); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 2, 7); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25); } } static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */ { int i; if (0 /* FIXME: For APHY.rev=2 this might be needed */) { for (i = 0; i < 8; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_RSSI, i, i + 8); for (i = 8; i < 16; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_RSSI, i, i - 8); } else { for (i = 0; i < 
64; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_RSSI, i, i); } } static void b43_wa_analog(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; u16 ofdmrev; ofdmrev = b43_phy_read(dev, B43_PHY_VERSION_OFDM) & B43_PHYVER_VERSION; if (ofdmrev > 2) { if (phy->type == B43_PHYTYPE_A) b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1808); else b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 3, 0x1044); b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 4, 0x7201); b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 6, 0x0040); } } static void b43_wa_dac(struct b43_wldev *dev) { if (dev->phy.analog == 1) b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0034) | 0x0008); else b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0078) | 0x0010); } static void b43_wa_fft(struct b43_wldev *dev) /* Fine frequency table */ { int i; if (dev->phy.type == B43_PHYTYPE_A) for (i = 0; i < B43_TAB_FINEFREQA_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqa[i]); else for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqg[i]); } static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */ { struct b43_phy *phy = &dev->phy; int i; if (phy->type == B43_PHYTYPE_A) { if (phy->rev == 2) for (i = 0; i < B43_TAB_NOISEA2_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea2[i]); else for (i = 0; i < B43_TAB_NOISEA3_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea3[i]); } else { if (phy->rev == 1) for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg1[i]); else for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg2[i]); } } static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */ { int i; for (i = 0; i < B43_TAB_ROTOR_SIZE; i++) 
b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]); } static void b43_write_null_nst(struct b43_wldev *dev) { int i; for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, 0); } static void b43_write_nst(struct b43_wldev *dev, const u16 *nst) { int i; for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, nst[i]); } static void b43_wa_nst(struct b43_wldev *dev) /* Noise scale table */ { struct b43_phy *phy = &dev->phy; if (phy->type == B43_PHYTYPE_A) { if (phy->rev <= 1) b43_write_null_nst(dev); else if (phy->rev == 2) b43_write_nst(dev, b43_tab_noisescalea2); else if (phy->rev == 3) b43_write_nst(dev, b43_tab_noisescalea3); else b43_write_nst(dev, b43_tab_noisescaleg3); } else { if (phy->rev >= 6) { if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) b43_write_nst(dev, b43_tab_noisescaleg3); else b43_write_nst(dev, b43_tab_noisescaleg2); } else { b43_write_nst(dev, b43_tab_noisescaleg1); } } } static void b43_wa_art(struct b43_wldev *dev) /* ADV retard table */ { int i; for (i = 0; i < B43_TAB_RETARD_SIZE; i++) b43_ofdmtab_write32(dev, B43_OFDMTAB_ADVRETARD, i, b43_tab_retard[i]); } static void b43_wa_txlna_gain(struct b43_wldev *dev) { b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 13, 0x0000); } static void b43_wa_crs_reset(struct b43_wldev *dev) { b43_phy_write(dev, 0x002C, 0x0064); } static void b43_wa_2060txlna_gain(struct b43_wldev *dev) { b43_hf_write(dev, b43_hf_read(dev) | B43_HF_2060W); } static void b43_wa_lms(struct b43_wldev *dev) { b43_phy_maskset(dev, 0x0055, 0xFFC0, 0x0004); } static void b43_wa_mixedsignal(struct b43_wldev *dev) { b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, 3); } static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */ { struct b43_phy *phy = &dev->phy; int i; const u16 *tab; if (phy->type == B43_PHYTYPE_A) { tab = b43_tab_sigmasqr1; } else if (phy->type == B43_PHYTYPE_G) { tab = b43_tab_sigmasqr2; } else { 
B43_WARN_ON(1); return; } for (i = 0; i < B43_TAB_SIGMASQR_SIZE; i++) { b43_ofdmtab_write16(dev, B43_OFDMTAB_MINSIGSQ, i, tab[i]); } } static void b43_wa_iqadc(struct b43_wldev *dev) { if (dev->phy.analog == 4) b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 0, b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 0) & ~0xF000); } static void b43_wa_crs_ed(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; if (phy->rev == 1) { b43_phy_write(dev, B43_PHY_CRSTHRES1_R1, 0x4F19); } else if (phy->rev == 2) { b43_phy_write(dev, B43_PHY_CRSTHRES1, 0x1861); b43_phy_write(dev, B43_PHY_CRSTHRES2, 0x0271); b43_phy_set(dev, B43_PHY_ANTDWELL, 0x0800); } else { b43_phy_write(dev, B43_PHY_CRSTHRES1, 0x0098); b43_phy_write(dev, B43_PHY_CRSTHRES2, 0x0070); b43_phy_write(dev, B43_PHY_OFDM(0xC9), 0x0080); b43_phy_set(dev, B43_PHY_ANTDWELL, 0x0800); } } static void b43_wa_crs_thr(struct b43_wldev *dev) { b43_phy_maskset(dev, B43_PHY_CRS0, ~0x03C0, 0xD000); } static void b43_wa_crs_blank(struct b43_wldev *dev) { b43_phy_write(dev, B43_PHY_OFDM(0x2C), 0x005A); } static void b43_wa_cck_shiftbits(struct b43_wldev *dev) { b43_phy_write(dev, B43_PHY_CCKSHIFTBITS, 0x0026); } static void b43_wa_wrssi_offset(struct b43_wldev *dev) { int i; if (dev->phy.rev == 1) { for (i = 0; i < 16; i++) { b43_ofdmtab_write16(dev, B43_OFDMTAB_WRSSI_R1, i, 0x0020); } } else { for (i = 0; i < 32; i++) { b43_ofdmtab_write16(dev, B43_OFDMTAB_WRSSI, i, 0x0820); } } } static void b43_wa_txpuoff_rxpuon(struct b43_wldev *dev) { b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_0F, 2, 15); b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_0F, 3, 20); } static void b43_wa_altagc(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; if (phy->rev == 1) { b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 254); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 1, 13); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 2, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 3, 25); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 0, 0x2710); 
b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 1, 0x9B83); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 2, 0x9B83); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 3, 0x0F8D); b43_phy_write(dev, B43_PHY_LMS, 4); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0, 254); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 1, 13); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 2, 19); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 3, 25); } b43_phy_maskset(dev, B43_PHY_CCKSHIFTBITS_WA, 0x00FF, 0x5700); b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x007F, 0x000F); b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x3F80, 0x2B80); b43_phy_maskset(dev, B43_PHY_ANTWRSETT, 0xF0FF, 0x0300); b43_radio_set(dev, 0x7A, 0x0008); b43_phy_maskset(dev, B43_PHY_N1P1GAIN, ~0x000F, 0x0008); b43_phy_maskset(dev, B43_PHY_P1P2GAIN, ~0x0F00, 0x0600); b43_phy_maskset(dev, B43_PHY_N1N2GAIN, ~0x0F00, 0x0700); b43_phy_maskset(dev, B43_PHY_N1P1GAIN, ~0x0F00, 0x0100); if (phy->rev == 1) { b43_phy_maskset(dev, B43_PHY_N1N2GAIN, ~0x000F, 0x0007); } b43_phy_maskset(dev, B43_PHY_OFDM(0x88), ~0x00FF, 0x001C); b43_phy_maskset(dev, B43_PHY_OFDM(0x88), ~0x3F00, 0x0200); b43_phy_maskset(dev, B43_PHY_OFDM(0x96), ~0x00FF, 0x001C); b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x00FF, 0x0020); b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x3F00, 0x0200); b43_phy_maskset(dev, B43_PHY_OFDM(0x82), ~0x00FF, 0x002E); b43_phy_maskset(dev, B43_PHY_OFDM(0x96), 0x00FF, 0x1A00); b43_phy_maskset(dev, B43_PHY_OFDM(0x81), ~0x00FF, 0x0028); b43_phy_maskset(dev, B43_PHY_OFDM(0x81), 0x00FF, 0x2C00); if (phy->rev == 1) { b43_phy_write(dev, B43_PHY_PEAK_COUNT, 0x092B); b43_phy_maskset(dev, B43_PHY_OFDM(0x1B), ~0x001E, 0x0002); } else { b43_phy_mask(dev, B43_PHY_OFDM(0x1B), ~0x001E); b43_phy_write(dev, B43_PHY_OFDM(0x1F), 0x287A); b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, ~0x000F, 0x0004); if (phy->rev >= 6) { b43_phy_write(dev, B43_PHY_OFDM(0x22), 0x287A); b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, 0x0FFF, 0x3000); } } b43_phy_maskset(dev, B43_PHY_DIVSRCHIDX, 0x8080, 
0x7874); b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x1C00); if (phy->rev == 1) { b43_phy_maskset(dev, B43_PHY_DIVP1P2GAIN, ~0x0F00, 0x0600); b43_phy_write(dev, B43_PHY_OFDM(0x8B), 0x005E); b43_phy_maskset(dev, B43_PHY_ANTWRSETT, ~0x00FF, 0x001E); b43_phy_write(dev, B43_PHY_OFDM(0x8D), 0x0002); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 0, 0); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 1, 7); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 2, 16); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 3, 28); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 0, 0); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 1, 7); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 2, 16); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 3, 28); } if (phy->rev >= 6) { b43_phy_mask(dev, B43_PHY_OFDM(0x26), ~0x0003); b43_phy_mask(dev, B43_PHY_OFDM(0x26), ~0x1000); } b43_phy_read(dev, B43_PHY_VERSION_OFDM); /* Dummy read */ } static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */ { b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480); } static void b43_wa_cpll_nonpilot(struct b43_wldev *dev) { b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 0, 0); b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 1, 0); } static void b43_wa_rssi_adc(struct b43_wldev *dev) { if (dev->phy.analog == 4) b43_phy_write(dev, 0x00DC, 0x7454); } static void b43_wa_boards_a(struct b43_wldev *dev) { if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM && dev->dev->board_type == SSB_BOARD_BU4306 && dev->dev->board_rev < 0x30) { b43_phy_write(dev, 0x0010, 0xE000); b43_phy_write(dev, 0x0013, 0x0140); b43_phy_write(dev, 0x0014, 0x0280); } else { if (dev->dev->board_type == SSB_BOARD_MP4318 && dev->dev->board_rev < 0x20) { b43_phy_write(dev, 0x0013, 0x0210); b43_phy_write(dev, 0x0014, 0x0840); } else { b43_phy_write(dev, 0x0013, 0x0140); b43_phy_write(dev, 0x0014, 0x0280); } if (dev->phy.rev <= 4) b43_phy_write(dev, 0x0010, 0xE000); else b43_phy_write(dev, 0x0010, 0x2000); b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 
1, 0x0039); b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 7, 0x0040); } } static void b43_wa_boards_g(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; struct b43_phy *phy = &dev->phy; if (dev->dev->board_vendor != SSB_BOARDVENDOR_BCM || dev->dev->board_type != SSB_BOARD_BU4306 || dev->dev->board_rev != 0x17) { if (phy->rev < 2) { b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX_R1, 1, 0x0002); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX_R1, 2, 0x0001); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 1, 0x0002); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 2, 0x0001); if ((sprom->boardflags_lo & B43_BFL_EXTLNA) && (phy->rev >= 7)) { b43_phy_mask(dev, B43_PHY_EXTG(0x11), 0xF7FF); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0020, 0x0001); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0021, 0x0001); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0022, 0x0001); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0023, 0x0000); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0000, 0x0000); b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0003, 0x0002); } } } if (sprom->boardflags_lo & B43_BFL_FEM) { b43_phy_write(dev, B43_PHY_GTABCTL, 0x3120); b43_phy_write(dev, B43_PHY_GTABDATA, 0xC480); } } void b43_wa_all(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; if (phy->type == B43_PHYTYPE_A) { switch (phy->rev) { case 2: b43_wa_papd(dev); b43_wa_auxclipthr(dev); b43_wa_afcdac(dev); b43_wa_txdc_offset(dev); b43_wa_initgains(dev); b43_wa_divider(dev); b43_wa_gt(dev); b43_wa_rssi_lt(dev); b43_wa_analog(dev); b43_wa_dac(dev); b43_wa_fft(dev); b43_wa_nft(dev); b43_wa_rt(dev); b43_wa_nst(dev); b43_wa_art(dev); b43_wa_txlna_gain(dev); b43_wa_crs_reset(dev); b43_wa_2060txlna_gain(dev); b43_wa_lms(dev); break; case 3: b43_wa_papd(dev); b43_wa_mixedsignal(dev); b43_wa_rssi_lt(dev); b43_wa_txdc_offset(dev); b43_wa_initgains(dev); b43_wa_dac(dev); b43_wa_nft(dev); b43_wa_nst(dev); b43_wa_msst(dev); b43_wa_analog(dev); b43_wa_gt(dev); b43_wa_txpuoff_rxpuon(dev); 
b43_wa_txlna_gain(dev); break; case 5: b43_wa_iqadc(dev); case 6: b43_wa_papd(dev); b43_wa_rssi_lt(dev); b43_wa_txdc_offset(dev); b43_wa_initgains(dev); b43_wa_dac(dev); b43_wa_nft(dev); b43_wa_nst(dev); b43_wa_msst(dev); b43_wa_analog(dev); b43_wa_gt(dev); b43_wa_txpuoff_rxpuon(dev); b43_wa_txlna_gain(dev); break; case 7: b43_wa_iqadc(dev); b43_wa_papd(dev); b43_wa_rssi_lt(dev); b43_wa_txdc_offset(dev); b43_wa_initgains(dev); b43_wa_dac(dev); b43_wa_nft(dev); b43_wa_nst(dev); b43_wa_msst(dev); b43_wa_analog(dev); b43_wa_gt(dev); b43_wa_txpuoff_rxpuon(dev); b43_wa_txlna_gain(dev); b43_wa_rssi_adc(dev); default: B43_WARN_ON(1); } b43_wa_boards_a(dev); } else if (phy->type == B43_PHYTYPE_G) { switch (phy->rev) { case 1://XXX review rev1 b43_wa_crs_ed(dev); b43_wa_crs_thr(dev); b43_wa_crs_blank(dev); b43_wa_cck_shiftbits(dev); b43_wa_fft(dev); b43_wa_nft(dev); b43_wa_rt(dev); b43_wa_nst(dev); b43_wa_art(dev); b43_wa_wrssi_offset(dev); b43_wa_altagc(dev); break; case 2: case 6: case 7: case 8: case 9: b43_wa_tr_ltov(dev); b43_wa_crs_ed(dev); b43_wa_rssi_lt(dev); b43_wa_nft(dev); b43_wa_nst(dev); b43_wa_msst(dev); b43_wa_wrssi_offset(dev); b43_wa_altagc(dev); b43_wa_analog(dev); b43_wa_txpuoff_rxpuon(dev); break; default: B43_WARN_ON(1); } b43_wa_boards_g(dev); } else { /* No N PHY support so far, LP PHY is in phy_lp.c */ B43_WARN_ON(1); } b43_wa_cpll_nonpilot(dev); }
gpl-2.0
haydenbbickerton/dv7_4285dx-kernel
net/sunrpc/auth_gss/gss_krb5_seqnum.c
11630
4619
/* * linux/net/sunrpc/gss_krb5_seqnum.c * * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/util_seqnum.c * * Copyright (c) 2000 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> */ /* * Copyright 1993 by OpenVision Technologies, Inc. * * Permission to use, copy, modify, distribute, and sell this software * and its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appears in all copies and * that both that copyright notice and this permission notice appear in * supporting documentation, and that the name of OpenVision not be used * in advertising or publicity pertaining to distribution of the software * without specific, written prior permission. OpenVision makes no * representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied warranty. * * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. 
*/

#include <linux/types.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif

/*
 * Build the encrypted 8-byte sequence-number field for the RC4-HMAC
 * (arcfour) enctype: 4 bytes of big-endian sequence number followed by
 * four copies of the direction octet, encrypted under a per-message key
 * derived from the token checksum (RFC 4757 style).
 *
 * Returns 0 on success or a negative/KRB5 error code.
 */
static s32
krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
		      unsigned char *cksum, unsigned char *buf)
{
	struct crypto_blkcipher *cipher;
	unsigned char plain[8];
	s32 code;

	dprintk("RPC: %s:\n", __func__);
	cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
					CRYPTO_ALG_ASYNC);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	/* big-endian sequence number, then 4 direction octets */
	plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	plain[3] = (unsigned char) ((seqnum >> 0) & 0xff);
	plain[4] = direction;
	plain[5] = direction;
	plain[6] = direction;
	plain[7] = direction;

	/* derive the RC4 sequence key from the checksum before encrypting */
	code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
	if (code)
		goto out;

	code = krb5_encrypt(cipher, cksum, plain, buf, 8);
out:
	crypto_free_blkcipher(cipher);
	return code;
}

/*
 * Encrypt the sequence number for an outgoing token.  RC4-HMAC contexts
 * are delegated to krb5_make_rc4_seq_num(); all other enctypes encode the
 * sequence number little-endian (note the opposite byte order from the
 * RC4 path) with four direction octets, encrypted under @key with the
 * checksum as confounder/IV input.
 */
s32
krb5_make_seq_num(struct krb5_ctx *kctx,
		struct crypto_blkcipher *key,
		int direction,
		u32 seqnum,
		unsigned char *cksum, unsigned char *buf)
{
	unsigned char plain[8];

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
		return krb5_make_rc4_seq_num(kctx, direction, seqnum,
					     cksum, buf);

	/* little-endian sequence number for the non-RC4 enctypes */
	plain[0] = (unsigned char) (seqnum & 0xff);
	plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
	plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
	plain[3] = (unsigned char) ((seqnum >> 24) & 0xff);
	plain[4] = direction;
	plain[5] = direction;
	plain[6] = direction;
	plain[7] = direction;

	return krb5_encrypt(key, cksum, plain, buf, 8);
}

/*
 * Decrypt and validate the sequence-number field of a received RC4-HMAC
 * token.  The four trailing direction octets must all match, otherwise
 * KG_BAD_SEQ is returned.  On success *direction and the big-endian
 * *seqnum are filled in.
 */
static s32
krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
		     unsigned char *buf, int *direction, s32 *seqnum)
{
	struct crypto_blkcipher *cipher;
	unsigned char plain[8];
	s32 code;

	dprintk("RPC: %s:\n", __func__);
	cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
					CRYPTO_ALG_ASYNC);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
	if (code)
		goto out;

	code = krb5_decrypt(cipher, cksum, buf, plain, 8);
	if (code)
		goto out;

	/* all four direction octets must agree or the token is bogus */
	if ((plain[4] != plain[5]) || (plain[4] != plain[6])
				   || (plain[4] != plain[7])) {
		code = (s32)KG_BAD_SEQ;
		goto out;
	}

	*direction = plain[4];

	*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
					(plain[2] << 8) | (plain[3]));
out:
	crypto_free_blkcipher(cipher);
	return code;
}

/*
 * Decrypt and validate the sequence number of a received token for any
 * enctype.  RC4-HMAC is delegated to krb5_get_rc4_seq_num(); the other
 * enctypes decrypt with the context sequence key and decode the number
 * little-endian (mirroring krb5_make_seq_num above).
 */
s32
krb5_get_seq_num(struct krb5_ctx *kctx,
	       unsigned char *cksum,
	       unsigned char *buf,
	       int *direction, u32 *seqnum)
{
	s32 code;
	unsigned char plain[8];
	struct crypto_blkcipher *key = kctx->seq;

	dprintk("RPC: krb5_get_seq_num:\n");

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
		return krb5_get_rc4_seq_num(kctx, cksum, buf,
					    direction, seqnum);

	if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
		return code;

	/* all four direction octets must agree or the token is bogus */
	if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
	    (plain[4] != plain[7]))
		return (s32)KG_BAD_SEQ;

	*direction = plain[4];

	*seqnum = ((plain[0]) | (plain[1] << 8) |
			(plain[2] << 16) | (plain[3] << 24));

	return 0;
}
gpl-2.0
TeamOrion-Devices/kernel_lge_msm8974
drivers/infiniband/hw/qib/qib_pio_copy.c
14702
2273
/*
 * Copyright (c) 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/**
 * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
 * @to: destination, in MMIO space (must be 64-bit aligned)
 * @from: source (must be 64-bit aligned)
 * @count: number of 32-bit quantities to copy
 *
 * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
 * time.  Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 */
void qib_pio_copy(void __iomem *to, const void *from, size_t count)
{
#ifdef CONFIG_64BIT
	u64 __iomem *out = to;
	const u64 *in = from;
	size_t ndwords = count >> 1;
	size_t i;

	/* move pairs of 32-bit words as single 64-bit stores */
	for (i = 0; i < ndwords; i++)
		__raw_writeq(in[i], out + i);
	/* odd trailing 32-bit word, if any */
	if (count & 1)
		__raw_writel(*(const u32 *)(in + ndwords), out + ndwords);
#else
	u32 __iomem *out = to;
	const u32 *in = from;
	size_t i;

	for (i = 0; i < count; i++)
		__raw_writel(in[i], out + i);
#endif
}
gpl-2.0
golden-guy/android_kernel_asus_grouper
arch/arm/nwfpe/double_cpdo.c
14958
4122
/*
    NetWinder Floating Point Emulator
    (c) Rebel.COM, 1998,1999

    Direct questions, comments to Scott Bambrough <scottb@netwinder.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "fpa11.h"
#include "softfloat.h"
#include "fpopcode.h"

/* Lets us touch the sign/exponent word of a float64 independently of
 * endianness. */
union float64_components {
	float64 f64;
	unsigned int i[2];
};

float64 float64_exp(float64 Fm);
float64 float64_ln(float64 Fm);
float64 float64_sin(float64 rFm);
float64 float64_cos(float64 rFm);
float64 float64_arcsin(float64 rFm);
float64 float64_arctan(float64 rFm);
float64 float64_log(float64 rFm);
float64 float64_tan(float64 rFm);
float64 float64_arccos(float64 rFm);
float64 float64_pow(float64 rFn, float64 rFm);
float64 float64_pol(float64 rFn, float64 rFm);

/* RSF: reverse subtract, i.e. Fm - Fn (operands swapped vs. SUF). */
static float64 float64_rsf(struct roundingData *roundData, float64 rFn, float64 rFm)
{
	return float64_sub(roundData, rFm, rFn);
}

/* RDF: reverse divide, i.e. Fm / Fn (operands swapped vs. DVF). */
static float64 float64_rdv(struct roundingData *roundData, float64 rFn, float64 rFm)
{
	return float64_div(roundData, rFm, rFn);
}

/* Dispatch table for two-operand (dyadic) FPA opcodes, indexed by the
 * arithmetic-opcode field (bits 23:20).  Unimplemented slots stay NULL. */
static float64 (*const dyadic_double[16])(struct roundingData*, float64 rFn, float64 rFm) = {
	[ADF_CODE >> 20] = float64_add,
	[MUF_CODE >> 20] = float64_mul,
	[SUF_CODE >> 20] = float64_sub,
	[RSF_CODE >> 20] = float64_rsf,
	[DVF_CODE >> 20] = float64_div,
	[RDF_CODE >> 20] = float64_rdv,
	[RMF_CODE >> 20] = float64_rem,

	/* strictly, these opcodes should not be implemented */
	[FML_CODE >> 20] = float64_mul,
	[FDV_CODE >> 20] = float64_div,
	[FRD_CODE >> 20] = float64_rdv,
};

/* MVF: move (identity). */
static float64 float64_mvf(struct roundingData *roundData,float64 rFm)
{
	return rFm;
}

/* MNF: negate by flipping the sign bit directly (word index depends on
 * endianness). */
static float64 float64_mnf(struct roundingData *roundData,float64 rFm)
{
	union float64_components u;

	u.f64 = rFm;
#ifdef __ARMEB__
	u.i[0] ^= 0x80000000;
#else
	u.i[1] ^= 0x80000000;
#endif

	return u.f64;
}

/* ABS: absolute value by clearing the sign bit directly. */
static float64 float64_abs(struct roundingData *roundData,float64 rFm)
{
	union float64_components u;

	u.f64 = rFm;
#ifdef __ARMEB__
	u.i[0] &= 0x7fffffff;
#else
	u.i[1] &= 0x7fffffff;
#endif

	return u.f64;
}

/* Dispatch table for single-operand (monadic) FPA opcodes; NRM is a
 * no-op for softfloat values, so it maps to the identity. */
static float64 (*const monadic_double[16])(struct roundingData *, float64 rFm) = {
	[MVF_CODE >> 20] = float64_mvf,
	[MNF_CODE >> 20] = float64_mnf,
	[ABS_CODE >> 20] = float64_abs,
	[RND_CODE >> 20] = float64_round_to_int,
	[URD_CODE >> 20] = float64_round_to_int,
	[SQT_CODE >> 20] = float64_sqrt,
	[NRM_CODE >> 20] = float64_mvf,
};

/*
 * Emulate one double-precision CPDO (coprocessor data operation).
 * Fetches Fm (and Fn for dyadic ops) from the emulated register file,
 * widening single-precision operands, then dispatches through the
 * tables above and stores the result in *rFd.
 *
 * Returns 1 on success, 0 for an unhandled opcode or operand type
 * (e.g. extended precision, which this file does not handle).
 */
unsigned int DoubleCPDO(struct roundingData *roundData, const unsigned int opcode, FPREG * rFd)
{
	FPA11 *fpa11 = GET_FPA11();
	float64 rFm;
	unsigned int Fm, opc_mask_shift;

	Fm = getFm(opcode);
	if (CONSTANT_FM(opcode)) {
		/* immediate-form operand: one of the eight FPA constants */
		rFm = getDoubleConstant(Fm);
	} else {
		switch (fpa11->fType[Fm]) {
		case typeSingle:
			rFm = float32_to_float64(fpa11->fpreg[Fm].fSingle);
			break;

		case typeDouble:
			rFm = fpa11->fpreg[Fm].fDouble;
			break;

		default:
			return 0;
		}
	}

	opc_mask_shift = (opcode & MASK_ARITHMETIC_OPCODE) >> 20;
	if (!MONADIC_INSTRUCTION(opcode)) {
		unsigned int Fn = getFn(opcode);
		float64 rFn;

		switch (fpa11->fType[Fn]) {
		case typeSingle:
			rFn = float32_to_float64(fpa11->fpreg[Fn].fSingle);
			break;

		case typeDouble:
			rFn = fpa11->fpreg[Fn].fDouble;
			break;

		default:
			return 0;
		}

		if (dyadic_double[opc_mask_shift]) {
			rFd->fDouble = dyadic_double[opc_mask_shift](roundData, rFn, rFm);
		} else {
			return 0;
		}
	} else {
		if (monadic_double[opc_mask_shift]) {
			rFd->fDouble = monadic_double[opc_mask_shift](roundData, rFm);
		} else {
			return 0;
		}
	}

	return 1;
}
gpl-2.0
nerdyblonde/N80XX_Kernel
arch/arm/mach-exynos/board-exynos4212-mfd.c
111
10658
/* linux/arch/arm/mach-exynos/board-exynos4212-mfd.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/gpio.h>

#include <mach/irqs.h>

#include "board-exynos4212.h"

#ifdef CONFIG_MFD_MAX14577
#include <linux/mfd/max14577.h>
#include <linux/mfd/max14577-private.h>
#if defined(CONFIG_USE_MUIC)
#include <linux/muic/muic.h>
#endif /* CONFIG_USE_MUIC */
#endif /* CONFIG_MFD_MAX14577 */

#ifdef CONFIG_MFD_MAX77693
#include <linux/mfd/max77693.h>
#include <linux/mfd/max77693-private.h>
#ifdef CONFIG_LEDS_MAX77693
#include <linux/leds-max77693.h>
#endif /* CONFIG_LEDS_MAX77693 */
#endif /* CONFIG_MFD_MAX77693 */

#ifdef CONFIG_MFD_MAX14577
#if defined(CONFIG_MUIC_MAX14577)
extern struct muic_platform_data max14577_muic_pdata;

/*
 * muic_init_gpio_cb - apply the boot-time switch_sel setting to the MUIC
 * USB and UART path selectors.
 *
 * Decodes @switch_sel into AP/CP routing for both paths, stores the
 * result in max14577_muic_pdata and drives the selector GPIOs via the
 * pdata callbacks when they are provided.  Returns the last callback's
 * return value (0 when no callback is set).
 */
static int muic_init_gpio_cb(int switch_sel)
{
	struct muic_platform_data *pdata = &max14577_muic_pdata;
	const char *usb_mode;
	const char *uart_mode;
	int ret = 0;

	pr_info("%s\n", __func__);

	if (switch_sel & SWITCH_SEL_USB_MASK) {
		pdata->usb_path = MUIC_PATH_USB_AP;
		usb_mode = "PDA";
	} else {
		pdata->usb_path = MUIC_PATH_USB_CP;
		usb_mode = "MODEM";
	}

	if (pdata->set_gpio_usb_sel)
		/*
		 * Fix: the USB selector was being driven with
		 * pdata->uart_path (copy-paste from the UART branch below),
		 * so the just-computed USB routing was never applied.
		 */
		ret = pdata->set_gpio_usb_sel(pdata->usb_path);

	if (switch_sel & SWITCH_SEL_UART_MASK) {
		pdata->uart_path = MUIC_PATH_UART_AP;
		uart_mode = "AP";
	} else {
		pdata->uart_path = MUIC_PATH_UART_CP;
		uart_mode = "CP";
	}

	if (pdata->set_gpio_uart_sel)
		ret = pdata->set_gpio_uart_sel(pdata->uart_path);

	pr_info("%s: usb_path(%s), uart_path(%s)\n", __func__,
			usb_mode, uart_mode);

	return ret;
}

struct muic_platform_data max14577_muic_pdata = {
	.init_gpio_cb = muic_init_gpio_cb,
};
#endif /* CONFIG_MUIC_MAX14577 */

extern struct max14577_regulator_data max14577_regulators;
extern
struct max14577_platform_data exynos4_max14577_info; static int max14577_set_gpio_pogo_cb(int new_dev) { struct max14577_platform_data *pdata = &exynos4_max14577_info; int gpio_val = GPIO_LEVEL_LOW; int ret = 0; pr_info("%s new_dev(%d)\n", __func__, new_dev); switch (new_dev) { case ATTACHED_DEV_JIG_UART_OFF_MUIC: case ATTACHED_DEV_JIG_UART_OFF_VB_MUIC: gpio_val = GPIO_LEVEL_HIGH; break; default: gpio_val = GPIO_LEVEL_LOW; break; } if (pdata->set_gpio_pogo_vbatt_en) ret = pdata->set_gpio_pogo_vbatt_en(gpio_val); /* wait 500ms for safely switching VBATT <-> VBUS voltage input */ msleep(500); if (pdata->set_gpio_pogo_vbus_en) ret = pdata->set_gpio_pogo_vbus_en(gpio_val); return ret; } static int max14577_set_gpio_pogo_vbatt_en(int gpio_val) { const char *mode; int pogo_vbatt_en_gpio = exynos4_max14577_info.gpio_pogo_vbatt_en; int pogo_vbatt_en_val; int ret; ret = gpio_request(pogo_vbatt_en_gpio, "GPIO_POGO_VBATT_EN"); if (ret) { pr_err("failed to gpio_request GPIO_POGO_VBATT_EN\n"); return ret; } pogo_vbatt_en_val = gpio_get_value(pogo_vbatt_en_gpio); pr_info("%s: pogo_vbatt_en(%d), GPIO_POGO_VBATT_EN(%d)=%c ->", __func__, gpio_val, pogo_vbatt_en_gpio, (pogo_vbatt_en_val == GPIO_LEVEL_LOW ? 'L' : 'H')); if (gpio_val == GPIO_LEVEL_LOW) { mode = "POGO_VBATT_EN DISABLE"; } else if (gpio_val == GPIO_LEVEL_HIGH) { mode = "POGO_VBATT_EN ENABLE"; } else { mode = "Error"; goto out; } if (gpio_is_valid(pogo_vbatt_en_gpio)) gpio_set_value(pogo_vbatt_en_gpio, gpio_val); out: pogo_vbatt_en_val = gpio_get_value(pogo_vbatt_en_gpio); gpio_free(pogo_vbatt_en_gpio); pr_info(" %s, GPIO_POGO_VBATT_EN(%d)=%c\n", mode, pogo_vbatt_en_gpio, (pogo_vbatt_en_val == GPIO_LEVEL_LOW ? 
'L' : 'H')); return 0; } static int max14577_set_gpio_pogo_vbus_en(int gpio_val) { const char *mode; int pogo_vbus_en_gpio = exynos4_max14577_info.gpio_pogo_vbus_en; int pogo_vbus_en_val; int ret; ret = gpio_request(pogo_vbus_en_gpio, "GPIO_POGO_VBUS_EN"); if (ret) { pr_err("failed to gpio_request GPIO_POGO_VBUS_EN\n"); return ret; } pogo_vbus_en_val = gpio_get_value(pogo_vbus_en_gpio); pr_info("%s: pogo_vbus_en(%d), GPIO_POGO_VBUS_EN(%d)=%c ->", __func__, gpio_val, pogo_vbus_en_gpio, (pogo_vbus_en_val == GPIO_LEVEL_LOW ? 'L' : 'H')); if (gpio_val == GPIO_LEVEL_LOW) { mode = "POGO_VBUS_EN DISABLE"; } else if (gpio_val == GPIO_LEVEL_HIGH) { mode = "POGO_VBUS_EN ENABLE"; } else { mode = "Error"; goto out; } if (gpio_is_valid(pogo_vbus_en_gpio)) gpio_set_value(pogo_vbus_en_gpio, gpio_val); out: pogo_vbus_en_val = gpio_get_value(pogo_vbus_en_gpio); gpio_free(pogo_vbus_en_gpio); pr_info(" %s, GPIO_POGO_VBUS_EN(%d)=%c\n", mode, pogo_vbus_en_gpio, (pogo_vbus_en_val == GPIO_LEVEL_LOW ? 'L' : 'H')); return 0; } struct max14577_platform_data exynos4_max14577_info = { .irq_base = IRQ_BOARD_IFIC_START, .irq_gpio = GPIO_IF_PMIC_IRQ, .wakeup = true, #if defined(GPIO_POGO_VBATT_EN) .gpio_pogo_vbatt_en = GPIO_POGO_VBATT_EN, .set_gpio_pogo_vbatt_en = max14577_set_gpio_pogo_vbatt_en, #endif /* GPIO_POGO_VBATT_EN */ #if defined(GPIO_POGO_VBUS_EN) .gpio_pogo_vbus_en = GPIO_POGO_VBUS_EN, .set_gpio_pogo_vbus_en = max14577_set_gpio_pogo_vbus_en, #endif /* GPIO_POGO_VBUS_EN */ #if defined(CONFIG_MUIC_MAX14577) .muic_pdata = &max14577_muic_pdata, #endif /* CONFIG_MUIC_MAX14577 */ .num_regulators = MAX14577_REG_MAX, .regulators = &max14577_regulators, }; #endif /* CONFIG_MFD_MAX14577 */ #ifdef CONFIG_MFD_MAX77693 extern struct max77693_muic_data max77693_muic; extern struct max77693_regulator_data max77693_regulators; #ifdef CONFIG_LEDS_MAX77693 struct max77693_led_platform_data max77693_led_pdata = { .num_leds = 4, .leds[0].name = "leds-sec1", .leds[0].id = MAX77693_FLASH_LED_1, 
.leds[0].timer = MAX77693_FLASH_TIME_500MS, .leds[0].timer_mode = MAX77693_TIMER_MODE_MAX_TIMER, .leds[0].cntrl_mode = MAX77693_LED_CTRL_BY_FLASHSTB, .leds[0].brightness = 0x1F, .leds[1].name = "leds-sec2", .leds[1].id = MAX77693_FLASH_LED_2, .leds[1].timer = MAX77693_FLASH_TIME_500MS, .leds[1].timer_mode = MAX77693_TIMER_MODE_MAX_TIMER, .leds[1].cntrl_mode = MAX77693_LED_CTRL_BY_FLASHSTB, .leds[1].brightness = 0x1F, .leds[2].name = "torch-sec1", .leds[2].id = MAX77693_TORCH_LED_1, .leds[2].cntrl_mode = MAX77693_LED_CTRL_BY_FLASHSTB, .leds[2].brightness = 0x03, .leds[3].name = "torch-sec2", .leds[3].id = MAX77693_TORCH_LED_2, .leds[3].cntrl_mode = MAX77693_LED_CTRL_BY_FLASHSTB, .leds[3].brightness = 0x04, }; #endif #if defined(CONFIG_MACH_GC1) static void motor_init_hw(void) { if (gpio_request(EXYNOS4_GPD0(0), "VIBTONE_PWM") < 0) printk(KERN_ERR "[VIB] gpio requst is failed\n"); else { gpio_direction_output(EXYNOS4_GPD0(0), 0); printk(KERN_DEBUG "[VIB] gpio request is succeed\n"); } } static void motor_en(bool enable) { gpio_direction_output(EXYNOS4_GPD0(0), enable); printk(KERN_DEBUG "[VIB] motor_enabled GPIO GPD0(0) : %d\n", gpio_get_value(EXYNOS4_GPD0(0))); } #endif #ifdef CONFIG_MACH_BAFFIN static void motor_en(bool enable) { gpio_direction_output(EXYNOS4_GPY2(2), enable); printk(KERN_DEBUG "[VIB] motor_enabled GPIO GPY2(2) : %d\n", gpio_get_value(EXYNOS4_GPY2(2))); } #endif #if defined(CONFIG_MACH_T0) && defined(CONFIG_TARGET_LOCALE_KOR) || \ defined(CONFIG_MACH_T0_JPN_LTE_DCM) static void motor_en(bool enable) { gpio_direction_output(EXYNOS4_GPC0(3), enable); printk(KERN_DEBUG "[VIB] motor_enabled GPIO GPC0(3) : %d\n", gpio_get_value(EXYNOS4_GPC0(3))); } #endif #ifdef CONFIG_VIBETONZ static struct max77693_haptic_platform_data max77693_haptic_pdata = { #if defined(CONFIG_MACH_GC1) .reg2 = MOTOR_ERM, .pwm_id = 1, .init_hw = motor_init_hw, .motor_en = motor_en, #else .reg2 = MOTOR_LRA | EXT_PWM | DIVIDER_128, .pwm_id = 0, .init_hw = NULL, .motor_en = NULL, 
#endif .max_timeout = 10000, #if defined(CONFIG_MACH_GC2PD) .duty = 37900, #else .duty = 35500, #endif #if defined(CONFIG_MACH_SUPERIOR_KOR_SKT) .period = 38295, #elif defined(CONFIG_MACH_ZEST) .period = 38054, #else .period = 37904, #endif .regulator_name = "vmotor", }; #endif #ifdef CONFIG_BATTERY_MAX77693_CHARGER static struct max77693_charger_platform_data max77693_charger_pdata = { #ifdef CONFIG_BATTERY_WPC_CHARGER .wpc_irq_gpio = GPIO_WPC_INT, #if defined(CONFIG_MACH_M0) || defined(CONFIG_MACH_C1) || \ defined(CONFIG_MACH_M3) || defined(CONFIG_MACH_T0) || \ defined(CONFIG_MACH_GD2) .vbus_irq_gpio = GPIO_V_BUS_INT, #endif #if defined(CONFIG_MACH_T0) || \ defined(CONFIG_MACH_GD2) .wc_pwr_det = true, #else .wc_pwr_det = false, #endif #endif }; #endif struct max77693_platform_data exynos4_max77693_info = { .irq_base = IRQ_BOARD_IFIC_START, .irq_gpio = GPIO_IF_PMIC_IRQ, .wakeup = 1, .muic = &max77693_muic, .regulators = &max77693_regulators, .num_regulators = MAX77693_REG_MAX, #if defined(CONFIG_CHARGER_MAX77693_BAT) .charger_data = &sec_battery_pdata, #elif defined(CONFIG_BATTERY_MAX77693_CHARGER) .charger_data = &max77693_charger_pdata, #endif #ifdef CONFIG_VIBETONZ .haptic_data = &max77693_haptic_pdata, #endif #ifdef CONFIG_LEDS_MAX77693 .led_data = &max77693_led_pdata, #endif }; #endif /* CONFIG_MFD_MAX77693 */ #if defined(CONFIG_MUIC_I2C_USE_I2C17_EMUL) /* I2C17 */ static struct i2c_board_info i2c_devs17_emul[] __initdata = { #ifdef CONFIG_MFD_MAX14577 { I2C_BOARD_INFO(MFD_DEV_NAME, MAX14577_I2C_ADDR), .platform_data = &exynos4_max14577_info, } #endif #ifdef CONFIG_MFD_MAX77693 { I2C_BOARD_INFO("max77693", (0xCC >> 1)), .platform_data = &exynos4_max77693_info, } #endif }; static struct i2c_gpio_platform_data gpio_i2c_data17 = { .sda_pin = GPIO_IF_PMIC_SDA, .scl_pin = GPIO_IF_PMIC_SCL, }; struct platform_device s3c_device_i2c17 = { .name = "i2c-gpio", .id = 17, .dev.platform_data = &gpio_i2c_data17, }; #endif /* CONFIG_MUIC_I2C_USE_I2C17_EMUL */ static struct 
platform_device *exynos4_mfd_device[] __initdata = { #if defined(CONFIG_MUIC_I2C_USE_I2C17_EMUL) &s3c_device_i2c17, #endif /* CONFIG_MUIC_I2C_USE_I2C17_EMUL */ }; void __init exynos4_exynos4212_mfd_init(void) { pr_info("%s\n", __func__); #if defined(CONFIG_WATCH_02_BD) if (system_rev == 0x02) exynos4_max14577_info.set_gpio_pogo_cb = max14577_set_gpio_pogo_cb; #endif /* CONFIG_WATCH_02_BD */ #if defined(CONFIG_MUIC_I2C_USE_I2C17_EMUL) i2c_register_board_info(17, i2c_devs17_emul, ARRAY_SIZE(i2c_devs17_emul)); #endif /* CONFIG_MUIC_I2C_USE_I2C17_EMUL */ platform_add_devices(exynos4_mfd_device, ARRAY_SIZE(exynos4_mfd_device)); }
gpl-2.0
Benzonat0r/android_kernel_samsung_golden
arch/arm/mach-ux500/debug-last-io.c
111
1553
/* * Copyright (C) ST-Ericsson SA 2012 * * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com> * License terms: GNU General Public License (GPL) version 2 */ #include <linux/types.h> #include <linux/dma-mapping.h> #include <mach/hardware.h> struct ux500_debug_last_io { void *pc; void __iomem *vaddr; u64 jiffies; } ____cacheline_aligned; static struct ux500_debug_last_io *ux500_last_io; static dma_addr_t ux500_last_io_phys; static void __iomem *l2x0_base; void ux500_debug_last_io_save(void *pc, void __iomem *vaddr) { int index = smp_processor_id(); if (ux500_last_io && /* Ignore L2CC writes as they appear in each write{b,h,l} */ ((unsigned long)l2x0_base != ((unsigned long)vaddr & ~(SZ_4K - 1)))) { ux500_last_io[index].pc = pc; ux500_last_io[index].vaddr = vaddr; /* Reading without lock */ ux500_last_io[index].jiffies = jiffies_64; } } static int __init ux500_debug_last_io_init(void) { size_t size; size = sizeof(struct ux500_debug_last_io) * num_possible_cpus(); ux500_last_io = dma_alloc_coherent(NULL, size, &ux500_last_io_phys, GFP_KERNEL); if (!ux500_last_io) { printk(KERN_ERR"%s: Failed to allocate memory\n", __func__); return -ENOMEM; } if (cpu_is_u5500()) l2x0_base = __io_address(U5500_L2CC_BASE); else if (cpu_is_u8500() || cpu_is_u9540()) l2x0_base = __io_address(U8500_L2CC_BASE); /* * CONFIG_UX500_DEBUG_LAST_IO is only intended for debugging. * It should not be left enabled. */ WARN_ON(1); return 0; } arch_initcall(ux500_debug_last_io_init);
gpl-2.0
edoko/Air_Kernel_for_GN
arch/arm/mach-omap2/dmtimer.c
111
7815
/**
 * OMAP2+ Dual-Mode Timers - platform device registration
 *
 * Contains first level initialization routines which extracts timers
 * information from hwmod database and registers with linux device model.
 * It also has low level function to change the timer input clock source.
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *	Tarun Kanti DebBarma <tarun.kanti@ti.com>
 *	Thara Gopinath <thara@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <plat/dmtimer.h>
#include <plat/omap_device.h>
#include <plat/cpu.h>
#include <plat/omap_hwmod.h>
#include <plat/omap-pm.h>

#include "powerdomain.h"

/* id of the timer reserved as the system tick; skipped by omap_timer_init() */
static u8 __initdata system_timer_id;

/**
 * omap2_dm_timer_set_src - change the timer input clock source
 * @pdev:	timer platform device pointer
 * @source:	array index of parent clock source
 *
 * Reparents the timer's functional clock onto the clock named by
 * @source.  Returns 0 on success, -EINVAL on any failure.
 */
static int omap2_dm_timer_set_src(struct platform_device *pdev, int source)
{
	int ret;
	struct dmtimer_platform_data *pdata = pdev->dev.platform_data;
	struct clk *new_fclk;
	char *fclk_name = "32k_ck"; /* default name */

	struct clk *fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR_OR_NULL(fclk)) {
		dev_err(&pdev->dev, "%s: %d: clk_get() FAILED\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	switch (source) {
	case OMAP_TIMER_SRC_SYS_CLK:
		fclk_name = "sys_ck";
		break;

	case OMAP_TIMER_SRC_32_KHZ:
		fclk_name = "32k_ck";
		break;

	case OMAP_TIMER_SRC_EXT_CLK:
		if (pdata->timer_ip_type == OMAP_TIMER_IP_VERSION_1) {
			fclk_name = "alt_ck";
			break;
		}
		/* fallthrough: no external clock on later IP revisions */
	default:
		dev_err(&pdev->dev, "%s: %d: invalid clk src.\n",
			__func__, __LINE__);
		clk_put(fclk);
		return -EINVAL;
	}

	new_fclk = clk_get(&pdev->dev, fclk_name);
	if (IS_ERR_OR_NULL(new_fclk)) {
		dev_err(&pdev->dev, "%s: %d: clk_get() %s FAILED\n",
			__func__, __LINE__, fclk_name);
		clk_put(fclk);
		return -EINVAL;
	}

	ret = clk_set_parent(fclk, new_fclk);
	if (IS_ERR_VALUE(ret)) {
		dev_err(&pdev->dev, "%s: clk_set_parent() to %s FAILED\n",
			__func__, fclk_name);
		ret = -EINVAL;
	}

	clk_put(new_fclk);
	clk_put(fclk);

	return ret;
}

struct omap_device_pm_latency omap2_dmtimer_latency[] = {
	{
		.deactivate_func = omap_device_idle_hwmods,
		.activate_func   = omap_device_enable_hwmods,
		.flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
	},
};

/**
 * omap_timer_init - build and register timer device with an
 * associated timer hwmod
 * @oh:	timer hwmod pointer to be used to build timer device
 * @unused:	parameter that can be passed from calling hwmod API
 *
 * Called by omap_hwmod_for_each_by_class to register each of the timer
 * devices present in the system. The number of timer devices is known
 * by parsing through the hwmod database for a given class name. At the
 * end of function call memory is allocated for timer device and it is
 * registered to the framework ready to be proved by the driver.
 *
 * Returns 0 on success (including the deliberately skipped system and
 * secure timers), -ENOMEM/-EINVAL on failure.
 */
static int __init omap_timer_init(struct omap_hwmod *oh, void *unused)
{
	/*
	 * Fix: initialize id so it is never used uninitialized if the
	 * hwmod name unexpectedly fails to match "timer%2d" below.
	 */
	int id = 0;
	int ret = 0;
	char *name = "omap_timer";
	struct dmtimer_platform_data *pdata;
	struct omap_device *od;
	struct omap_secure_timer_dev_attr *secure_timer_dev_attr;
	struct powerdomain *pwrdm;

	/*
	 * Extract the IDs from name field in hwmod database
	 * and use the same for constructing ids' for the
	 * timer devices. In a way, we are avoiding usage of
	 * static variable witin the function to do the same.
	 * CAUTION: We have to be careful and make sure the
	 * name in hwmod database does not change in which case
	 * we might either make corresponding change here or
	 * switch back static variable mechanism.
	 */
	sscanf(oh->name, "timer%2d", &id);

	/* the system tick timer was already registered separately */
	if (unlikely(id == system_timer_id))
		return ret;

	pr_debug("%s: %s\n", __func__, oh->name);

	/* do not register secure timer */
	secure_timer_dev_attr = oh->dev_attr;
	if (secure_timer_dev_attr && secure_timer_dev_attr->is_secure_timer)
		return ret;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		pr_err("%s: No memory for [%s]\n", __func__, oh->name);
		return -ENOMEM;
	}

	pdata->set_timer_src = omap2_dm_timer_set_src;
	pdata->timer_ip_type = oh->class->rev;

	pwrdm = omap_hwmod_get_pwrdm(oh);
	if (!pwrdm) {
		pr_debug("%s: could not find pwrdm for (%s) in omap hwmod!\n",
			 __func__, oh->name);
		/* Fix: pdata was leaked on this error path */
		kfree(pdata);
		return -EINVAL;
	}
	pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);

	od = omap_device_build(name, id, oh, pdata, sizeof(*pdata),
			omap2_dmtimer_latency,
			ARRAY_SIZE(omap2_dmtimer_latency),
			pdata->is_early_init);
	if (IS_ERR(od)) {
		pr_err("%s: Can't build omap_device for %s: %s.\n",
			__func__, name, oh->name);
		ret = -EINVAL;
	}

	/* omap_device_build() copies pdata, so our copy is always freed */
	kfree(pdata);

	return ret;
}

/**
 * omap2_system_timer_init - top level system timer initialization
 * called from omap2_gp_timer_init() in timer-gp.c
 * @id:	system timer id
 *
 * This function does hwmod setup for the system timer entry needed
 * prior to building and registering the device. After the device is
 * registered early probe initiated.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int __init omap2_system_timer_init(u8 id)
{
	int ret = 0;
	char *name = "omap_timer";
	struct dmtimer_platform_data *pdata;
	struct omap_device *od;
	struct omap_hwmod *oh;
	/*
	 * Fix: the buffer was 8 bytes ("timerXX\0"), which overflows for
	 * three-digit ids (u8 allows up to 255 -> "timer255" needs 9).
	 */
	char system_timer_name[sizeof("timer255")];

	system_timer_id = id;

	snprintf(system_timer_name, sizeof(system_timer_name), "timer%d", id);
	ret = omap_hwmod_setup_one(system_timer_name);
	if (ret) {
		pr_err("%s: omap_hwmod_setup_one(%s) failed.\n",
			__func__, system_timer_name);
		return ret;
	}

	oh = omap_hwmod_lookup(system_timer_name);
	if (!oh) {
		pr_debug("%s: could not find (%s) in omap_hwmod_list!\n",
			 __func__, system_timer_name);
		return -EINVAL;
	}

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		pr_err("%s: No memory for [%s]\n", __func__, oh->name);
		return -ENOMEM;
	}

	pdata->is_early_init = 1;

	pdata->set_timer_src = omap2_dm_timer_set_src;
	pdata->timer_ip_type = oh->class->rev;
	pdata->needs_manual_reset = 0;

	od = omap_device_build(name, id, oh, pdata, sizeof(*pdata),
			omap2_dmtimer_latency,
			ARRAY_SIZE(omap2_dmtimer_latency),
			pdata->is_early_init);
	if (IS_ERR(od)) {
		pr_err("%s: Can't build omap_device for %s: %s.\n",
			__func__, name, oh->name);
		ret = -EINVAL;
	}

	kfree(pdata);

	if (!ret) {
		early_platform_driver_register_all("earlytimer");
		early_platform_driver_probe("earlytimer", 1, 0);
	}

	/* Fix: was "return 0", swallowing the omap_device_build() failure */
	return ret;
}

/**
 * omap2_system_timer_set_src - change the timer input clock source
 * Allow system timer to program clock source before pm_runtime
 * framework is available during system boot.
 * @timer:	pointer to struct omap_dm_timer
 * @source:	array index of parent clock source
 *
 * Briefly gates the functional clock around the reparent operation.
 */
int __init omap2_system_timer_set_src(struct omap_dm_timer *timer, int source)
{
	int ret;

	if (IS_ERR_OR_NULL(timer) || IS_ERR_OR_NULL(timer->fclk))
		return -EINVAL;

	clk_disable(timer->fclk);
	ret = omap2_dm_timer_set_src(timer->pdev, source);
	clk_enable(timer->fclk);

	return ret;
}

/**
 * omap2_dm_timer_init - top level regular device initialization
 *
 * Uses dedicated hwmod api to parse through hwmod database for
 * given class name and then build and register the timer device.
 */
static int __init omap2_dm_timer_init(void)
{
	int ret;

	ret = omap_hwmod_for_each_by_class("timer", omap_timer_init, NULL);
	if (unlikely(ret)) {
		pr_err("%s: device registration failed.\n", __func__);
		return -EINVAL;
	}

	return 0;
}
arch_initcall(omap2_dm_timer_init);
gpl-2.0
thaskell1/linux
drivers/regulator/mt6311-regulator.c
367
4820
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Henry Chen <henryc.chen@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/mt6311.h>
#include <linux/slab.h>
#include "mt6311-regulator.h"

/* 8-bit register/value map covering the chip's register file. */
static const struct regmap_config mt6311_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = MT6311_FQMTR_CON4,
};

/* Default limits measured in millivolts and milliamps */
#define MT6311_MIN_UV		600000
#define MT6311_MAX_UV		1393750
#define MT6311_STEP_UV		6250

/* Single linear range: selector 0..0x7f maps 600 mV upward in 6.25 mV steps. */
static const struct regulator_linear_range buck_volt_range[] = {
	REGULATOR_LINEAR_RANGE(MT6311_MIN_UV, 0, 0x7f, MT6311_STEP_UV),
};

/* Buck (VDVFS): voltage-programmable, all ops delegated to regmap helpers. */
static const struct regulator_ops mt6311_buck_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
};

/* LDO (VBIASN): fixed output, on/off control only. */
static const struct regulator_ops mt6311_ldo_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
};

#define MT6311_BUCK(_id) \
{\
	.name = #_id,\
	.ops = &mt6311_buck_ops,\
	.of_match = of_match_ptr(#_id),\
	.regulators_node = of_match_ptr("regulators"),\
	.type = REGULATOR_VOLTAGE,\
	.id = MT6311_ID_##_id,\
	.n_voltages = (MT6311_MAX_UV - MT6311_MIN_UV) / MT6311_STEP_UV + 1,\
	.min_uV = MT6311_MIN_UV,\
	.uV_step = MT6311_STEP_UV,\
	.owner = THIS_MODULE,\
	.linear_ranges = buck_volt_range, \
	.n_linear_ranges = ARRAY_SIZE(buck_volt_range), \
	.enable_reg = MT6311_VDVFS11_CON9,\
	.enable_mask = MT6311_PMIC_VDVFS11_EN_MASK,\
	.vsel_reg = MT6311_VDVFS11_CON12,\
	.vsel_mask = MT6311_PMIC_VDVFS11_VOSEL_MASK,\
}

#define MT6311_LDO(_id) \
{\
	.name = #_id,\
	.ops = &mt6311_ldo_ops,\
	.of_match = of_match_ptr(#_id),\
	.regulators_node = of_match_ptr("regulators"),\
	.type = REGULATOR_VOLTAGE,\
	.id = MT6311_ID_##_id,\
	.owner = THIS_MODULE,\
	.enable_reg = MT6311_LDO_CON3,\
	.enable_mask = MT6311_PMIC_RG_VBIASN_EN_MASK,\
}

static const struct regulator_desc mt6311_regulators[] = {
	MT6311_BUCK(VDVFS),
	MT6311_LDO(VBIASN),
};

/*
 * I2C driver interface functions
 */

/*
 * Probe: create the regmap, verify the chip id (E1/E2/E3 silicon
 * revisions are accepted), then register both regulators via devres.
 * Returns 0 on success, -ENODEV for an unknown chip id, or the regmap/
 * regulator registration error.
 */
static int mt6311_i2c_probe(struct i2c_client *i2c,
		    const struct i2c_device_id *id)
{
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	struct regmap *regmap;
	int i, ret;
	unsigned int data;

	regmap = devm_regmap_init_i2c(i2c, &mt6311_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
			ret);
		return ret;
	}

	ret = regmap_read(regmap, MT6311_SWCID, &data);
	if (ret < 0) {
		dev_err(&i2c->dev, "Failed to read DEVICE_ID reg: %d\n", ret);
		return ret;
	}

	switch (data) {
	case MT6311_E1_CID_CODE:
	case MT6311_E2_CID_CODE:
	case MT6311_E3_CID_CODE:
		break;
	default:
		dev_err(&i2c->dev, "Unsupported device id = 0x%x.\n", data);
		return -ENODEV;
	}

	for (i = 0; i < MT6311_MAX_REGULATORS; i++) {
		config.dev = &i2c->dev;
		config.regmap = regmap;

		rdev = devm_regulator_register(&i2c->dev,
			&mt6311_regulators[i], &config);
		if (IS_ERR(rdev)) {
			dev_err(&i2c->dev,
				"Failed to register MT6311 regulator\n");
			return PTR_ERR(rdev);
		}
	}

	return 0;
}

static const struct i2c_device_id mt6311_i2c_id[] = {
	{"mt6311", 0},
	{},
};
MODULE_DEVICE_TABLE(i2c, mt6311_i2c_id);

#ifdef CONFIG_OF
static const struct of_device_id mt6311_dt_ids[] = {
	{ .compatible = "mediatek,mt6311-regulator",
	  .data = &mt6311_i2c_id[0] },
	{},
};
MODULE_DEVICE_TABLE(of, mt6311_dt_ids);
#endif

static struct i2c_driver mt6311_regulator_driver = {
	.driver = {
		.name = "mt6311",
		.of_match_table = of_match_ptr(mt6311_dt_ids),
	},
	.probe = mt6311_i2c_probe,
	.id_table = mt6311_i2c_id,
};

module_i2c_driver(mt6311_regulator_driver);

MODULE_AUTHOR("Henry Chen <henryc.chen@mediatek.com>");
MODULE_DESCRIPTION("Regulator device driver for Mediatek MT6311");
MODULE_LICENSE("GPL v2");
gpl-2.0
zhuyj/gmac
drivers/rtc/rtc-ds1742.c
367
7119
/* * An rtc driver for the Dallas DS1742 * * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Copyright (C) 2006 Torsten Ertbjerg Rasmussen <tr@newtec.dk> * - nvram size determined from resource * - this ds1742 driver now supports ds1743. */ #include <linux/bcd.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/rtc.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/module.h> #define RTC_SIZE 8 #define RTC_CONTROL 0 #define RTC_CENTURY 0 #define RTC_SECONDS 1 #define RTC_MINUTES 2 #define RTC_HOURS 3 #define RTC_DAY 4 #define RTC_DATE 5 #define RTC_MONTH 6 #define RTC_YEAR 7 #define RTC_CENTURY_MASK 0x3f #define RTC_SECONDS_MASK 0x7f #define RTC_DAY_MASK 0x07 /* Bits in the Control/Century register */ #define RTC_WRITE 0x80 #define RTC_READ 0x40 /* Bits in the Seconds register */ #define RTC_STOP 0x80 /* Bits in the Day register */ #define RTC_BATT_FLAG 0x80 struct rtc_plat_data { void __iomem *ioaddr_nvram; void __iomem *ioaddr_rtc; size_t size_nvram; unsigned long last_jiffies; struct bin_attribute nvram_attr; }; static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr_rtc; u8 century; century = bin2bcd((tm->tm_year + 1900) / 100); writeb(RTC_WRITE, ioaddr + RTC_CONTROL); writeb(bin2bcd(tm->tm_year % 100), ioaddr + RTC_YEAR); writeb(bin2bcd(tm->tm_mon + 1), ioaddr + RTC_MONTH); writeb(bin2bcd(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY); writeb(bin2bcd(tm->tm_mday), ioaddr + RTC_DATE); writeb(bin2bcd(tm->tm_hour), ioaddr + RTC_HOURS); writeb(bin2bcd(tm->tm_min), 
ioaddr + RTC_MINUTES); writeb(bin2bcd(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS); /* RTC_CENTURY and RTC_CONTROL share same register */ writeb(RTC_WRITE | (century & RTC_CENTURY_MASK), ioaddr + RTC_CENTURY); writeb(century & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL); return 0; } static int ds1742_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr_rtc; unsigned int year, month, day, hour, minute, second, week; unsigned int century; /* give enough time to update RTC in case of continuous read */ if (pdata->last_jiffies == jiffies) msleep(1); pdata->last_jiffies = jiffies; writeb(RTC_READ, ioaddr + RTC_CONTROL); second = readb(ioaddr + RTC_SECONDS) & RTC_SECONDS_MASK; minute = readb(ioaddr + RTC_MINUTES); hour = readb(ioaddr + RTC_HOURS); day = readb(ioaddr + RTC_DATE); week = readb(ioaddr + RTC_DAY) & RTC_DAY_MASK; month = readb(ioaddr + RTC_MONTH); year = readb(ioaddr + RTC_YEAR); century = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK; writeb(0, ioaddr + RTC_CONTROL); tm->tm_sec = bcd2bin(second); tm->tm_min = bcd2bin(minute); tm->tm_hour = bcd2bin(hour); tm->tm_mday = bcd2bin(day); tm->tm_wday = bcd2bin(week); tm->tm_mon = bcd2bin(month) - 1; /* year is 1900 + tm->tm_year */ tm->tm_year = bcd2bin(year) + bcd2bin(century) * 100 - 1900; return rtc_valid_tm(tm); } static const struct rtc_class_ops ds1742_rtc_ops = { .read_time = ds1742_rtc_read_time, .set_time = ds1742_rtc_set_time, }; static ssize_t ds1742_nvram_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr_nvram; ssize_t count; for (count = 0; count < size; count++) *buf++ 
= readb(ioaddr + pos++); return count; } static ssize_t ds1742_nvram_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr_nvram; ssize_t count; for (count = 0; count < size; count++) writeb(*buf++, ioaddr + pos++); return count; } static int ds1742_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; struct resource *res; unsigned int cen, sec; struct rtc_plat_data *pdata; void __iomem *ioaddr; int ret = 0; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ioaddr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ioaddr)) return PTR_ERR(ioaddr); pdata->ioaddr_nvram = ioaddr; pdata->size_nvram = resource_size(res) - RTC_SIZE; pdata->ioaddr_rtc = ioaddr + pdata->size_nvram; sysfs_bin_attr_init(&pdata->nvram_attr); pdata->nvram_attr.attr.name = "nvram"; pdata->nvram_attr.attr.mode = S_IRUGO | S_IWUSR; pdata->nvram_attr.read = ds1742_nvram_read; pdata->nvram_attr.write = ds1742_nvram_write; pdata->nvram_attr.size = pdata->size_nvram; /* turn RTC on if it was not on */ ioaddr = pdata->ioaddr_rtc; sec = readb(ioaddr + RTC_SECONDS); if (sec & RTC_STOP) { sec &= RTC_SECONDS_MASK; cen = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK; writeb(RTC_WRITE, ioaddr + RTC_CONTROL); writeb(sec, ioaddr + RTC_SECONDS); writeb(cen & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL); } if (!(readb(ioaddr + RTC_DAY) & RTC_BATT_FLAG)) dev_warn(&pdev->dev, "voltage-low detected.\n"); pdata->last_jiffies = jiffies; platform_set_drvdata(pdev, pdata); rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &ds1742_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); ret = sysfs_create_bin_file(&pdev->dev.kobj, 
&pdata->nvram_attr); if (ret) dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n", pdata->nvram_attr.attr.name); return 0; } static int ds1742_rtc_remove(struct platform_device *pdev) { struct rtc_plat_data *pdata = platform_get_drvdata(pdev); sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr); return 0; } static const struct of_device_id __maybe_unused ds1742_rtc_of_match[] = { { .compatible = "maxim,ds1742", }, { } }; MODULE_DEVICE_TABLE(of, ds1742_rtc_of_match); static struct platform_driver ds1742_rtc_driver = { .probe = ds1742_rtc_probe, .remove = ds1742_rtc_remove, .driver = { .name = "rtc-ds1742", .of_match_table = of_match_ptr(ds1742_rtc_of_match), }, }; module_platform_driver(ds1742_rtc_driver); MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); MODULE_DESCRIPTION("Dallas DS1742 RTC driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:rtc-ds1742");
gpl-2.0
mtitinger/linux-next
fs/fscache/object-list.c
1135
11690
/* Global fscache object list maintainer and viewer * * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define FSCACHE_DEBUG_LEVEL COOKIE #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/key.h> #include <keys/user-type.h> #include "internal.h" static struct rb_root fscache_object_list; static DEFINE_RWLOCK(fscache_object_list_lock); struct fscache_objlist_data { unsigned long config; /* display configuration */ #define FSCACHE_OBJLIST_CONFIG_KEY 0x00000001 /* show object keys */ #define FSCACHE_OBJLIST_CONFIG_AUX 0x00000002 /* show object auxdata */ #define FSCACHE_OBJLIST_CONFIG_COOKIE 0x00000004 /* show objects with cookies */ #define FSCACHE_OBJLIST_CONFIG_NOCOOKIE 0x00000008 /* show objects without cookies */ #define FSCACHE_OBJLIST_CONFIG_BUSY 0x00000010 /* show busy objects */ #define FSCACHE_OBJLIST_CONFIG_IDLE 0x00000020 /* show idle objects */ #define FSCACHE_OBJLIST_CONFIG_PENDWR 0x00000040 /* show objects with pending writes */ #define FSCACHE_OBJLIST_CONFIG_NOPENDWR 0x00000080 /* show objects without pending writes */ #define FSCACHE_OBJLIST_CONFIG_READS 0x00000100 /* show objects with active reads */ #define FSCACHE_OBJLIST_CONFIG_NOREADS 0x00000200 /* show objects without active reads */ #define FSCACHE_OBJLIST_CONFIG_EVENTS 0x00000400 /* show objects with events */ #define FSCACHE_OBJLIST_CONFIG_NOEVENTS 0x00000800 /* show objects without no events */ #define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with work */ #define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without work */ u8 buf[512]; /* key and aux data buffer */ }; /* * Add an object to the object list * - we use the 
address of the fscache_object structure as the key into the * tree */ void fscache_objlist_add(struct fscache_object *obj) { struct fscache_object *xobj; struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL; ASSERT(RB_EMPTY_NODE(&obj->objlist_link)); write_lock(&fscache_object_list_lock); while (*p) { parent = *p; xobj = rb_entry(parent, struct fscache_object, objlist_link); if (obj < xobj) p = &(*p)->rb_left; else if (obj > xobj) p = &(*p)->rb_right; else BUG(); } rb_link_node(&obj->objlist_link, parent, p); rb_insert_color(&obj->objlist_link, &fscache_object_list); write_unlock(&fscache_object_list_lock); } /* * Remove an object from the object list. */ void fscache_objlist_remove(struct fscache_object *obj) { if (RB_EMPTY_NODE(&obj->objlist_link)) return; write_lock(&fscache_object_list_lock); BUG_ON(RB_EMPTY_ROOT(&fscache_object_list)); rb_erase(&obj->objlist_link, &fscache_object_list); write_unlock(&fscache_object_list_lock); } /* * find the object in the tree on or after the specified index */ static struct fscache_object *fscache_objlist_lookup(loff_t *_pos) { struct fscache_object *pobj, *obj = NULL, *minobj = NULL; struct rb_node *p; unsigned long pos; if (*_pos >= (unsigned long) ERR_PTR(-ENOENT)) return NULL; pos = *_pos; /* banners (can't represent line 0 by pos 0 as that would involve * returning a NULL pointer) */ if (pos == 0) return (struct fscache_object *)(long)++(*_pos); if (pos < 3) return (struct fscache_object *)pos; pobj = (struct fscache_object *)pos; p = fscache_object_list.rb_node; while (p) { obj = rb_entry(p, struct fscache_object, objlist_link); if (pobj < obj) { if (!minobj || minobj > obj) minobj = obj; p = p->rb_left; } else if (pobj > obj) { p = p->rb_right; } else { minobj = obj; break; } obj = NULL; } if (!minobj) *_pos = (unsigned long) ERR_PTR(-ENOENT); else if (minobj != obj) *_pos = (unsigned long) minobj; return minobj; } /* * set up the iterator to start reading from the first line */ static void 
*fscache_objlist_start(struct seq_file *m, loff_t *_pos) __acquires(&fscache_object_list_lock) { read_lock(&fscache_object_list_lock); return fscache_objlist_lookup(_pos); } /* * move to the next line */ static void *fscache_objlist_next(struct seq_file *m, void *v, loff_t *_pos) { (*_pos)++; return fscache_objlist_lookup(_pos); } /* * clean up after reading */ static void fscache_objlist_stop(struct seq_file *m, void *v) __releases(&fscache_object_list_lock) { read_unlock(&fscache_object_list_lock); } /* * display an object */ static int fscache_objlist_show(struct seq_file *m, void *v) { struct fscache_objlist_data *data = m->private; struct fscache_object *obj = v; struct fscache_cookie *cookie; unsigned long config = data->config; char _type[3], *type; u8 *buf = data->buf, *p; if ((unsigned long) v == 1) { seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS" " EM EV FL S" " | NETFS_COOKIE_DEF TY FL NETFS_DATA"); if (config & (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX)) seq_puts(m, " "); if (config & FSCACHE_OBJLIST_CONFIG_KEY) seq_puts(m, "OBJECT_KEY"); if ((config & (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX)) == (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX)) seq_puts(m, ", "); if (config & FSCACHE_OBJLIST_CONFIG_AUX) seq_puts(m, "AUX_DATA"); seq_puts(m, "\n"); return 0; } if ((unsigned long) v == 2) { seq_puts(m, "======== ======== ==== ===== === === === == =====" " == == == =" " | ================ == == ================"); if (config & (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX)) seq_puts(m, " ================"); seq_puts(m, "\n"); return 0; } /* filter out any unwanted objects */ #define FILTER(criterion, _yes, _no) \ do { \ unsigned long yes = FSCACHE_OBJLIST_CONFIG_##_yes; \ unsigned long no = FSCACHE_OBJLIST_CONFIG_##_no; \ if (criterion) { \ if (!(config & yes)) \ return 0; \ } else { \ if (!(config & no)) \ return 0; \ } \ } while(0) cookie = obj->cookie; if (~config) { FILTER(cookie->def, 
COOKIE, NOCOOKIE); FILTER(fscache_object_is_active(obj) || obj->n_ops != 0 || obj->n_obj_ops != 0 || obj->flags || !list_empty(&obj->dependents), BUSY, IDLE); FILTER(test_bit(FSCACHE_OBJECT_PENDING_WRITE, &obj->flags), PENDWR, NOPENDWR); FILTER(atomic_read(&obj->n_reads), READS, NOREADS); FILTER(obj->events & obj->event_mask, EVENTS, NOEVENTS); FILTER(work_busy(&obj->work), WORK, NOWORK); } seq_printf(m, "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %2lx %1x | ", obj->debug_id, obj->parent ? obj->parent->debug_id : -1, obj->state->short_name, obj->n_children, obj->n_ops, obj->n_obj_ops, obj->n_in_progress, obj->n_exclusive, atomic_read(&obj->n_reads), obj->event_mask, obj->events, obj->flags, work_busy(&obj->work)); if (fscache_use_cookie(obj)) { uint16_t keylen = 0, auxlen = 0; switch (cookie->def->type) { case 0: type = "IX"; break; case 1: type = "DT"; break; default: sprintf(_type, "%02u", cookie->def->type); type = _type; break; } seq_printf(m, "%-16s %s %2lx %16p", cookie->def->name, type, cookie->flags, cookie->netfs_data); if (cookie->def->get_key && config & FSCACHE_OBJLIST_CONFIG_KEY) keylen = cookie->def->get_key(cookie->netfs_data, buf, 400); if (cookie->def->get_aux && config & FSCACHE_OBJLIST_CONFIG_AUX) auxlen = cookie->def->get_aux(cookie->netfs_data, buf + keylen, 512 - keylen); fscache_unuse_cookie(obj); if (keylen > 0 || auxlen > 0) { seq_puts(m, " "); for (p = buf; keylen > 0; keylen--) seq_printf(m, "%02x", *p++); if (auxlen > 0) { if (config & FSCACHE_OBJLIST_CONFIG_KEY) seq_puts(m, ", "); for (; auxlen > 0; auxlen--) seq_printf(m, "%02x", *p++); } } seq_puts(m, "\n"); } else { seq_puts(m, "<no_netfs>\n"); } return 0; } static const struct seq_operations fscache_objlist_ops = { .start = fscache_objlist_start, .stop = fscache_objlist_stop, .next = fscache_objlist_next, .show = fscache_objlist_show, }; /* * get the configuration for filtering the list */ static void fscache_objlist_config(struct fscache_objlist_data *data) { #ifdef CONFIG_KEYS 
struct user_key_payload *confkey; unsigned long config; struct key *key; const char *buf; int len; key = request_key(&key_type_user, "fscache:objlist", NULL); if (IS_ERR(key)) goto no_config; config = 0; rcu_read_lock(); confkey = key->payload.data; buf = confkey->data; for (len = confkey->datalen - 1; len >= 0; len--) { switch (buf[len]) { case 'K': config |= FSCACHE_OBJLIST_CONFIG_KEY; break; case 'A': config |= FSCACHE_OBJLIST_CONFIG_AUX; break; case 'C': config |= FSCACHE_OBJLIST_CONFIG_COOKIE; break; case 'c': config |= FSCACHE_OBJLIST_CONFIG_NOCOOKIE; break; case 'B': config |= FSCACHE_OBJLIST_CONFIG_BUSY; break; case 'b': config |= FSCACHE_OBJLIST_CONFIG_IDLE; break; case 'W': config |= FSCACHE_OBJLIST_CONFIG_PENDWR; break; case 'w': config |= FSCACHE_OBJLIST_CONFIG_NOPENDWR; break; case 'R': config |= FSCACHE_OBJLIST_CONFIG_READS; break; case 'r': config |= FSCACHE_OBJLIST_CONFIG_NOREADS; break; case 'S': config |= FSCACHE_OBJLIST_CONFIG_WORK; break; case 's': config |= FSCACHE_OBJLIST_CONFIG_NOWORK; break; } } rcu_read_unlock(); key_put(key); if (!(config & (FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE))) config |= FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE; if (!(config & (FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE))) config |= FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE; if (!(config & (FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR))) config |= FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR; if (!(config & (FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS))) config |= FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS; if (!(config & (FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS))) config |= FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS; if (!(config & (FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK))) config |= FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK; 
data->config = config; return; no_config: #endif data->config = ULONG_MAX; } /* * open "/proc/fs/fscache/objects" to provide a list of active objects * - can be configured by a user-defined key added to the caller's keyrings */ static int fscache_objlist_open(struct inode *inode, struct file *file) { struct fscache_objlist_data *data; data = __seq_open_private(file, &fscache_objlist_ops, sizeof(*data)); if (!data) return -ENOMEM; /* get the configuration key */ fscache_objlist_config(data); return 0; } /* * clean up on close */ static int fscache_objlist_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; kfree(m->private); m->private = NULL; return seq_release(inode, file); } const struct file_operations fscache_objlist_fops = { .owner = THIS_MODULE, .open = fscache_objlist_open, .read = seq_read, .llseek = seq_lseek, .release = fscache_objlist_release, };
gpl-2.0
geekboxzone/mmallow_kernel
drivers/vme/bridges/vme_tsi148.c
1647
74442
/* * Support for the Tundra TSI148 VME-PCI Bridge Chip * * Author: Martyn Welch <martyn.welch@ge.com> * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. * * Based on work by Tom Armistead and Ajit Prem * Copyright 2004 Motorola Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/mm.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/proc_fs.h> #include <linux/pci.h> #include <linux/poll.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/byteorder/generic.h> #include <linux/vme.h> #include "../vme_bridge.h" #include "vme_tsi148.h" static int tsi148_probe(struct pci_dev *, const struct pci_device_id *); static void tsi148_remove(struct pci_dev *); /* Module parameter */ static bool err_chk; static int geoid; static const char driver_name[] = "vme_tsi148"; static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) }, { }, }; static struct pci_driver tsi148_driver = { .name = driver_name, .id_table = tsi148_ids, .probe = tsi148_probe, .remove = tsi148_remove, }; static void reg_join(unsigned int high, unsigned int low, unsigned long long *variable) { *variable = (unsigned long long)high << 32; *variable |= (unsigned long long)low; } static void reg_split(unsigned long long variable, unsigned int *high, unsigned int *low) { *low = (unsigned int)variable & 0xFFFFFFFF; *high = (unsigned int)(variable >> 32); } /* * Wakes up DMA queue. 
*/ static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge, int channel_mask) { u32 serviced = 0; if (channel_mask & TSI148_LCSR_INTS_DMA0S) { wake_up(&bridge->dma_queue[0]); serviced |= TSI148_LCSR_INTC_DMA0C; } if (channel_mask & TSI148_LCSR_INTS_DMA1S) { wake_up(&bridge->dma_queue[1]); serviced |= TSI148_LCSR_INTC_DMA1C; } return serviced; } /* * Wake up location monitor queue */ static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat) { int i; u32 serviced = 0; for (i = 0; i < 4; i++) { if (stat & TSI148_LCSR_INTS_LMS[i]) { /* We only enable interrupts if the callback is set */ bridge->lm_callback[i](i); serviced |= TSI148_LCSR_INTC_LMC[i]; } } return serviced; } /* * Wake up mail box queue. * * XXX This functionality is not exposed up though API. */ static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat) { int i; u32 val; u32 serviced = 0; struct tsi148_driver *bridge; bridge = tsi148_bridge->driver_priv; for (i = 0; i < 4; i++) { if (stat & TSI148_LCSR_INTS_MBS[i]) { val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]); dev_err(tsi148_bridge->parent, "VME Mailbox %d received" ": 0x%x\n", i, val); serviced |= TSI148_LCSR_INTC_MBC[i]; } } return serviced; } /* * Display error & status message when PERR (PCI) exception interrupt occurs. 
*/ static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge) { struct tsi148_driver *bridge; bridge = tsi148_bridge->driver_priv; dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, " "attributes: %08x\n", ioread32be(bridge->base + TSI148_LCSR_EDPAU), ioread32be(bridge->base + TSI148_LCSR_EDPAL), ioread32be(bridge->base + TSI148_LCSR_EDPAT)); dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split " "completion reg: %08x\n", ioread32be(bridge->base + TSI148_LCSR_EDPXA), ioread32be(bridge->base + TSI148_LCSR_EDPXS)); iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT); return TSI148_LCSR_INTC_PERRC; } /* * Save address and status when VME error interrupt occurs. */ static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge) { unsigned int error_addr_high, error_addr_low; unsigned long long error_addr; u32 error_attrib; struct vme_bus_error *error; struct tsi148_driver *bridge; bridge = tsi148_bridge->driver_priv; error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU); error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL); error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT); reg_join(error_addr_high, error_addr_low, &error_addr); /* Check for exception register overflow (we have lost error data) */ if (error_attrib & TSI148_LCSR_VEAT_VEOF) { dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow " "Occurred\n"); } error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC); if (error) { error->address = error_addr; error->attributes = error_attrib; list_add_tail(&error->list, &tsi148_bridge->vme_errors); } else { dev_err(tsi148_bridge->parent, "Unable to alloc memory for " "VMEbus Error reporting\n"); dev_err(tsi148_bridge->parent, "VME Bus Error at address: " "0x%llx, attributes: %08x\n", error_addr, error_attrib); } /* Clear Status */ iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT); return TSI148_LCSR_INTC_VERRC; } /* * Wake up IACK queue. 
*/ static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge) { wake_up(&bridge->iack_queue); return TSI148_LCSR_INTC_IACKC; } /* * Calling VME bus interrupt callback if provided. */ static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat) { int vec, i, serviced = 0; struct tsi148_driver *bridge; bridge = tsi148_bridge->driver_priv; for (i = 7; i > 0; i--) { if (stat & (1 << i)) { /* * Note: Even though the registers are defined as * 32-bits in the spec, we only want to issue 8-bit * IACK cycles on the bus, read from offset 3. */ vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3); vme_irq_handler(tsi148_bridge, i, vec); serviced |= (1 << i); } } return serviced; } /* * Top level interrupt handler. Clears appropriate interrupt status bits and * then calls appropriate sub handler(s). */ static irqreturn_t tsi148_irqhandler(int irq, void *ptr) { u32 stat, enable, serviced = 0; struct vme_bridge *tsi148_bridge; struct tsi148_driver *bridge; tsi148_bridge = ptr; bridge = tsi148_bridge->driver_priv; /* Determine which interrupts are unmasked and set */ enable = ioread32be(bridge->base + TSI148_LCSR_INTEO); stat = ioread32be(bridge->base + TSI148_LCSR_INTS); /* Only look at unmasked interrupts */ stat &= enable; if (unlikely(!stat)) return IRQ_NONE; /* Call subhandlers as appropriate */ /* DMA irqs */ if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S)) serviced |= tsi148_DMA_irqhandler(bridge, stat); /* Location monitor irqs */ if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S)) serviced |= tsi148_LM_irqhandler(bridge, stat); /* Mail box irqs */ if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S | TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S)) serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat); /* PCI bus error */ if (stat & TSI148_LCSR_INTS_PERRS) serviced |= tsi148_PERR_irqhandler(tsi148_bridge); /* VME bus error */ if (stat & TSI148_LCSR_INTS_VERRS) serviced |= 
tsi148_VERR_irqhandler(tsi148_bridge); /* IACK irq */ if (stat & TSI148_LCSR_INTS_IACKS) serviced |= tsi148_IACK_irqhandler(bridge); /* VME bus irqs */ if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S | TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S | TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S | TSI148_LCSR_INTS_IRQ1S)) serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat); /* Clear serviced interrupts */ iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC); return IRQ_HANDLED; } static int tsi148_irq_init(struct vme_bridge *tsi148_bridge) { int result; unsigned int tmp; struct pci_dev *pdev; struct tsi148_driver *bridge; pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev); bridge = tsi148_bridge->driver_priv; /* Initialise list for VME bus errors */ INIT_LIST_HEAD(&tsi148_bridge->vme_errors); mutex_init(&tsi148_bridge->irq_mtx); result = request_irq(pdev->irq, tsi148_irqhandler, IRQF_SHARED, driver_name, tsi148_bridge); if (result) { dev_err(tsi148_bridge->parent, "Can't get assigned pci irq " "vector %02X\n", pdev->irq); return result; } /* Enable and unmask interrupts */ tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO | TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO | TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO | TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO | TSI148_LCSR_INTEO_IACKEO; /* This leaves the following interrupts masked. * TSI148_LCSR_INTEO_VIEEO * TSI148_LCSR_INTEO_SYSFLEO * TSI148_LCSR_INTEO_ACFLEO */ /* Don't enable Location Monitor interrupts here - they will be * enabled when the location monitors are properly configured and * a callback has been attached. * TSI148_LCSR_INTEO_LM0EO * TSI148_LCSR_INTEO_LM1EO * TSI148_LCSR_INTEO_LM2EO * TSI148_LCSR_INTEO_LM3EO */ /* Don't enable VME interrupts until we add a handler, else the board * will respond to it and we don't want that unless it knows how to * properly deal with it. 
* TSI148_LCSR_INTEO_IRQ7EO * TSI148_LCSR_INTEO_IRQ6EO * TSI148_LCSR_INTEO_IRQ5EO * TSI148_LCSR_INTEO_IRQ4EO * TSI148_LCSR_INTEO_IRQ3EO * TSI148_LCSR_INTEO_IRQ2EO * TSI148_LCSR_INTEO_IRQ1EO */ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); return 0; } static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge, struct pci_dev *pdev) { struct tsi148_driver *bridge = tsi148_bridge->driver_priv; /* Turn off interrupts */ iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO); iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN); /* Clear all interrupts */ iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC); /* Detach interrupt handler */ free_irq(pdev->irq, tsi148_bridge); } /* * Check to see if an IACk has been received, return true (1) or false (0). */ static int tsi148_iack_received(struct tsi148_driver *bridge) { u32 tmp; tmp = ioread32be(bridge->base + TSI148_LCSR_VICR); if (tmp & TSI148_LCSR_VICR_IRQS) return 0; else return 1; } /* * Configure VME interrupt */ static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level, int state, int sync) { struct pci_dev *pdev; u32 tmp; struct tsi148_driver *bridge; bridge = tsi148_bridge->driver_priv; /* We need to do the ordering differently for enabling and disabling */ if (state == 0) { tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN); tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1]; iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1]; iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); if (sync != 0) { pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev); synchronize_irq(pdev->irq); } } else { tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1]; iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN); tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1]; iowrite32be(tmp, 
bridge->base + TSI148_LCSR_INTEN); } } /* * Generate a VME bus interrupt at the requested level & vector. Wait for * interrupt to be acked. */ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid) { u32 tmp; struct tsi148_driver *bridge; bridge = tsi148_bridge->driver_priv; mutex_lock(&bridge->vme_int); /* Read VICR register */ tmp = ioread32be(bridge->base + TSI148_LCSR_VICR); /* Set Status/ID */ tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) | (statid & TSI148_LCSR_VICR_STID_M); iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR); /* Assert VMEbus IRQ */ tmp = tmp | TSI148_LCSR_VICR_IRQL[level]; iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR); /* XXX Consider implementing a timeout? */ wait_event_interruptible(bridge->iack_queue, tsi148_iack_received(bridge)); mutex_unlock(&bridge->vme_int); return 0; } /* * Find the first error in this address range */ static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge, u32 aspace, unsigned long long address, size_t count) { struct list_head *err_pos; struct vme_bus_error *vme_err, *valid = NULL; unsigned long long bound; bound = address + count; /* * XXX We are currently not looking at the address space when parsing * for errors. This is because parsing the Address Modifier Codes * is going to be quite resource intensive to do properly. We * should be OK just looking at the addresses and this is certainly * much better than what we had before. */ err_pos = NULL; /* Iterate through errors */ list_for_each(err_pos, &tsi148_bridge->vme_errors) { vme_err = list_entry(err_pos, struct vme_bus_error, list); if ((vme_err->address >= address) && (vme_err->address < bound)) { valid = vme_err; break; } } return valid; } /* * Clear errors in the provided address range. 
*/ static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge, u32 aspace, unsigned long long address, size_t count) { struct list_head *err_pos, *temp; struct vme_bus_error *vme_err; unsigned long long bound; bound = address + count; /* * XXX We are currently not looking at the address space when parsing * for errors. This is because parsing the Address Modifier Codes * is going to be quite resource intensive to do properly. We * should be OK just looking at the addresses and this is certainly * much better than what we had before. */ err_pos = NULL; /* Iterate through errors */ list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) { vme_err = list_entry(err_pos, struct vme_bus_error, list); if ((vme_err->address >= address) && (vme_err->address < bound)) { list_del(err_pos); kfree(vme_err); } } } /* * Initialize a slave window with the requested attributes. */ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled, unsigned long long vme_base, unsigned long long size, dma_addr_t pci_base, u32 aspace, u32 cycle) { unsigned int i, addr = 0, granularity = 0; unsigned int temp_ctl = 0; unsigned int vme_base_low, vme_base_high; unsigned int vme_bound_low, vme_bound_high; unsigned int pci_offset_low, pci_offset_high; unsigned long long vme_bound, pci_offset; struct vme_bridge *tsi148_bridge; struct tsi148_driver *bridge; tsi148_bridge = image->parent; bridge = tsi148_bridge->driver_priv; i = image->number; switch (aspace) { case VME_A16: granularity = 0x10; addr |= TSI148_LCSR_ITAT_AS_A16; break; case VME_A24: granularity = 0x1000; addr |= TSI148_LCSR_ITAT_AS_A24; break; case VME_A32: granularity = 0x10000; addr |= TSI148_LCSR_ITAT_AS_A32; break; case VME_A64: granularity = 0x10000; addr |= TSI148_LCSR_ITAT_AS_A64; break; case VME_CRCSR: case VME_USER1: case VME_USER2: case VME_USER3: case VME_USER4: default: dev_err(tsi148_bridge->parent, "Invalid address space\n"); return -EINVAL; break; } /* Convert 64-bit variables to 2x 32-bit 
variables */ reg_split(vme_base, &vme_base_high, &vme_base_low); /* * Bound address is a valid address for the window, adjust * accordingly */ vme_bound = vme_base + size - granularity; reg_split(vme_bound, &vme_bound_high, &vme_bound_low); pci_offset = (unsigned long long)pci_base - vme_base; reg_split(pci_offset, &pci_offset_high, &pci_offset_low); if (vme_base_low & (granularity - 1)) { dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n"); return -EINVAL; } if (vme_bound_low & (granularity - 1)) { dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n"); return -EINVAL; } if (pci_offset_low & (granularity - 1)) { dev_err(tsi148_bridge->parent, "Invalid PCI Offset " "alignment\n"); return -EINVAL; } /* Disable while we are mucking around */ temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITAT); temp_ctl &= ~TSI148_LCSR_ITAT_EN; iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITAT); /* Setup mapping */ iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITSAU); iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITSAL); iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITEAU); iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITEAL); iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITOFU); iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITOFL); /* Setup 2eSST speeds */ temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M; switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { case VME_2eSST160: temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160; break; case VME_2eSST267: temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267; break; case VME_2eSST320: temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320; break; } /* Setup cycle types */ temp_ctl &= ~(0x1F << 7); if (cycle & VME_BLT) temp_ctl |= TSI148_LCSR_ITAT_BLT; if (cycle & VME_MBLT) temp_ctl |= 
TSI148_LCSR_ITAT_MBLT; if (cycle & VME_2eVME) temp_ctl |= TSI148_LCSR_ITAT_2eVME; if (cycle & VME_2eSST) temp_ctl |= TSI148_LCSR_ITAT_2eSST; if (cycle & VME_2eSSTB) temp_ctl |= TSI148_LCSR_ITAT_2eSSTB; /* Setup address space */ temp_ctl &= ~TSI148_LCSR_ITAT_AS_M; temp_ctl |= addr; temp_ctl &= ~0xF; if (cycle & VME_SUPER) temp_ctl |= TSI148_LCSR_ITAT_SUPR ; if (cycle & VME_USER) temp_ctl |= TSI148_LCSR_ITAT_NPRIV; if (cycle & VME_PROG) temp_ctl |= TSI148_LCSR_ITAT_PGM; if (cycle & VME_DATA) temp_ctl |= TSI148_LCSR_ITAT_DATA; /* Write ctl reg without enable */ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITAT); if (enabled) temp_ctl |= TSI148_LCSR_ITAT_EN; iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITAT); return 0; } /* * Get slave window configuration. */ static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled, unsigned long long *vme_base, unsigned long long *size, dma_addr_t *pci_base, u32 *aspace, u32 *cycle) { unsigned int i, granularity = 0, ctl = 0; unsigned int vme_base_low, vme_base_high; unsigned int vme_bound_low, vme_bound_high; unsigned int pci_offset_low, pci_offset_high; unsigned long long vme_bound, pci_offset; struct tsi148_driver *bridge; bridge = image->parent->driver_priv; i = image->number; /* Read registers */ ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITAT); vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITSAU); vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITSAL); vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITEAU); vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITEAL); pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITOFU); pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITOFL); /* Convert 64-bit variables to 2x 32-bit variables 
*/ reg_join(vme_base_high, vme_base_low, vme_base); reg_join(vme_bound_high, vme_bound_low, &vme_bound); reg_join(pci_offset_high, pci_offset_low, &pci_offset); *pci_base = (dma_addr_t)vme_base + pci_offset; *enabled = 0; *aspace = 0; *cycle = 0; if (ctl & TSI148_LCSR_ITAT_EN) *enabled = 1; if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) { granularity = 0x10; *aspace |= VME_A16; } if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) { granularity = 0x1000; *aspace |= VME_A24; } if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) { granularity = 0x10000; *aspace |= VME_A32; } if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) { granularity = 0x10000; *aspace |= VME_A64; } /* Need granularity before we set the size */ *size = (unsigned long long)((vme_bound - *vme_base) + granularity); if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160) *cycle |= VME_2eSST160; if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267) *cycle |= VME_2eSST267; if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320) *cycle |= VME_2eSST320; if (ctl & TSI148_LCSR_ITAT_BLT) *cycle |= VME_BLT; if (ctl & TSI148_LCSR_ITAT_MBLT) *cycle |= VME_MBLT; if (ctl & TSI148_LCSR_ITAT_2eVME) *cycle |= VME_2eVME; if (ctl & TSI148_LCSR_ITAT_2eSST) *cycle |= VME_2eSST; if (ctl & TSI148_LCSR_ITAT_2eSSTB) *cycle |= VME_2eSSTB; if (ctl & TSI148_LCSR_ITAT_SUPR) *cycle |= VME_SUPER; if (ctl & TSI148_LCSR_ITAT_NPRIV) *cycle |= VME_USER; if (ctl & TSI148_LCSR_ITAT_PGM) *cycle |= VME_PROG; if (ctl & TSI148_LCSR_ITAT_DATA) *cycle |= VME_DATA; return 0; } /* * Allocate and map PCI Resource */ static int tsi148_alloc_resource(struct vme_master_resource *image, unsigned long long size) { unsigned long long existing_size; int retval = 0; struct pci_dev *pdev; struct vme_bridge *tsi148_bridge; tsi148_bridge = image->parent; pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev); existing_size = (unsigned long 
long)(image->bus_resource.end - image->bus_resource.start); /* If the existing size is OK, return */ if ((size != 0) && (existing_size == (size - 1))) return 0; if (existing_size != 0) { iounmap(image->kern_base); image->kern_base = NULL; kfree(image->bus_resource.name); release_resource(&image->bus_resource); memset(&image->bus_resource, 0, sizeof(struct resource)); } /* Exit here if size is zero */ if (size == 0) return 0; if (image->bus_resource.name == NULL) { image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC); if (image->bus_resource.name == NULL) { dev_err(tsi148_bridge->parent, "Unable to allocate " "memory for resource name\n"); retval = -ENOMEM; goto err_name; } } sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name, image->number); image->bus_resource.start = 0; image->bus_resource.end = (unsigned long)size; image->bus_resource.flags = IORESOURCE_MEM; retval = pci_bus_alloc_resource(pdev->bus, &image->bus_resource, size, size, PCIBIOS_MIN_MEM, 0, NULL, NULL); if (retval) { dev_err(tsi148_bridge->parent, "Failed to allocate mem " "resource for window %d size 0x%lx start 0x%lx\n", image->number, (unsigned long)size, (unsigned long)image->bus_resource.start); goto err_resource; } image->kern_base = ioremap_nocache( image->bus_resource.start, size); if (image->kern_base == NULL) { dev_err(tsi148_bridge->parent, "Failed to remap resource\n"); retval = -ENOMEM; goto err_remap; } return 0; err_remap: release_resource(&image->bus_resource); err_resource: kfree(image->bus_resource.name); memset(&image->bus_resource, 0, sizeof(struct resource)); err_name: return retval; } /* * Free and unmap PCI Resource */ static void tsi148_free_resource(struct vme_master_resource *image) { iounmap(image->kern_base); image->kern_base = NULL; release_resource(&image->bus_resource); kfree(image->bus_resource.name); memset(&image->bus_resource, 0, sizeof(struct resource)); } /* * Set the attributes of an outbound window. 
*/ static int tsi148_master_set(struct vme_master_resource *image, int enabled, unsigned long long vme_base, unsigned long long size, u32 aspace, u32 cycle, u32 dwidth) { int retval = 0; unsigned int i; unsigned int temp_ctl = 0; unsigned int pci_base_low, pci_base_high; unsigned int pci_bound_low, pci_bound_high; unsigned int vme_offset_low, vme_offset_high; unsigned long long pci_bound, vme_offset, pci_base; struct vme_bridge *tsi148_bridge; struct tsi148_driver *bridge; tsi148_bridge = image->parent; bridge = tsi148_bridge->driver_priv; /* Verify input data */ if (vme_base & 0xFFFF) { dev_err(tsi148_bridge->parent, "Invalid VME Window " "alignment\n"); retval = -EINVAL; goto err_window; } if ((size == 0) && (enabled != 0)) { dev_err(tsi148_bridge->parent, "Size must be non-zero for " "enabled windows\n"); retval = -EINVAL; goto err_window; } spin_lock(&image->lock); /* Let's allocate the resource here rather than further up the stack as * it avoids pushing loads of bus dependent stuff up the stack. If size * is zero, any existing resource will be freed. */ retval = tsi148_alloc_resource(image, size); if (retval) { spin_unlock(&image->lock); dev_err(tsi148_bridge->parent, "Unable to allocate memory for " "resource\n"); goto err_res; } if (size == 0) { pci_base = 0; pci_bound = 0; vme_offset = 0; } else { pci_base = (unsigned long long)image->bus_resource.start; /* * Bound address is a valid address for the window, adjust * according to window granularity. 
*/ pci_bound = pci_base + (size - 0x10000); vme_offset = vme_base - pci_base; } /* Convert 64-bit variables to 2x 32-bit variables */ reg_split(pci_base, &pci_base_high, &pci_base_low); reg_split(pci_bound, &pci_bound_high, &pci_bound_low); reg_split(vme_offset, &vme_offset_high, &vme_offset_low); if (pci_base_low & 0xFFFF) { spin_unlock(&image->lock); dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n"); retval = -EINVAL; goto err_gran; } if (pci_bound_low & 0xFFFF) { spin_unlock(&image->lock); dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n"); retval = -EINVAL; goto err_gran; } if (vme_offset_low & 0xFFFF) { spin_unlock(&image->lock); dev_err(tsi148_bridge->parent, "Invalid VME Offset " "alignment\n"); retval = -EINVAL; goto err_gran; } i = image->number; /* Disable while we are mucking around */ temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTAT); temp_ctl &= ~TSI148_LCSR_OTAT_EN; iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTAT); /* Setup 2eSST speeds */ temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M; switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { case VME_2eSST160: temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160; break; case VME_2eSST267: temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267; break; case VME_2eSST320: temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320; break; } /* Setup cycle types */ if (cycle & VME_BLT) { temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; temp_ctl |= TSI148_LCSR_OTAT_TM_BLT; } if (cycle & VME_MBLT) { temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT; } if (cycle & VME_2eVME) { temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME; } if (cycle & VME_2eSST) { temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST; } if (cycle & VME_2eSSTB) { dev_warn(tsi148_bridge->parent, "Currently not setting " "Broadcast Select Registers\n"); temp_ctl &= ~TSI148_LCSR_OTAT_TM_M; temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB; } /* Setup data 
width */ temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M; switch (dwidth) { case VME_D16: temp_ctl |= TSI148_LCSR_OTAT_DBW_16; break; case VME_D32: temp_ctl |= TSI148_LCSR_OTAT_DBW_32; break; default: spin_unlock(&image->lock); dev_err(tsi148_bridge->parent, "Invalid data width\n"); retval = -EINVAL; goto err_dwidth; } /* Setup address space */ temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M; switch (aspace) { case VME_A16: temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16; break; case VME_A24: temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24; break; case VME_A32: temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32; break; case VME_A64: temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64; break; case VME_CRCSR: temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR; break; case VME_USER1: temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1; break; case VME_USER2: temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2; break; case VME_USER3: temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3; break; case VME_USER4: temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4; break; default: spin_unlock(&image->lock); dev_err(tsi148_bridge->parent, "Invalid address space\n"); retval = -EINVAL; goto err_aspace; break; } temp_ctl &= ~(3<<4); if (cycle & VME_SUPER) temp_ctl |= TSI148_LCSR_OTAT_SUP; if (cycle & VME_PROG) temp_ctl |= TSI148_LCSR_OTAT_PGM; /* Setup mapping */ iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTSAU); iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTSAL); iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTEAU); iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTEAL); iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTOFU); iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTOFL); /* Write ctl reg without enable */ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTAT); if (enabled) temp_ctl |= TSI148_LCSR_OTAT_EN; iowrite32be(temp_ctl, bridge->base + 
TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTAT); spin_unlock(&image->lock); return 0; err_aspace: err_dwidth: err_gran: tsi148_free_resource(image); err_res: err_window: return retval; } /* * Set the attributes of an outbound window. * * XXX Not parsing prefetch information. */ static int __tsi148_master_get(struct vme_master_resource *image, int *enabled, unsigned long long *vme_base, unsigned long long *size, u32 *aspace, u32 *cycle, u32 *dwidth) { unsigned int i, ctl; unsigned int pci_base_low, pci_base_high; unsigned int pci_bound_low, pci_bound_high; unsigned int vme_offset_low, vme_offset_high; unsigned long long pci_base, pci_bound, vme_offset; struct tsi148_driver *bridge; bridge = image->parent->driver_priv; i = image->number; ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTAT); pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTSAU); pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTSAL); pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTEAU); pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTEAL); vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTOFU); vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTOFL); /* Convert 64-bit variables to 2x 32-bit variables */ reg_join(pci_base_high, pci_base_low, &pci_base); reg_join(pci_bound_high, pci_bound_low, &pci_bound); reg_join(vme_offset_high, vme_offset_low, &vme_offset); *vme_base = pci_base + vme_offset; *size = (unsigned long long)(pci_bound - pci_base) + 0x10000; *enabled = 0; *aspace = 0; *cycle = 0; *dwidth = 0; if (ctl & TSI148_LCSR_OTAT_EN) *enabled = 1; /* Setup address space */ if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16) *aspace |= VME_A16; if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24) *aspace |= VME_A24; if ((ctl & 
TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32) *aspace |= VME_A32; if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64) *aspace |= VME_A64; if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR) *aspace |= VME_CRCSR; if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1) *aspace |= VME_USER1; if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2) *aspace |= VME_USER2; if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3) *aspace |= VME_USER3; if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4) *aspace |= VME_USER4; /* Setup 2eSST speeds */ if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160) *cycle |= VME_2eSST160; if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267) *cycle |= VME_2eSST267; if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320) *cycle |= VME_2eSST320; /* Setup cycle types */ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT) *cycle |= VME_SCT; if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT) *cycle |= VME_BLT; if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT) *cycle |= VME_MBLT; if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME) *cycle |= VME_2eVME; if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST) *cycle |= VME_2eSST; if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB) *cycle |= VME_2eSSTB; if (ctl & TSI148_LCSR_OTAT_SUP) *cycle |= VME_SUPER; else *cycle |= VME_USER; if (ctl & TSI148_LCSR_OTAT_PGM) *cycle |= VME_PROG; else *cycle |= VME_DATA; /* Setup data width */ if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16) *dwidth = VME_D16; if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32) *dwidth = VME_D32; return 0; } static int tsi148_master_get(struct vme_master_resource *image, int *enabled, unsigned long long *vme_base, unsigned long long *size, u32 *aspace, u32 *cycle, u32 *dwidth) { int retval; 
spin_lock(&image->lock); retval = __tsi148_master_get(image, enabled, vme_base, size, aspace, cycle, dwidth); spin_unlock(&image->lock); return retval; } static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf, size_t count, loff_t offset) { int retval, enabled; unsigned long long vme_base, size; u32 aspace, cycle, dwidth; struct vme_bus_error *vme_err = NULL; struct vme_bridge *tsi148_bridge; void *addr = image->kern_base + offset; unsigned int done = 0; unsigned int count32; tsi148_bridge = image->parent; spin_lock(&image->lock); /* The following code handles VME address alignment. We cannot use * memcpy_xxx directly here because it may cut small data transfers in * to 8-bit cycles, thus making D16 cycle impossible. * On the other hand, the bridge itself assures that the maximum data * cycle configured for the transfer is used and splits it * automatically for non-aligned addresses, so we don't want the * overhead of needlessly forcing small transfers for the entire cycle. 
*/ if ((uintptr_t)addr & 0x1) { *(u8 *)buf = ioread8(addr); done += 1; if (done == count) goto out; } if ((uintptr_t)(addr + done) & 0x2) { if ((count - done) < 2) { *(u8 *)(buf + done) = ioread8(addr + done); done += 1; goto out; } else { *(u16 *)(buf + done) = ioread16(addr + done); done += 2; } } count32 = (count - done) & ~0x3; if (count32 > 0) { memcpy_fromio(buf + done, addr + done, count32); done += count32; } if ((count - done) & 0x2) { *(u16 *)(buf + done) = ioread16(addr + done); done += 2; } if ((count - done) & 0x1) { *(u8 *)(buf + done) = ioread8(addr + done); done += 1; } out: retval = count; if (!err_chk) goto skip_chk; __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle, &dwidth); vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset, count); if (vme_err != NULL) { dev_err(image->parent->parent, "First VME read error detected " "an at address 0x%llx\n", vme_err->address); retval = vme_err->address - (vme_base + offset); /* Clear down save errors in this address range */ tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset, count); } skip_chk: spin_unlock(&image->lock); return retval; } static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, size_t count, loff_t offset) { int retval = 0, enabled; unsigned long long vme_base, size; u32 aspace, cycle, dwidth; void *addr = image->kern_base + offset; unsigned int done = 0; unsigned int count32; struct vme_bus_error *vme_err = NULL; struct vme_bridge *tsi148_bridge; struct tsi148_driver *bridge; tsi148_bridge = image->parent; bridge = tsi148_bridge->driver_priv; spin_lock(&image->lock); /* Here we apply for the same strategy we do in master_read * function in order to assure D16 cycle when required. 
*/ if ((uintptr_t)addr & 0x1) { iowrite8(*(u8 *)buf, addr); done += 1; if (done == count) goto out; } if ((uintptr_t)(addr + done) & 0x2) { if ((count - done) < 2) { iowrite8(*(u8 *)(buf + done), addr + done); done += 1; goto out; } else { iowrite16(*(u16 *)(buf + done), addr + done); done += 2; } } count32 = (count - done) & ~0x3; if (count32 > 0) { memcpy_toio(addr + done, buf + done, count32); done += count32; } if ((count - done) & 0x2) { iowrite16(*(u16 *)(buf + done), addr + done); done += 2; } if ((count - done) & 0x1) { iowrite8(*(u8 *)(buf + done), addr + done); done += 1; } out: retval = count; /* * Writes are posted. We need to do a read on the VME bus to flush out * all of the writes before we check for errors. We can't guarantee * that reading the data we have just written is safe. It is believed * that there isn't any read, write re-ordering, so we can read any * location in VME space, so lets read the Device ID from the tsi148's * own registers as mapped into CR/CSR space. * * We check for saved errors in the written address range/space. */ if (!err_chk) goto skip_chk; /* * Get window info first, to maximise the time that the buffers may * fluch on their own */ __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle, &dwidth); ioread16(bridge->flush_image->kern_base + 0x7F000); vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset, count); if (vme_err != NULL) { dev_warn(tsi148_bridge->parent, "First VME write error detected" " an at address 0x%llx\n", vme_err->address); retval = vme_err->address - (vme_base + offset); /* Clear down save errors in this address range */ tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset, count); } skip_chk: spin_unlock(&image->lock); return retval; } /* * Perform an RMW cycle on the VME bus. * * Requires a previously configured master window, returns final value. 
*/ static unsigned int tsi148_master_rmw(struct vme_master_resource *image, unsigned int mask, unsigned int compare, unsigned int swap, loff_t offset) { unsigned long long pci_addr; unsigned int pci_addr_high, pci_addr_low; u32 tmp, result; int i; struct tsi148_driver *bridge; bridge = image->parent->driver_priv; /* Find the PCI address that maps to the desired VME address */ i = image->number; /* Locking as we can only do one of these at a time */ mutex_lock(&bridge->vme_rmw); /* Lock image */ spin_lock(&image->lock); pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTSAU); pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTSAL); reg_join(pci_addr_high, pci_addr_low, &pci_addr); reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low); /* Configure registers */ iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN); iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC); iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS); iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU); iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL); /* Enable RMW */ tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL); tmp |= TSI148_LCSR_VMCTRL_RMWEN; iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL); /* Kick process off with a read to the required address. 
*/ result = ioread32be(image->kern_base + offset); /* Disable RMW */ tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL); tmp &= ~TSI148_LCSR_VMCTRL_RMWEN; iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL); spin_unlock(&image->lock); mutex_unlock(&bridge->vme_rmw); return result; } static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr, u32 aspace, u32 cycle, u32 dwidth) { u32 val; val = be32_to_cpu(*attr); /* Setup 2eSST speeds */ switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { case VME_2eSST160: val |= TSI148_LCSR_DSAT_2eSSTM_160; break; case VME_2eSST267: val |= TSI148_LCSR_DSAT_2eSSTM_267; break; case VME_2eSST320: val |= TSI148_LCSR_DSAT_2eSSTM_320; break; } /* Setup cycle types */ if (cycle & VME_SCT) val |= TSI148_LCSR_DSAT_TM_SCT; if (cycle & VME_BLT) val |= TSI148_LCSR_DSAT_TM_BLT; if (cycle & VME_MBLT) val |= TSI148_LCSR_DSAT_TM_MBLT; if (cycle & VME_2eVME) val |= TSI148_LCSR_DSAT_TM_2eVME; if (cycle & VME_2eSST) val |= TSI148_LCSR_DSAT_TM_2eSST; if (cycle & VME_2eSSTB) { dev_err(dev, "Currently not setting Broadcast Select " "Registers\n"); val |= TSI148_LCSR_DSAT_TM_2eSSTB; } /* Setup data width */ switch (dwidth) { case VME_D16: val |= TSI148_LCSR_DSAT_DBW_16; break; case VME_D32: val |= TSI148_LCSR_DSAT_DBW_32; break; default: dev_err(dev, "Invalid data width\n"); return -EINVAL; } /* Setup address space */ switch (aspace) { case VME_A16: val |= TSI148_LCSR_DSAT_AMODE_A16; break; case VME_A24: val |= TSI148_LCSR_DSAT_AMODE_A24; break; case VME_A32: val |= TSI148_LCSR_DSAT_AMODE_A32; break; case VME_A64: val |= TSI148_LCSR_DSAT_AMODE_A64; break; case VME_CRCSR: val |= TSI148_LCSR_DSAT_AMODE_CRCSR; break; case VME_USER1: val |= TSI148_LCSR_DSAT_AMODE_USER1; break; case VME_USER2: val |= TSI148_LCSR_DSAT_AMODE_USER2; break; case VME_USER3: val |= TSI148_LCSR_DSAT_AMODE_USER3; break; case VME_USER4: val |= TSI148_LCSR_DSAT_AMODE_USER4; break; default: dev_err(dev, "Invalid address space\n"); return -EINVAL; break; 
} if (cycle & VME_SUPER) val |= TSI148_LCSR_DSAT_SUP; if (cycle & VME_PROG) val |= TSI148_LCSR_DSAT_PGM; *attr = cpu_to_be32(val); return 0; } static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr, u32 aspace, u32 cycle, u32 dwidth) { u32 val; val = be32_to_cpu(*attr); /* Setup 2eSST speeds */ switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) { case VME_2eSST160: val |= TSI148_LCSR_DDAT_2eSSTM_160; break; case VME_2eSST267: val |= TSI148_LCSR_DDAT_2eSSTM_267; break; case VME_2eSST320: val |= TSI148_LCSR_DDAT_2eSSTM_320; break; } /* Setup cycle types */ if (cycle & VME_SCT) val |= TSI148_LCSR_DDAT_TM_SCT; if (cycle & VME_BLT) val |= TSI148_LCSR_DDAT_TM_BLT; if (cycle & VME_MBLT) val |= TSI148_LCSR_DDAT_TM_MBLT; if (cycle & VME_2eVME) val |= TSI148_LCSR_DDAT_TM_2eVME; if (cycle & VME_2eSST) val |= TSI148_LCSR_DDAT_TM_2eSST; if (cycle & VME_2eSSTB) { dev_err(dev, "Currently not setting Broadcast Select " "Registers\n"); val |= TSI148_LCSR_DDAT_TM_2eSSTB; } /* Setup data width */ switch (dwidth) { case VME_D16: val |= TSI148_LCSR_DDAT_DBW_16; break; case VME_D32: val |= TSI148_LCSR_DDAT_DBW_32; break; default: dev_err(dev, "Invalid data width\n"); return -EINVAL; } /* Setup address space */ switch (aspace) { case VME_A16: val |= TSI148_LCSR_DDAT_AMODE_A16; break; case VME_A24: val |= TSI148_LCSR_DDAT_AMODE_A24; break; case VME_A32: val |= TSI148_LCSR_DDAT_AMODE_A32; break; case VME_A64: val |= TSI148_LCSR_DDAT_AMODE_A64; break; case VME_CRCSR: val |= TSI148_LCSR_DDAT_AMODE_CRCSR; break; case VME_USER1: val |= TSI148_LCSR_DDAT_AMODE_USER1; break; case VME_USER2: val |= TSI148_LCSR_DDAT_AMODE_USER2; break; case VME_USER3: val |= TSI148_LCSR_DDAT_AMODE_USER3; break; case VME_USER4: val |= TSI148_LCSR_DDAT_AMODE_USER4; break; default: dev_err(dev, "Invalid address space\n"); return -EINVAL; break; } if (cycle & VME_SUPER) val |= TSI148_LCSR_DDAT_SUP; if (cycle & VME_PROG) val |= TSI148_LCSR_DDAT_PGM; *attr = cpu_to_be32(val); return 0; 
} /* * Add a link list descriptor to the list * * Note: DMA engine expects the DMA descriptor to be big endian. */ static int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count) { struct tsi148_dma_entry *entry, *prev; u32 address_high, address_low, val; struct vme_dma_pattern *pattern_attr; struct vme_dma_pci *pci_attr; struct vme_dma_vme *vme_attr; int retval = 0; struct vme_bridge *tsi148_bridge; tsi148_bridge = list->parent->parent; /* Descriptor must be aligned on 64-bit boundaries */ entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL); if (entry == NULL) { dev_err(tsi148_bridge->parent, "Failed to allocate memory for " "dma resource structure\n"); retval = -ENOMEM; goto err_mem; } /* Test descriptor alignment */ if ((unsigned long)&entry->descriptor & 0x7) { dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 " "byte boundary as required: %p\n", &entry->descriptor); retval = -EINVAL; goto err_align; } /* Given we are going to fill out the structure, we probably don't * need to zero it, but better safe than sorry for now. 
*/ memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor)); /* Fill out source part */ switch (src->type) { case VME_DMA_PATTERN: pattern_attr = src->private; entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern); val = TSI148_LCSR_DSAT_TYP_PAT; /* Default behaviour is 32 bit pattern */ if (pattern_attr->type & VME_DMA_PATTERN_BYTE) val |= TSI148_LCSR_DSAT_PSZ; /* It seems that the default behaviour is to increment */ if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) val |= TSI148_LCSR_DSAT_NIN; entry->descriptor.dsat = cpu_to_be32(val); break; case VME_DMA_PCI: pci_attr = src->private; reg_split((unsigned long long)pci_attr->address, &address_high, &address_low); entry->descriptor.dsau = cpu_to_be32(address_high); entry->descriptor.dsal = cpu_to_be32(address_low); entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI); break; case VME_DMA_VME: vme_attr = src->private; reg_split((unsigned long long)vme_attr->address, &address_high, &address_low); entry->descriptor.dsau = cpu_to_be32(address_high); entry->descriptor.dsal = cpu_to_be32(address_low); entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME); retval = tsi148_dma_set_vme_src_attributes( tsi148_bridge->parent, &entry->descriptor.dsat, vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth); if (retval < 0) goto err_source; break; default: dev_err(tsi148_bridge->parent, "Invalid source type\n"); retval = -EINVAL; goto err_source; break; } /* Assume last link - this will be over-written by adding another */ entry->descriptor.dnlau = cpu_to_be32(0); entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA); /* Fill out destination part */ switch (dest->type) { case VME_DMA_PCI: pci_attr = dest->private; reg_split((unsigned long long)pci_attr->address, &address_high, &address_low); entry->descriptor.ddau = cpu_to_be32(address_high); entry->descriptor.ddal = cpu_to_be32(address_low); entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI); break; case 
VME_DMA_VME: vme_attr = dest->private; reg_split((unsigned long long)vme_attr->address, &address_high, &address_low); entry->descriptor.ddau = cpu_to_be32(address_high); entry->descriptor.ddal = cpu_to_be32(address_low); entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME); retval = tsi148_dma_set_vme_dest_attributes( tsi148_bridge->parent, &entry->descriptor.ddat, vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth); if (retval < 0) goto err_dest; break; default: dev_err(tsi148_bridge->parent, "Invalid destination type\n"); retval = -EINVAL; goto err_dest; break; } /* Fill out count */ entry->descriptor.dcnt = cpu_to_be32((u32)count); /* Add to list */ list_add_tail(&entry->list, &list->entries); /* Fill out previous descriptors "Next Address" */ if (entry->list.prev != &list->entries) { prev = list_entry(entry->list.prev, struct tsi148_dma_entry, list); /* We need the bus address for the pointer */ entry->dma_handle = dma_map_single(tsi148_bridge->parent, &entry->descriptor, sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE); reg_split((unsigned long long)entry->dma_handle, &address_high, &address_low); entry->descriptor.dnlau = cpu_to_be32(address_high); entry->descriptor.dnlal = cpu_to_be32(address_low); } return 0; err_dest: err_source: err_align: kfree(entry); err_mem: return retval; } /* * Check to see if the provided DMA channel is busy. */ static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel) { u32 tmp; struct tsi148_driver *bridge; bridge = tsi148_bridge->driver_priv; tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSTA); if (tmp & TSI148_LCSR_DSTA_BSY) return 0; else return 1; } /* * Execute a previously generated link list * * XXX Need to provide control register configuration. 
*/ static int tsi148_dma_list_exec(struct vme_dma_list *list) { struct vme_dma_resource *ctrlr; int channel, retval = 0; struct tsi148_dma_entry *entry; u32 bus_addr_high, bus_addr_low; u32 val, dctlreg = 0; struct vme_bridge *tsi148_bridge; struct tsi148_driver *bridge; ctrlr = list->parent; tsi148_bridge = ctrlr->parent; bridge = tsi148_bridge->driver_priv; mutex_lock(&ctrlr->mtx); channel = ctrlr->number; if (!list_empty(&ctrlr->running)) { /* * XXX We have an active DMA transfer and currently haven't * sorted out the mechanism for "pending" DMA transfers. * Return busy. */ /* Need to add to pending here */ mutex_unlock(&ctrlr->mtx); return -EBUSY; } else { list_add(&list->list, &ctrlr->running); } /* Get first bus address and write into registers */ entry = list_first_entry(&list->entries, struct tsi148_dma_entry, list); entry->dma_handle = dma_map_single(tsi148_bridge->parent, &entry->descriptor, sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE); mutex_unlock(&ctrlr->mtx); reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low); iowrite32be(bus_addr_high, bridge->base + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU); iowrite32be(bus_addr_low, bridge->base + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL); dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL); /* Start the operation */ iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL); wait_event_interruptible(bridge->dma_queue[channel], tsi148_dma_busy(ctrlr->parent, channel)); /* * Read status register, this register is valid until we kick off a * new transfer. */ val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSTA); if (val & TSI148_LCSR_DSTA_VBE) { dev_err(tsi148_bridge->parent, "DMA Error. 
DSTA=%08X\n", val); retval = -EIO; } /* Remove list from running list */ mutex_lock(&ctrlr->mtx); list_del(&list->list); mutex_unlock(&ctrlr->mtx); return retval; } /* * Clean up a previously generated link list * * We have a separate function, don't assume that the chain can't be reused. */ static int tsi148_dma_list_empty(struct vme_dma_list *list) { struct list_head *pos, *temp; struct tsi148_dma_entry *entry; struct vme_bridge *tsi148_bridge = list->parent->parent; /* detach and free each entry */ list_for_each_safe(pos, temp, &list->entries) { list_del(pos); entry = list_entry(pos, struct tsi148_dma_entry, list); dma_unmap_single(tsi148_bridge->parent, entry->dma_handle, sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE); kfree(entry); } return 0; } /* * All 4 location monitors reside at the same base - this is therefore a * system wide configuration. * * This does not enable the LM monitor - that should be done when the first * callback is attached and disabled when the last callback is removed. */ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base, u32 aspace, u32 cycle) { u32 lm_base_high, lm_base_low, lm_ctl = 0; int i; struct vme_bridge *tsi148_bridge; struct tsi148_driver *bridge; tsi148_bridge = lm->parent; bridge = tsi148_bridge->driver_priv; mutex_lock(&lm->mtx); /* If we already have a callback attached, we can't move it! 
*/ for (i = 0; i < lm->monitors; i++) { if (bridge->lm_callback[i] != NULL) { mutex_unlock(&lm->mtx); dev_err(tsi148_bridge->parent, "Location monitor " "callback attached, can't reset\n"); return -EBUSY; } } switch (aspace) { case VME_A16: lm_ctl |= TSI148_LCSR_LMAT_AS_A16; break; case VME_A24: lm_ctl |= TSI148_LCSR_LMAT_AS_A24; break; case VME_A32: lm_ctl |= TSI148_LCSR_LMAT_AS_A32; break; case VME_A64: lm_ctl |= TSI148_LCSR_LMAT_AS_A64; break; default: mutex_unlock(&lm->mtx); dev_err(tsi148_bridge->parent, "Invalid address space\n"); return -EINVAL; break; } if (cycle & VME_SUPER) lm_ctl |= TSI148_LCSR_LMAT_SUPR ; if (cycle & VME_USER) lm_ctl |= TSI148_LCSR_LMAT_NPRIV; if (cycle & VME_PROG) lm_ctl |= TSI148_LCSR_LMAT_PGM; if (cycle & VME_DATA) lm_ctl |= TSI148_LCSR_LMAT_DATA; reg_split(lm_base, &lm_base_high, &lm_base_low); iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU); iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL); iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT); mutex_unlock(&lm->mtx); return 0; } /* Get configuration of the callback monitor and return whether it is enabled * or disabled. 
*/ static int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base, u32 *aspace, u32 *cycle) { u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0; struct tsi148_driver *bridge; bridge = lm->parent->driver_priv; mutex_lock(&lm->mtx); lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU); lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL); lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT); reg_join(lm_base_high, lm_base_low, lm_base); if (lm_ctl & TSI148_LCSR_LMAT_EN) enabled = 1; if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) *aspace |= VME_A16; if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) *aspace |= VME_A24; if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) *aspace |= VME_A32; if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) *aspace |= VME_A64; if (lm_ctl & TSI148_LCSR_LMAT_SUPR) *cycle |= VME_SUPER; if (lm_ctl & TSI148_LCSR_LMAT_NPRIV) *cycle |= VME_USER; if (lm_ctl & TSI148_LCSR_LMAT_PGM) *cycle |= VME_PROG; if (lm_ctl & TSI148_LCSR_LMAT_DATA) *cycle |= VME_DATA; mutex_unlock(&lm->mtx); return enabled; } /* * Attach a callback to a specific location monitor. * * Callback will be passed the monitor triggered. 
*/ static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor, void (*callback)(int)) { u32 lm_ctl, tmp; struct vme_bridge *tsi148_bridge; struct tsi148_driver *bridge; tsi148_bridge = lm->parent; bridge = tsi148_bridge->driver_priv; mutex_lock(&lm->mtx); /* Ensure that the location monitor is configured - need PGM or DATA */ lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT); if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) { mutex_unlock(&lm->mtx); dev_err(tsi148_bridge->parent, "Location monitor not properly " "configured\n"); return -EINVAL; } /* Check that a callback isn't already attached */ if (bridge->lm_callback[monitor] != NULL) { mutex_unlock(&lm->mtx); dev_err(tsi148_bridge->parent, "Existing callback attached\n"); return -EBUSY; } /* Attach callback */ bridge->lm_callback[monitor] = callback; /* Enable Location Monitor interrupt */ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN); tmp |= TSI148_LCSR_INTEN_LMEN[monitor]; iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN); tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); tmp |= TSI148_LCSR_INTEO_LMEO[monitor]; iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); /* Ensure that global Location Monitor Enable set */ if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) { lm_ctl |= TSI148_LCSR_LMAT_EN; iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT); } mutex_unlock(&lm->mtx); return 0; } /* * Detach a callback function forn a specific location monitor. 
*/ static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor) { u32 lm_en, tmp; struct tsi148_driver *bridge; bridge = lm->parent->driver_priv; mutex_lock(&lm->mtx); /* Disable Location Monitor and ensure previous interrupts are clear */ lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN); lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor]; iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN); tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO); tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor]; iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); iowrite32be(TSI148_LCSR_INTC_LMC[monitor], bridge->base + TSI148_LCSR_INTC); /* Detach callback */ bridge->lm_callback[monitor] = NULL; /* If all location monitors disabled, disable global Location Monitor */ if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) { tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT); tmp &= ~TSI148_LCSR_LMAT_EN; iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT); } mutex_unlock(&lm->mtx); return 0; } /* * Determine Geographical Addressing */ static int tsi148_slot_get(struct vme_bridge *tsi148_bridge) { u32 slot = 0; struct tsi148_driver *bridge; bridge = tsi148_bridge->driver_priv; if (!geoid) { slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT); slot = slot & TSI148_LCSR_VSTAT_GA_M; } else slot = geoid; return (int)slot; } static void *tsi148_alloc_consistent(struct device *parent, size_t size, dma_addr_t *dma) { struct pci_dev *pdev; /* Find pci_dev container of dev */ pdev = container_of(parent, struct pci_dev, dev); return pci_alloc_consistent(pdev, size, dma); } static void tsi148_free_consistent(struct device *parent, size_t size, void *vaddr, dma_addr_t dma) { struct pci_dev *pdev; /* Find pci_dev container of dev */ pdev = container_of(parent, struct pci_dev, dev); pci_free_consistent(pdev, size, vaddr, dma); } /* * Configure CR/CSR space * * Access to the CR/CSR can be configured at power-up. 
The location of the * CR/CSR registers in the CR/CSR address space is determined by the boards * Auto-ID or Geographic address. This function ensures that the window is * enabled at an offset consistent with the boards geopgraphic address. * * Each board has a 512kB window, with the highest 4kB being used for the * boards registers, this means there is a fix length 508kB window which must * be mapped onto PCI memory. */ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge, struct pci_dev *pdev) { u32 cbar, crat, vstat; u32 crcsr_bus_high, crcsr_bus_low; int retval; struct tsi148_driver *bridge; bridge = tsi148_bridge->driver_priv; /* Allocate mem for CR/CSR image */ bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE, &bridge->crcsr_bus); if (bridge->crcsr_kernel == NULL) { dev_err(tsi148_bridge->parent, "Failed to allocate memory for " "CR/CSR image\n"); return -ENOMEM; } memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE); reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low); iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU); iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL); /* Ensure that the CR/CSR is configured at the correct offset */ cbar = ioread32be(bridge->base + TSI148_CBAR); cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3; vstat = tsi148_slot_get(tsi148_bridge); if (cbar != vstat) { cbar = vstat; dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n"); iowrite32be(cbar<<3, bridge->base + TSI148_CBAR); } dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar); crat = ioread32be(bridge->base + TSI148_LCSR_CRAT); if (crat & TSI148_LCSR_CRAT_EN) { dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n"); iowrite32be(crat | TSI148_LCSR_CRAT_EN, bridge->base + TSI148_LCSR_CRAT); } else dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n"); /* If we want flushed, error-checked writes, set up a window * over the CR/CSR registers. We read from here to safely flush * through VME writes. 
*/ if (err_chk) { retval = tsi148_master_set(bridge->flush_image, 1, (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT, VME_D16); if (retval) dev_err(tsi148_bridge->parent, "Configuring flush image" " failed\n"); } return 0; } static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge, struct pci_dev *pdev) { u32 crat; struct tsi148_driver *bridge; bridge = tsi148_bridge->driver_priv; /* Turn off CR/CSR space */ crat = ioread32be(bridge->base + TSI148_LCSR_CRAT); iowrite32be(crat & ~TSI148_LCSR_CRAT_EN, bridge->base + TSI148_LCSR_CRAT); /* Free image */ iowrite32be(0, bridge->base + TSI148_LCSR_CROU); iowrite32be(0, bridge->base + TSI148_LCSR_CROL); pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel, bridge->crcsr_bus); } static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int retval, i, master_num; u32 data; struct list_head *pos = NULL, *n; struct vme_bridge *tsi148_bridge; struct tsi148_driver *tsi148_device; struct vme_master_resource *master_image; struct vme_slave_resource *slave_image; struct vme_dma_resource *dma_ctrlr; struct vme_lm_resource *lm; /* If we want to support more than one of each bridge, we need to * dynamically generate this so we get one per device */ tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL); if (tsi148_bridge == NULL) { dev_err(&pdev->dev, "Failed to allocate memory for device " "structure\n"); retval = -ENOMEM; goto err_struct; } tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL); if (tsi148_device == NULL) { dev_err(&pdev->dev, "Failed to allocate memory for device " "structure\n"); retval = -ENOMEM; goto err_driver; } tsi148_bridge->driver_priv = tsi148_device; /* Enable the device */ retval = pci_enable_device(pdev); if (retval) { dev_err(&pdev->dev, "Unable to enable device\n"); goto err_enable; } /* Map Registers */ retval = pci_request_regions(pdev, driver_name); if (retval) { dev_err(&pdev->dev, "Unable to reserve resources\n"); goto err_resource; } /* 
map registers in BAR 0 */ tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0), 4096); if (!tsi148_device->base) { dev_err(&pdev->dev, "Unable to remap CRG region\n"); retval = -EIO; goto err_remap; } /* Check to see if the mapping worked out */ data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF; if (data != PCI_VENDOR_ID_TUNDRA) { dev_err(&pdev->dev, "CRG region check failed\n"); retval = -EIO; goto err_test; } /* Initialize wait queues & mutual exclusion flags */ init_waitqueue_head(&tsi148_device->dma_queue[0]); init_waitqueue_head(&tsi148_device->dma_queue[1]); init_waitqueue_head(&tsi148_device->iack_queue); mutex_init(&tsi148_device->vme_int); mutex_init(&tsi148_device->vme_rmw); tsi148_bridge->parent = &pdev->dev; strcpy(tsi148_bridge->name, driver_name); /* Setup IRQ */ retval = tsi148_irq_init(tsi148_bridge); if (retval != 0) { dev_err(&pdev->dev, "Chip Initialization failed.\n"); goto err_irq; } /* If we are going to flush writes, we need to read from the VME bus. * We need to do this safely, thus we read the devices own CR/CSR * register. To do this we must set up a window in CR/CSR space and * hence have one less master window resource available. 
*/ master_num = TSI148_MAX_MASTER; if (err_chk) { master_num--; tsi148_device->flush_image = kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL); if (tsi148_device->flush_image == NULL) { dev_err(&pdev->dev, "Failed to allocate memory for " "flush resource structure\n"); retval = -ENOMEM; goto err_master; } tsi148_device->flush_image->parent = tsi148_bridge; spin_lock_init(&tsi148_device->flush_image->lock); tsi148_device->flush_image->locked = 1; tsi148_device->flush_image->number = master_num; tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 | VME_A32 | VME_A64; tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | VME_PROG | VME_DATA; tsi148_device->flush_image->width_attr = VME_D16 | VME_D32; memset(&tsi148_device->flush_image->bus_resource, 0, sizeof(struct resource)); tsi148_device->flush_image->kern_base = NULL; } /* Add master windows to list */ INIT_LIST_HEAD(&tsi148_bridge->master_resources); for (i = 0; i < master_num; i++) { master_image = kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL); if (master_image == NULL) { dev_err(&pdev->dev, "Failed to allocate memory for " "master resource structure\n"); retval = -ENOMEM; goto err_master; } master_image->parent = tsi148_bridge; spin_lock_init(&master_image->lock); master_image->locked = 0; master_image->number = i; master_image->address_attr = VME_A16 | VME_A24 | VME_A32 | VME_A64; master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | VME_PROG | VME_DATA; master_image->width_attr = VME_D16 | VME_D32; memset(&master_image->bus_resource, 0, sizeof(struct resource)); master_image->kern_base = NULL; list_add_tail(&master_image->list, &tsi148_bridge->master_resources); } /* Add slave windows to list */ INIT_LIST_HEAD(&tsi148_bridge->slave_resources); for (i = 0; 
i < TSI148_MAX_SLAVE; i++) { slave_image = kmalloc(sizeof(struct vme_slave_resource), GFP_KERNEL); if (slave_image == NULL) { dev_err(&pdev->dev, "Failed to allocate memory for " "slave resource structure\n"); retval = -ENOMEM; goto err_slave; } slave_image->parent = tsi148_bridge; mutex_init(&slave_image->mtx); slave_image->locked = 0; slave_image->number = i; slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 | VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 | VME_USER3 | VME_USER4; slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | VME_PROG | VME_DATA; list_add_tail(&slave_image->list, &tsi148_bridge->slave_resources); } /* Add dma engines to list */ INIT_LIST_HEAD(&tsi148_bridge->dma_resources); for (i = 0; i < TSI148_MAX_DMA; i++) { dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource), GFP_KERNEL); if (dma_ctrlr == NULL) { dev_err(&pdev->dev, "Failed to allocate memory for " "dma resource structure\n"); retval = -ENOMEM; goto err_dma; } dma_ctrlr->parent = tsi148_bridge; mutex_init(&dma_ctrlr->mtx); dma_ctrlr->locked = 0; dma_ctrlr->number = i; dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM | VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME | VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME | VME_DMA_PATTERN_TO_MEM; INIT_LIST_HEAD(&dma_ctrlr->pending); INIT_LIST_HEAD(&dma_ctrlr->running); list_add_tail(&dma_ctrlr->list, &tsi148_bridge->dma_resources); } /* Add location monitor to list */ INIT_LIST_HEAD(&tsi148_bridge->lm_resources); lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL); if (lm == NULL) { dev_err(&pdev->dev, "Failed to allocate memory for " "location monitor resource structure\n"); retval = -ENOMEM; goto err_lm; } lm->parent = tsi148_bridge; mutex_init(&lm->mtx); lm->locked = 0; lm->number = 1; lm->monitors = 4; list_add_tail(&lm->list, &tsi148_bridge->lm_resources); tsi148_bridge->slave_get = tsi148_slave_get; tsi148_bridge->slave_set = 
tsi148_slave_set; tsi148_bridge->master_get = tsi148_master_get; tsi148_bridge->master_set = tsi148_master_set; tsi148_bridge->master_read = tsi148_master_read; tsi148_bridge->master_write = tsi148_master_write; tsi148_bridge->master_rmw = tsi148_master_rmw; tsi148_bridge->dma_list_add = tsi148_dma_list_add; tsi148_bridge->dma_list_exec = tsi148_dma_list_exec; tsi148_bridge->dma_list_empty = tsi148_dma_list_empty; tsi148_bridge->irq_set = tsi148_irq_set; tsi148_bridge->irq_generate = tsi148_irq_generate; tsi148_bridge->lm_set = tsi148_lm_set; tsi148_bridge->lm_get = tsi148_lm_get; tsi148_bridge->lm_attach = tsi148_lm_attach; tsi148_bridge->lm_detach = tsi148_lm_detach; tsi148_bridge->slot_get = tsi148_slot_get; tsi148_bridge->alloc_consistent = tsi148_alloc_consistent; tsi148_bridge->free_consistent = tsi148_free_consistent; data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT); dev_info(&pdev->dev, "Board is%s the VME system controller\n", (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not"); if (!geoid) dev_info(&pdev->dev, "VME geographical address is %d\n", data & TSI148_LCSR_VSTAT_GA_M); else dev_info(&pdev->dev, "VME geographical address is set to %d\n", geoid); dev_info(&pdev->dev, "VME Write and flush and error check is %s\n", err_chk ? 
"enabled" : "disabled"); if (tsi148_crcsr_init(tsi148_bridge, pdev)) { dev_err(&pdev->dev, "CR/CSR configuration failed.\n"); goto err_crcsr; } retval = vme_register_bridge(tsi148_bridge); if (retval != 0) { dev_err(&pdev->dev, "Chip Registration failed.\n"); goto err_reg; } pci_set_drvdata(pdev, tsi148_bridge); /* Clear VME bus "board fail", and "power-up reset" lines */ data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT); data &= ~TSI148_LCSR_VSTAT_BRDFL; data |= TSI148_LCSR_VSTAT_CPURST; iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT); return 0; err_reg: tsi148_crcsr_exit(tsi148_bridge, pdev); err_crcsr: err_lm: /* resources are stored in link list */ list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) { lm = list_entry(pos, struct vme_lm_resource, list); list_del(pos); kfree(lm); } err_dma: /* resources are stored in link list */ list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) { dma_ctrlr = list_entry(pos, struct vme_dma_resource, list); list_del(pos); kfree(dma_ctrlr); } err_slave: /* resources are stored in link list */ list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) { slave_image = list_entry(pos, struct vme_slave_resource, list); list_del(pos); kfree(slave_image); } err_master: /* resources are stored in link list */ list_for_each_safe(pos, n, &tsi148_bridge->master_resources) { master_image = list_entry(pos, struct vme_master_resource, list); list_del(pos); kfree(master_image); } tsi148_irq_exit(tsi148_bridge, pdev); err_irq: err_test: iounmap(tsi148_device->base); err_remap: pci_release_regions(pdev); err_resource: pci_disable_device(pdev); err_enable: kfree(tsi148_device); err_driver: kfree(tsi148_bridge); err_struct: return retval; } static void tsi148_remove(struct pci_dev *pdev) { struct list_head *pos = NULL; struct list_head *tmplist; struct vme_master_resource *master_image; struct vme_slave_resource *slave_image; struct vme_dma_resource *dma_ctrlr; int i; struct tsi148_driver *bridge; struct vme_bridge 
*tsi148_bridge = pci_get_drvdata(pdev); bridge = tsi148_bridge->driver_priv; dev_dbg(&pdev->dev, "Driver is being unloaded.\n"); /* * Shutdown all inbound and outbound windows. */ for (i = 0; i < 8; i++) { iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] + TSI148_LCSR_OFFSET_ITAT); iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTAT); } /* * Shutdown Location monitor. */ iowrite32be(0, bridge->base + TSI148_LCSR_LMAT); /* * Shutdown CRG map. */ iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT); /* * Clear error status. */ iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT); iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT); iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT); /* * Remove VIRQ interrupt (if any) */ if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800) iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR); /* * Map all Interrupts to PCI INTA */ iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1); iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2); tsi148_irq_exit(tsi148_bridge, pdev); vme_unregister_bridge(tsi148_bridge); tsi148_crcsr_exit(tsi148_bridge, pdev); /* resources are stored in link list */ list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) { dma_ctrlr = list_entry(pos, struct vme_dma_resource, list); list_del(pos); kfree(dma_ctrlr); } /* resources are stored in link list */ list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) { slave_image = list_entry(pos, struct vme_slave_resource, list); list_del(pos); kfree(slave_image); } /* resources are stored in link list */ list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) { master_image = list_entry(pos, struct vme_master_resource, list); list_del(pos); kfree(master_image); } iounmap(bridge->base); pci_release_regions(pdev); pci_disable_device(pdev); kfree(tsi148_bridge->driver_priv); kfree(tsi148_bridge); } module_pci_driver(tsi148_driver); MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes"); 
module_param(err_chk, bool, 0); MODULE_PARM_DESC(geoid, "Override geographical addressing"); module_param(geoid, int, 0); MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge"); MODULE_LICENSE("GPL");
gpl-2.0
SunliyMonkey/linux
drivers/usb/host/ohci-mem.c
1647
3434
/* * OHCI HCD (Host Controller Driver) for USB. * * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> * * This file is licenced under the GPL. */ /*-------------------------------------------------------------------------*/ /* * OHCI deals with three types of memory: * - data used only by the HCD ... kmalloc is fine * - async and periodic schedules, shared by HC and HCD ... these * need to use dma_pool or dma_alloc_coherent * - driver buffers, read/written by HC ... the hcd glue or the * device driver provides us with dma addresses * * There's also "register" data, which is memory mapped. * No memory seen by this driver (or any HCD) may be paged out. */ /*-------------------------------------------------------------------------*/ static void ohci_hcd_init (struct ohci_hcd *ohci) { ohci->next_statechange = jiffies; spin_lock_init (&ohci->lock); INIT_LIST_HEAD (&ohci->pending); INIT_LIST_HEAD(&ohci->eds_in_use); } /*-------------------------------------------------------------------------*/ static int ohci_mem_init (struct ohci_hcd *ohci) { ohci->td_cache = dma_pool_create ("ohci_td", ohci_to_hcd(ohci)->self.controller, sizeof (struct td), 32 /* byte alignment */, 0 /* no page-crossing issues */); if (!ohci->td_cache) return -ENOMEM; ohci->ed_cache = dma_pool_create ("ohci_ed", ohci_to_hcd(ohci)->self.controller, sizeof (struct ed), 16 /* byte alignment */, 0 /* no page-crossing issues */); if (!ohci->ed_cache) { dma_pool_destroy (ohci->td_cache); return -ENOMEM; } return 0; } static void ohci_mem_cleanup (struct ohci_hcd *ohci) { if (ohci->td_cache) { dma_pool_destroy (ohci->td_cache); ohci->td_cache = NULL; } if (ohci->ed_cache) { dma_pool_destroy (ohci->ed_cache); ohci->ed_cache = NULL; } } /*-------------------------------------------------------------------------*/ /* ohci "done list" processing needs this mapping */ static inline struct td * dma_to_td (struct ohci_hcd *hc, 
dma_addr_t td_dma) { struct td *td; td_dma &= TD_MASK; td = hc->td_hash [TD_HASH_FUNC(td_dma)]; while (td && td->td_dma != td_dma) td = td->td_hash; return td; } /* TDs ... */ static struct td * td_alloc (struct ohci_hcd *hc, gfp_t mem_flags) { dma_addr_t dma; struct td *td; td = dma_pool_alloc (hc->td_cache, mem_flags, &dma); if (td) { /* in case hc fetches it, make it look dead */ memset (td, 0, sizeof *td); td->hwNextTD = cpu_to_hc32 (hc, dma); td->td_dma = dma; /* hashed in td_fill */ } return td; } static void td_free (struct ohci_hcd *hc, struct td *td) { struct td **prev = &hc->td_hash [TD_HASH_FUNC (td->td_dma)]; while (*prev && *prev != td) prev = &(*prev)->td_hash; if (*prev) *prev = td->td_hash; else if ((td->hwINFO & cpu_to_hc32(hc, TD_DONE)) != 0) ohci_dbg (hc, "no hash for td %p\n", td); dma_pool_free (hc->td_cache, td, td->td_dma); } /*-------------------------------------------------------------------------*/ /* EDs ... */ static struct ed * ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags) { dma_addr_t dma; struct ed *ed; ed = dma_pool_alloc (hc->ed_cache, mem_flags, &dma); if (ed) { memset (ed, 0, sizeof (*ed)); INIT_LIST_HEAD (&ed->td_list); ed->dma = dma; } return ed; } static void ed_free (struct ohci_hcd *hc, struct ed *ed) { dma_pool_free (hc->ed_cache, ed, ed->dma); }
gpl-2.0
Vachounet/AcerLiquidGlowKernel
drivers/gpu/drm/radeon/radeon_encoders.c
1903
78554
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"

extern int atom_debug;

/* evil but including atombios.h is much worse */
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
				struct drm_display_mode *mode);

/*
 * Build the possible-clones bitmask for an encoder: one bit per encoder
 * (by position in the mode_config encoder list) that can be driven
 * simultaneously with this one from the same CRTC.  Returns 0 (no
 * cloning) for DIG-capable (>= R600) parts, LVDS/TV encoders and
 * DFP2/DVO encoders.
 */
static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *clone_encoder;
	uint32_t index_mask = 0;
	int count;

	/* DIG routing gets problematic */
	if (rdev->family >= CHIP_R600)
		return index_mask;
	/* LVDS/TV are too wacky */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return index_mask;
	/* DVO requires 2x ppll clocks depending on tmds chip */
	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
		return index_mask;

	/* count tracks the encoder's index in the list; start at -1 so the
	 * first iteration's count++ yields index 0 */
	count = -1;
	list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
		struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
		count++;

		if (clone_encoder == encoder)
			continue;
		if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
			continue;
		if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
			continue;
		else
			index_mask |= (1 << count);
	}
	return index_mask;
}

/* Fill in drm_encoder.possible_clones for every encoder on the device. */
void radeon_setup_encoder_clones(struct drm_device *dev)
{
	struct drm_encoder *encoder;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		encoder->possible_clones = radeon_encoder_clones(encoder);
	}
}

/*
 * Map an ATOM supported-device bit (plus a DAC selector for analog
 * outputs) to the encoder enum used for pre-AtomBIOS-object boards.
 * Returns 0 if no mapping applies.
 */
uint32_t
radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
	struct radeon_device *rdev = dev->dev_private;
	uint32_t ret = 0;

	switch (supported_device) {
	case ATOM_DEVICE_CRT1_SUPPORT:
	case ATOM_DEVICE_TV1_SUPPORT:
	case ATOM_DEVICE_TV2_SUPPORT:
	case ATOM_DEVICE_CRT2_SUPPORT:
	case ATOM_DEVICE_CV_SUPPORT:
		switch (dac) {
		case 1: /* dac a */
			if ((rdev->family == CHIP_RS300) ||
			    (rdev->family == CHIP_RS400) ||
			    (rdev->family == CHIP_RS480))
				ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
			else if (ASIC_IS_AVIVO(rdev))
				ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
			else
				ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
			break;
		case 2: /* dac b */
			if (ASIC_IS_AVIVO(rdev))
				ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
			else {
				/*if (rdev->family == CHIP_R200)
				  ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
				  else*/
				ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
			}
			break;
		case 3: /* external dac */
			if (ASIC_IS_AVIVO(rdev))
				ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
			else
				ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
			break;
		}
		break;
	case ATOM_DEVICE_LCD1_SUPPORT:
		if (ASIC_IS_AVIVO(rdev))
			ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
		else
			ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
		break;
	case ATOM_DEVICE_DFP1_SUPPORT:
		if ((rdev->family == CHIP_RS300) ||
		    (rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480))
			ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
		else if (ASIC_IS_AVIVO(rdev))
			ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
		else
			ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
		break;
	case ATOM_DEVICE_LCD2_SUPPORT:
	case ATOM_DEVICE_DFP2_SUPPORT:
		if ((rdev->family == CHIP_RS600) ||
		    (rdev->family == CHIP_RS690) ||
		    (rdev->family == CHIP_RS740))
			ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
		else if (ASIC_IS_AVIVO(rdev))
			ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
		else
			ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
		break;
	case ATOM_DEVICE_DFP3_SUPPORT:
		ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
		break;
	}

	return ret;
}

/* True if the encoder object id is one of the digital (LVDS/TMDS/DVO/
 * DDI/UNIPHY/LVTMA) encoder blocks. */
static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		return true;
	default:
		return false;
	}
}

/* Attach every encoder to every connector whose device bitmasks overlap. */
void
radeon_link_encoder_connector(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;

	/* walk the list and link encoders to connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			radeon_encoder = to_radeon_encoder(encoder);
			if (radeon_encoder->devices & radeon_connector->devices)
				drm_mode_connector_attach_encoder(connector, encoder);
		}
	}
}

/* Record which of the encoder's supported devices is currently routed,
 * derived from the connector the DRM core has bound to it. */
void radeon_encoder_set_active_device(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
			DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
				  radeon_encoder->active_device, radeon_encoder->devices,
				  radeon_connector->devices, encoder->encoder_type);
		}
	}
}

/* Find the connector currently bound to this encoder (by active_device
 * overlap); NULL if the encoder has no active device. */
struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
		if (radeon_encoder->active_device & radeon_connector->devices)
			return connector;
	}
	return NULL;
}

/* Like radeon_get_connector_for_encoder(), but matches on the full
 * supported-devices mask; used before an active device is assigned. */
static struct drm_connector *
radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
		if (radeon_encoder->devices & radeon_connector->devices)
			return connector;
	}
	return NULL;
}

/* Return the external (bridge) encoder paired with this one via a shared
 * device bitmask, or NULL if this encoder is itself external or has none. */
struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *other_encoder;
	struct radeon_encoder *other_radeon_encoder;

	if (radeon_encoder->is_ext_encoder)
		return NULL;

	list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
		if (other_encoder == encoder)
			continue;
		other_radeon_encoder = to_radeon_encoder(other_encoder);
		if (other_radeon_encoder->is_ext_encoder &&
		    (radeon_encoder->devices & other_radeon_encoder->devices))
			return other_encoder;
	}
	return NULL;
}

/* True if the paired external encoder is a TRAVIS or NUTMEG DP bridge. */
bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder)
{
	struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder);

	if (other_encoder) {
		struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_TRAVIS:
		case ENCODER_OBJECT_ID_NUTMEG:
			return true;
		default:
			return false;
		}
	}

	return false;
}

/*
 * Force the adjusted mode's timings to the panel's native mode while
 * keeping the requested hdisplay/vdisplay (pre-AVIVO) so the scaler can
 * do the rest.  Blank/overscan/sync widths are taken from the native mode.
 */
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
			     struct drm_display_mode *adjusted_mode)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
	unsigned hblank = native_mode->htotal - native_mode->hdisplay;
	unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
	unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
	unsigned vover = native_mode->vsync_start -
		native_mode->vdisplay;
	unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
	unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;

	adjusted_mode->clock = native_mode->clock;
	adjusted_mode->flags = native_mode->flags;

	if (ASIC_IS_AVIVO(rdev)) {
		adjusted_mode->hdisplay = native_mode->hdisplay;
		adjusted_mode->vdisplay = native_mode->vdisplay;
	}

	adjusted_mode->htotal = native_mode->hdisplay + hblank;
	adjusted_mode->hsync_start = native_mode->hdisplay + hover;
	adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;

	adjusted_mode->vtotal = native_mode->vdisplay + vblank;
	adjusted_mode->vsync_start = native_mode->vdisplay + vover;
	adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;

	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);

	if (ASIC_IS_AVIVO(rdev)) {
		adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
		adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
	}

	adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
	adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
	adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;

	adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
	adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
	adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
}

/*
 * drm_encoder_helper mode_fixup hook: records the active device routing,
 * works around an interlace hw bug, substitutes the native panel/TV
 * timings where applicable, and programs the DP link config on DCE3+.
 * Always returns true (the mode is never rejected here).
 */
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;

	/* set the active encoder to connector routing */
	radeon_encoder_set_active_device(encoder);
	drm_mode_set_crtcinfo(adjusted_mode, 0);

	/* hw bug */
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;

	/* get the native mode for LVDS */
	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
		radeon_panel_mode_fixup(encoder, adjusted_mode);

	/* get the native mode for TV */
	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
		if (tv_dac) {
			/* table index 0 = NTSC-style timings, 1 = PAL-style */
			if (tv_dac->tv_std == TV_STD_NTSC ||
			    tv_dac->tv_std == TV_STD_NTSC_J ||
			    tv_dac->tv_std == TV_STD_PAL_M)
				radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
			else
				radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
		}
	}

	if (ASIC_IS_DCE3(rdev) &&
	    ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	     radeon_encoder_is_dp_bridge(encoder))) {
		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
		radeon_dp_set_link_config(connector, mode);
	}

	return true;
}

/*
 * Execute the DAC1/DAC2 EncoderControl AtomBIOS table for an analog
 * output, selecting the DAC standard from the active device / TV std.
 * NOTE(review): the TV branch dereferences dac_info (enc_priv) without a
 * NULL check — confirm enc_priv is always populated for TV-capable DACs.
 */
static void
atombios_dac_setup(struct drm_encoder *encoder, int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	DAC_ENCODER_CONTROL_PS_ALLOCATION args;
	int index = 0;
	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;

	memset(&args, 0, sizeof(args));

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
		break;
	}

	args.ucAction = action;

	if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
		args.ucDacStandard = ATOM_DAC1_PS2;
	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
		args.ucDacStandard = ATOM_DAC1_CV;
	else {
		switch (dac_info->tv_std) {
		case TV_STD_PAL:
		case TV_STD_PAL_M:
		case TV_STD_SCART_PAL:
		case TV_STD_SECAM:
		case TV_STD_PAL_CN:
			args.ucDacStandard = ATOM_DAC1_PAL;
			break;
		case TV_STD_NTSC:
		case TV_STD_NTSC_J:
		case TV_STD_PAL_60:
		default:
			args.ucDacStandard = ATOM_DAC1_NTSC;
			break;
		}
	}
	/* table takes the pixel clock in 10 kHz units */
	args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * Execute the TVEncoderControl AtomBIOS table, mapping the driver TV
 * standard to the ATOM_TV_* encoding.
 * NOTE(review): dac_info is dereferenced without a NULL check in the
 * non-CV path — same caveat as atombios_dac_setup().
 */
static void
atombios_tv_setup(struct drm_encoder *encoder, int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	TV_ENCODER_CONTROL_PS_ALLOCATION args;
	int index = 0;
	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;

	memset(&args, 0, sizeof(args));

	index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);

	args.sTVEncoder.ucAction = action;

	if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
		args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
	else {
		switch (dac_info->tv_std) {
		case TV_STD_NTSC:
			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
			break;
		case TV_STD_PAL:
			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
			break;
		case TV_STD_PAL_M:
			args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
			break;
		case TV_STD_PAL_60:
			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
			break;
		case TV_STD_NTSC_J:
			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
			break;
		case TV_STD_SCART_PAL:
			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
			break;
		case TV_STD_SECAM:
			args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
			break;
		case TV_STD_PAL_CN:
			args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
			break;
		default:
			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
			break;
		}
	}

	args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* Overlay of the per-generation DVO encoder control parameter structs. */
union dvo_encoder_control {
	ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
	DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
	DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
};

/*
 * Execute the DVOEncoderControl table, filling the argument layout that
 * matches the ASIC generation (DCE3+, DCE2, or R4xx/R5xx external TMDS).
 * Dual-link is enabled above 165 MHz pixel clock.
 */
void
atombios_dvo_setup(struct drm_encoder *encoder, int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	union dvo_encoder_control args;
	int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);

	memset(&args, 0, sizeof(args));

	if (ASIC_IS_DCE3(rdev)) {
		/* DCE3+ */
		args.dvo_v3.ucAction = action;
		args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
		args.dvo_v3.ucDVOConfig = 0; /* XXX */
	} else if (ASIC_IS_DCE2(rdev)) {
		/* DCE2 (pre-DCE3 R6xx, RS600/690/740) */
		args.dvo.sDVOEncoder.ucAction = action;
		args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
		/* DFP1, CRT1, TV1 depending on the type of port */
		args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;

		if (radeon_encoder->pixel_clock > 165000)
			args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |=
				PANEL_ENCODER_MISC_DUAL;
	} else {
		/* R4xx, R5xx */
		args.ext_tmds.sXTmdsEncoder.ucEnable = action;

		if (radeon_encoder->pixel_clock > 165000)
			args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;

		/*if (pScrn->rgbBits == 8)*/
		args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* Overlay of the v1/v2 LVDS encoder control parameter structs. */
union lvds_encoder_control {
	LVDS_ENCODER_CONTROL_PS_ALLOCATION    v1;
	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
};

void
atombios_digital_setup(struct drm_encoder *encoder, int action)
{
	/*
	 * Execute the LVDS/TMDS1/TMDS2 EncoderControl table for legacy
	 * (pre-DIG) digital encoders.  The argument layout is chosen from
	 * the table's frev/crev; dithering, dual-link, HDMI type and RGB
	 * depth flags come from the panel info in enc_priv.
	 */
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	union lvds_encoder_control args;
	int index = 0;
	int hdmi_detected = 0;
	uint8_t frev, crev;

	if (!dig)
		return;

	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
		hdmi_detected = 1;

	memset(&args, 0, sizeof(args));

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
		break;
	}

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (frev) {
	case 1:
	case 2:
		switch (crev) {
		case 1:
			args.v1.ucMisc = 0;
			args.v1.ucAction = action;
			if (hdmi_detected)
				args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
				if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
					args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
			} else {
				if (dig->linkb)
					args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
				if (radeon_encoder->pixel_clock > 165000)
					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
				/*if (pScrn->rgbBits == 8) */
				args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
			}
			break;
		case 2:
		case 3:
			args.v2.ucMisc = 0;
			args.v2.ucAction = action;
			if (crev == 3) {
				if (dig->coherent_mode)
					args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
			}
			if (hdmi_detected)
				args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
			args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v2.ucTruncate = 0;
			args.v2.ucSpatial = 0;
			args.v2.ucTemporal = 0;
			args.v2.ucFRC = 0;
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
				if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
					args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
						args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
				}
				if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
					args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
					if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
				}
			} else {
				if (dig->linkb)
					args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
				if (radeon_encoder->pixel_clock > 165000)
					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			break;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		break;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * Determine the ATOM_ENCODER_MODE_* value for an encoder from the type
 * of connector it drives (and monitor audio capability for HDMI).
 * NOTE(review): the "DVO is always DVO" test compares encoder_id against
 * ATOM_ENCODER_MODE_DVO, which is a mode constant rather than an
 * ENCODER_OBJECT_ID_* value — verify against the object id definitions.
 * NOTE(review): connector may still be NULL after the init fallback;
 * to_radeon_connector(connector) would then dereference NULL — confirm
 * callers guarantee at least one connector exists.
 */
int atombios_get_encoder_mode(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct radeon_connector_atom_dig *dig_connector;

	/* dp bridges are always DP */
	if (radeon_encoder_is_dp_bridge(encoder))
		return ATOM_ENCODER_MODE_DP;

	/* DVO is always DVO */
	if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
		return ATOM_ENCODER_MODE_DVO;

	connector = radeon_get_connector_for_encoder(encoder);
	/* if we don't have an active device yet, just use one of
	 * the connectors tied to the encoder.
	 */
	if (!connector)
		connector = radeon_get_connector_for_encoder_init(encoder);
	radeon_connector = to_radeon_connector(connector);

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DVII:
	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
			/* fix me */
			if (ASIC_IS_DCE4(rdev))
				return ATOM_ENCODER_MODE_DVI;
			else
				return ATOM_ENCODER_MODE_HDMI;
		} else if (radeon_connector->use_digital)
			return ATOM_ENCODER_MODE_DVI;
		else
			return ATOM_ENCODER_MODE_CRT;
		break;
	case DRM_MODE_CONNECTOR_DVID:
	case DRM_MODE_CONNECTOR_HDMIA:
	default:
		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
			/* fix me */
			if (ASIC_IS_DCE4(rdev))
				return ATOM_ENCODER_MODE_DVI;
			else
				return ATOM_ENCODER_MODE_HDMI;
		} else
			return ATOM_ENCODER_MODE_DVI;
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		return ATOM_ENCODER_MODE_LVDS;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		dig_connector = radeon_connector->con_priv;
		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
			return ATOM_ENCODER_MODE_DP;
		else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
			/* fix me */
			if (ASIC_IS_DCE4(rdev))
				return ATOM_ENCODER_MODE_DVI;
			else
				return ATOM_ENCODER_MODE_HDMI;
		} else
			return ATOM_ENCODER_MODE_DVI;
		break;
	case DRM_MODE_CONNECTOR_eDP:
		return ATOM_ENCODER_MODE_DP;
	case DRM_MODE_CONNECTOR_DVIA:
	case DRM_MODE_CONNECTOR_VGA:
		return ATOM_ENCODER_MODE_CRT;
		break;
	case DRM_MODE_CONNECTOR_Composite:
	case DRM_MODE_CONNECTOR_SVIDEO:
	case DRM_MODE_CONNECTOR_9PinDIN:
		/* fix me */
		return ATOM_ENCODER_MODE_TV;
		/*return ATOM_ENCODER_MODE_CV;*/
		break;
	}
}

/*
 * DIG Encoder/Transmitter Setup
 *
 * DCE 3.0/3.1
 * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
 *   Supports up to 3 digital outputs
 * - 2 DIG encoder blocks.
 *   DIG1 can drive UNIPHY link A or link B
 *   DIG2 can drive UNIPHY link B or LVTMA
 *
 * DCE 3.2
 * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
 *   Supports up to 5 digital outputs
 * - 2 DIG encoder blocks.
 *   DIG1/2 can drive UNIPHY0/1/2 link A or link B
 *
 * DCE 4.0/5.0
 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
 *   Supports up to 6 digital outputs
 * - 6 DIG encoder blocks.
 * - DIG to PHY mapping is hardcoded
 *   DIG1 drives UNIPHY0 link A, A+B
 *   DIG2 drives UNIPHY0 link B
 *   DIG3 drives UNIPHY1 link A, A+B
 *   DIG4 drives UNIPHY1 link B
 *   DIG5 drives UNIPHY2 link A, A+B
 *   DIG6 drives UNIPHY2 link B
 *
 * DCE 4.1
 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
 *   Supports up to 6 digital outputs
 * - 2 DIG encoder blocks.
 *   DIG1/2 can drive UNIPHY0/1/2 link A or link B
 *
 * Routing
 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
 * Examples:
 * crtc0 -> dig2 -> LVTMA   links A+B -> TMDS/HDMI
 * crtc1 -> dig1 -> UNIPHY0 link  B   -> DP
 * crtc0 -> dig1 -> UNIPHY2 link  A   -> LVDS
 * crtc1 -> dig2 -> UNIPHY1 link  B+A -> TMDS/HDMI
 */

/* Overlay of the per-version DIG encoder control parameter structs. */
union dig_encoder_control {
	DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
};

/*
 * Program the DIG encoder block via the DIG{1,2,x}EncoderControl table.
 * Lane count, DP link rate, panel bit depth and HPD pin are filled per
 * ASIC generation (v4 on DCE5, v3 on DCE4, v1/v2 before that).
 * bpc defaults to 8 when no connector is found.
 */
void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	union dig_encoder_control args;
	int index = 0;
	uint8_t frev, crev;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int hpd_id = RADEON_HPD_NONE;
	int bpc = 8;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		hpd_id = radeon_connector->hpd.hpd;
		bpc = connector->display_info.bpc;
	}

	/* no dig encoder assigned */
	if (dig->dig_encoder == -1)
		return;

	memset(&args, 0, sizeof(args));

	if (ASIC_IS_DCE4(rdev))
		index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
	else {
		if (dig->dig_encoder)
			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
	}

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	args.v1.ucAction = action;
	args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
	if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
		args.v3.ucPanelMode = panel_mode;
	else
		args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);

	if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
	    (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
		args.v1.ucLaneNum = dp_lane_count;
	else if (radeon_encoder->pixel_clock > 165000)
		args.v1.ucLaneNum = 8;
	else
		args.v1.ucLaneNum = 4;

	if (ASIC_IS_DCE5(rdev)) {
		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
		    (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
			if (dp_clock == 270000)
				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
			else if (dp_clock == 540000)
				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
		}
		args.v4.acConfig.ucDigSel = dig->dig_encoder;
		switch (bpc) {
		case 0:
			args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
			break;
		case 6:
			args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
			break;
		case 8:
		default:
			args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
			break;
		case 10:
			args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
			break;
		case 12:
			args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
			break;
		case 16:
			args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
			break;
		}
		if (hpd_id == RADEON_HPD_NONE)
			args.v4.ucHPD_ID = 0;
		else
			args.v4.ucHPD_ID = hpd_id + 1;
	} else if (ASIC_IS_DCE4(rdev)) {
		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
		args.v3.acConfig.ucDigSel = dig->dig_encoder;
		switch (bpc) {
		case 0:
			args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
			break;
		case 6:
			args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
			break;
		case 8:
		default:
			args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
			break;
		case 10:
			args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
			break;
		case 12:
			args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
			break;
		case 16:
			args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
			break;
		}
	} else {
		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
			args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
			break;
		}
		if (dig->linkb)
			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
		else
			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/* Overlay of the per-version DIG transmitter control parameter structs. */
union dig_transmitter_control {
	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
};

/*
 * Program the DIG transmitter (PHY) via the UNIPHY/LVTMA/DVO
 * TransmitterControl tables: link/lane selection, pixel or DP clock,
 * ref clock source and coherent/dual-link flags, per ASIC generation.
 */
void
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	struct drm_connector *connector;
	union dig_transmitter_control args;
	int index = 0;
	uint8_t frev, crev;
	bool is_dp = false;
	int pll_id = 0;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int connector_object_id = 0;
	int igp_lane_info = 0;
	int
		dig_encoder = dig->dig_encoder;

	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
		connector = radeon_get_connector_for_encoder_init(encoder);
		/* just needed to avoid bailing in the encoder check.  the encoder
		 * isn't used for init
		 */
		dig_encoder = 0;
	} else
		connector = radeon_get_connector_for_encoder(encoder);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		connector_object_id =
			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
		igp_lane_info = dig_connector->igp_lane_info;
	}

	/* no dig encoder assigned */
	if (dig_encoder == -1)
		return;

	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
		is_dp = true;

	memset(&args, 0, sizeof(args));

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
		break;
	}

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	args.v1.ucAction = action;
	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
		args.v1.usInitInfo = cpu_to_le16(connector_object_id);
	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
		args.v1.asMode.ucLaneSel = lane_num;
		args.v1.asMode.ucLaneSet = lane_set;
	} else {
		if (is_dp)
			args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
		else if (radeon_encoder->pixel_clock > 165000)
			/* dual link: each link carries half the pixel clock */
			args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
		else
			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
	}
	if (ASIC_IS_DCE4(rdev)) {
		if (is_dp)
			args.v3.ucLaneNum = dp_lane_count;
		else if (radeon_encoder->pixel_clock > 165000)
			args.v3.ucLaneNum = 8;
		else
			args.v3.ucLaneNum = 4;

		if (dig->linkb)
			args.v3.acConfig.ucLinkSel = 1;
		if (dig_encoder & 1)
			args.v3.acConfig.ucEncoderSel = 1;

		/* Select the PLL for the PHY
		 * DP PHY should be clocked from external src if there is
		 * one.
		 */
		if (encoder->crtc) {
			struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
			pll_id = radeon_crtc->pll_id;
		}

		if (ASIC_IS_DCE5(rdev)) {
			/* On DCE5 DCPLL usually generates the DP ref clock */
			if (is_dp) {
				if (rdev->clock.dp_extclk)
					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
				else
					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
			} else
				args.v4.acConfig.ucRefClkSource = pll_id;
		} else {
			/* On DCE4, if there is an external clock, it generates the DP ref clock */
			if (is_dp && rdev->clock.dp_extclk)
				args.v3.acConfig.ucRefClkSource = 2; /* external src */
			else
				args.v3.acConfig.ucRefClkSource = pll_id;
		}

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.v3.acConfig.ucTransmitterSel = 0;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			args.v3.acConfig.ucTransmitterSel = 1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			args.v3.acConfig.ucTransmitterSel = 2;
			break;
		}

		if (is_dp)
			args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v3.acConfig.fCoherentMode = 1;
			if (radeon_encoder->pixel_clock > 165000)
				args.v3.acConfig.fDualLinkConnector = 1;
		}
	} else if (ASIC_IS_DCE32(rdev)) {
		args.v2.acConfig.ucEncoderSel = dig_encoder;
		if (dig->linkb)
			args.v2.acConfig.ucLinkSel = 1;

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.v2.acConfig.ucTransmitterSel = 0;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			args.v2.acConfig.ucTransmitterSel = 1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			args.v2.acConfig.ucTransmitterSel = 2;
			break;
		}

		if (is_dp) {
			args.v2.acConfig.fCoherentMode = 1;
			args.v2.acConfig.fDPConnector = 1;
		} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v2.acConfig.fCoherentMode = 1;
			if (radeon_encoder->pixel_clock > 165000)
				args.v2.acConfig.fDualLinkConnector = 1;
		}
	} else {
		args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;

		if (dig_encoder)
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
		else
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;

		if ((rdev->flags & RADEON_IS_IGP) &&
		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
			/* IGP lane routing: single-link uses one 4-lane group,
			 * dual-link uses an 8-lane group */
			if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
				if (igp_lane_info & 0x1)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
				else if (igp_lane_info & 0x2)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
				else if (igp_lane_info & 0x4)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
				else if (igp_lane_info & 0x8)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
			} else {
				if (igp_lane_info & 0x3)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
				else if (igp_lane_info & 0xc)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
			}
		}

		if (dig->linkb)
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
		else
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;

		if (is_dp)
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
			if (radeon_encoder->pixel_clock > 165000)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
		}
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * Power an eDP panel on or off via the UNIPHYTransmitterControl table
 * (DCE4+ only).  When powering on, polls HPD for up to ~300 ms and
 * returns false if the panel never reports presence; otherwise true.
 */
bool
atombios_set_edp_panel_power(struct drm_connector *connector, int action)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct drm_device *dev = radeon_connector->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	union dig_transmitter_control args;
	int index =
		GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
	uint8_t frev, crev;

	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
		goto done;

	if (!ASIC_IS_DCE4(rdev))
		goto done;

	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
	    (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
		goto done;

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		goto done;

	memset(&args, 0, sizeof(args));

	args.v1.ucAction = action;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	/* wait for the panel to power up */
	if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
		int i;

		for (i = 0; i < 300; i++) {
			if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
				return true;
			mdelay(1);
		}
		return false;
	}
done:
	return true;
}

/* Overlay of the v1/v3 external encoder control parameter structs. */
union external_encoder_control {
	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};

/*
 * Program an external (bridge) encoder via the ExternalEncoderControl
 * table.  frev 1 takes no parameters; frev 2 crev 1/2 use the DIG
 * encoder layout, crev 3 adds encoder enum, DP 5.4 GHz rate and panel
 * bit depth.  bpc defaults to 8 when no connector is found.
 */
static void
atombios_external_encoder_setup(struct drm_encoder *encoder,
				struct drm_encoder *ext_encoder,
				int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
	union external_encoder_control args;
	struct drm_connector *connector;
	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
	u8 frev, crev;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int connector_object_id = 0;
	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
	int bpc = 8;

	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
		connector = radeon_get_connector_for_encoder_init(encoder);
	else
		connector = radeon_get_connector_for_encoder(encoder);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		connector_object_id =
			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
		bpc = connector->display_info.bpc;
	}

	memset(&args, 0, sizeof(args));

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (frev) {
	case 1:
		/* no params on frev 1 */
		break;
	case 2:
		switch (crev) {
		case 1:
		case 2:
			args.v1.sDigEncoder.ucAction = action;
			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);

			if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
				if (dp_clock == 270000)
					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
			} else if (radeon_encoder->pixel_clock > 165000)
				args.v1.sDigEncoder.ucLaneNum = 8;
			else
				args.v1.sDigEncoder.ucLaneNum = 4;
			break;
		case 3:
			args.v3.sExtEncoder.ucAction = action;
			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
			else
				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);

			if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
				if (dp_clock == 270000)
					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
				else if (dp_clock == 540000)
					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
			} else if (radeon_encoder->pixel_clock > 165000)
				args.v3.sExtEncoder.ucLaneNum = 8;
			else
				args.v3.sExtEncoder.ucLaneNum = 4;
			switch (ext_enum) {
			case GRAPH_OBJECT_ENUM_ID1:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
				break;
			case GRAPH_OBJECT_ENUM_ID2:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
				break;
			case GRAPH_OBJECT_ENUM_ID3:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
				break;
			}
			switch (bpc) {
			case 0:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
				break;
			case 6:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
				break;
			case 8:
			default:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
				break;
			case 10:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
				break;
			case 12:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
				break;
			case 16:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
				break;
			}
			break;
		default:
			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
		return;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}

/*
 * Toggle YUV output via the EnableYUV table.  Temporarily writes the
 * TV/CV-active bits into BIOS scratch reg 3 so the table acts on the
 * right CRTC, then restores the previous scratch value.
 */
static void
atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	ENABLE_YUV_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
	uint32_t temp, reg;

	memset(&args, 0, sizeof(args));

	if (rdev->family >= CHIP_R600)
		reg = R600_BIOS_3_SCRATCH;
	else
		reg = RADEON_BIOS_3_SCRATCH;

	/* XXX: fix up scratch reg handling */
	temp = RREG32(reg);
	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
		WREG32(reg, (ATOM_S3_TV1_ACTIVE |
			     (radeon_crtc->crtc_id << 18)));
	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
		WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
	else
		WREG32(reg, 0);

	if (enable)
		args.ucEnable = ATOM_ENABLE;
	args.ucCRTC = radeon_crtc->crtc_id;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	WREG32(reg, temp);
}

/* DPMS hook: picks the output-control table (or DIG path) per encoder
 * type.  NOTE: definition continues beyond this chunk. */
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
	int index = 0;
	bool is_dig = false;
	bool is_dce5_dac = false;
	bool is_dce5_dvo = false;

	memset(&args, 0, sizeof(args));

	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
		  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
		  radeon_encoder->active_device);
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		is_dig = true;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		if (ASIC_IS_DCE5(rdev))
			is_dce5_dvo = true;
		else if (ASIC_IS_DCE3(rdev))
			is_dig = true;
		else
			index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		if (ASIC_IS_DCE5(rdev))
			is_dce5_dac = true;
		else {
			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
				index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
			else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
				index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
			else
				index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
		}
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); else index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl); break; } if (is_dig) { switch (mode) { case DRM_MODE_DPMS_ON: /* some early dce3.2 boards have a bug in their transmitter control table */ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); else atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); if (connector && (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_ON); radeon_dig_connector->edp_on = true; } if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); radeon_dp_link_train(encoder, connector); if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, 
ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); if (connector && (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); radeon_dig_connector->edp_on = false; } } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); break; } } else if (is_dce5_dac) { switch (mode) { case DRM_MODE_DPMS_ON: atombios_dac_setup(encoder, ATOM_ENABLE); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: atombios_dac_setup(encoder, ATOM_DISABLE); break; } } else if (is_dce5_dvo) { switch (mode) { case DRM_MODE_DPMS_ON: atombios_dvo_setup(encoder, ATOM_ENABLE); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: atombios_dvo_setup(encoder, ATOM_DISABLE); break; } } else { switch (mode) { case DRM_MODE_DPMS_ON: args.ucAction = ATOM_ENABLE; /* workaround for DVOOutputControl on some RS690 systems */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); WREG32(RADEON_BIOS_3_SCRATCH, reg); } else atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { args.ucAction = ATOM_LCD_BLON; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: args.ucAction = ATOM_DISABLE; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { args.ucAction = ATOM_LCD_BLOFF; 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } break; } } if (ext_encoder) { switch (mode) { case DRM_MODE_DPMS_ON: default: if (ASIC_IS_DCE41(rdev)) { atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT); atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF); } else atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (ASIC_IS_DCE41(rdev)) { atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING); atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT); } else atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE); break; } } radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); } union crtc_source_param { SELECT_CRTC_SOURCE_PS_ALLOCATION v1; SELECT_CRTC_SOURCE_PARAMETERS_V2 v2; }; static void atombios_set_encoder_crtc_source(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); union crtc_source_param args; int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); uint8_t frev, crev; struct radeon_encoder_atom_dig *dig; memset(&args, 0, sizeof(args)); if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; switch (frev) { case 1: switch (crev) { case 1: default: if (ASIC_IS_AVIVO(rdev)) args.v1.ucCRTC = radeon_crtc->crtc_id; else { if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) { args.v1.ucCRTC = radeon_crtc->crtc_id; } else { args.v1.ucCRTC = radeon_crtc->crtc_id << 2; } } switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case 
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX; break; case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX; else args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX; break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX; break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; else args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX; break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; else args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX; break; } break; case 2: args.v2.ucCRTC = radeon_crtc->crtc_id; args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: dig = radeon_encoder->enc_priv; switch (dig->dig_encoder) { case 0: args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; break; case 1: args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; break; case 2: args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; break; case 3: args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; break; case 4: args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID; break; case 5: args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; break; } break; case 
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; else args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID; break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; else args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID; break; } break; } break; default: DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); return; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); /* update scratch regs with new routing */ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } static void atombios_apply_encoder_quirks(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); /* Funky macbooks */ if ((dev->pdev->device == 0x71C5) && (dev->pdev->subsystem_vendor == 0x106b) && (dev->pdev->subsystem_device == 0x0080)) { if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) { uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL); lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN; lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN; WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control); } } /* set scaler clears this on some chips */ if (ASIC_IS_AVIVO(rdev) && (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) { if 
(ASIC_IS_DCE4(rdev)) { if (mode->flags & DRM_MODE_FLAG_INTERLACE) WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, EVERGREEN_INTERLEAVE_EN); else WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); } else { if (mode->flags & DRM_MODE_FLAG_INTERLACE) WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN); else WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); } } } static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *test_encoder; struct radeon_encoder_atom_dig *dig; uint32_t dig_enc_in_use = 0; /* DCE4/5 */ if (ASIC_IS_DCE4(rdev)) { dig = radeon_encoder->enc_priv; if (ASIC_IS_DCE41(rdev)) { if (dig->linkb) return 1; else return 0; } else { switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: if (dig->linkb) return 1; else return 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) return 3; else return 2; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) return 5; else return 4; break; } } } /* on DCE32 and encoder can driver any block so just crtc id */ if (ASIC_IS_DCE32(rdev)) { return radeon_crtc->crtc_id; } /* on DCE3 - LVTMA can only be driven by DIGB */ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { struct radeon_encoder *radeon_test_encoder; if (encoder == test_encoder) continue; if (!radeon_encoder_is_digital(test_encoder)) continue; radeon_test_encoder = to_radeon_encoder(test_encoder); dig = radeon_test_encoder->enc_priv; if (dig->dig_encoder >= 0) dig_enc_in_use |= (1 << dig->dig_encoder); } if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) { if (dig_enc_in_use & 0x2) DRM_ERROR("LVDS required digital encoder 2 but it was in use - 
stealing\n"); return 1; } if (!(dig_enc_in_use & 1)) return 0; return 1; } /* This only needs to be called once at startup */ void radeon_atom_encoder_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct drm_encoder *encoder; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); break; default: break; } if (ext_encoder && ASIC_IS_DCE41(rdev)) atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); } } static void radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); radeon_encoder->pixel_clock = adjusted_mode->clock; if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) atombios_yuv_setup(encoder, true); else atombios_yuv_setup(encoder, false); } switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE); break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: if 
(ASIC_IS_DCE4(rdev)) { /* disable the transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); /* setup and enable the encoder */ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); /* enable the transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); } else { /* disable the encoder and transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); /* setup and enable the encoder and transmitter */ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); } break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: atombios_dvo_setup(encoder, ATOM_ENABLE); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: atombios_dac_setup(encoder, ATOM_ENABLE); if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) { if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) atombios_tv_setup(encoder, ATOM_ENABLE); else atombios_tv_setup(encoder, ATOM_DISABLE); } break; } if (ext_encoder) { if (ASIC_IS_DCE41(rdev)) atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); else atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); } atombios_apply_encoder_quirks(encoder, adjusted_mode); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { r600_hdmi_enable(encoder); r600_hdmi_setmode(encoder, adjusted_mode); } } static bool atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; 
struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_CRT_SUPPORT)) { DAC_LOAD_DETECTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection); uint8_t frev, crev; memset(&args, 0, sizeof(args)); if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return false; args.sDacload.ucMisc = 0; if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) || (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1)) args.sDacload.ucDacType = ATOM_DAC_A; else args.sDacload.ucDacType = ATOM_DAC_B; if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT); else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT); else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT); if (crev >= 3) args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT); if (crev >= 3) args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); return true; } else return false; } static enum drm_connector_status radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); uint32_t bios_0_scratch; if (!atombios_dac_load_detect(encoder, connector)) { DRM_DEBUG_KMS("detect 
returned false \n"); return connector_status_unknown; } if (rdev->family >= CHIP_R600) bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); else bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH); DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { if (bios_0_scratch & ATOM_S0_CRT1_MASK) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { if (bios_0_scratch & ATOM_S0_CRT2_MASK) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) return connector_status_connected; /* CTV */ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) return connector_status_connected; /* STV */ } return connector_status_disconnected; } static enum drm_connector_status radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); u32 bios_0_scratch; if (!ASIC_IS_DCE4(rdev)) return connector_status_unknown; if (!ext_encoder) return connector_status_unknown; if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0) return connector_status_unknown; /* load detect on the dp bridge */ atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION); bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); if (radeon_connector->devices & 
ATOM_DEVICE_CRT1_SUPPORT) { if (bios_0_scratch & ATOM_S0_CRT1_MASK) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { if (bios_0_scratch & ATOM_S0_CRT2_MASK) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) return connector_status_connected; } if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) return connector_status_connected; /* CTV */ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) return connector_status_connected; /* STV */ } return connector_status_disconnected; } void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder) { struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); if (ext_encoder) /* ddc_setup on the dp bridge */ atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP); } static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); if ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || radeon_encoder_is_dp_bridge(encoder)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; if (dig) dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); } radeon_atom_output_lock(encoder, true); radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); /* select the clock/data port if it uses a router */ if (radeon_connector->router.cd_valid) radeon_router_select_cd_port(radeon_connector); /* turn eDP panel on for mode set */ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_ON); 
} /* this is needed for the pll/ss setup to work correctly in some cases */ atombios_set_encoder_crtc_source(encoder); } static void radeon_atom_encoder_commit(struct drm_encoder *encoder) { radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); radeon_atom_output_lock(encoder, false); } static void radeon_atom_encoder_disable(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig; /* check for pre-DCE3 cards with shared encoders; * can't really use the links individually, so don't disable * the encoder if it's in use by another connector */ if (!ASIC_IS_DCE3(rdev)) { struct drm_encoder *other_encoder; struct radeon_encoder *other_radeon_encoder; list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { other_radeon_encoder = to_radeon_encoder(other_encoder); if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && drm_helper_encoder_in_use(other_encoder)) goto disable_done; } } radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE); break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: if (ASIC_IS_DCE4(rdev)) /* disable the transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); else { /* disable the encoder and transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); } break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: case 
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: atombios_dvo_setup(encoder, ATOM_DISABLE); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: atombios_dac_setup(encoder, ATOM_DISABLE); if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) atombios_tv_setup(encoder, ATOM_DISABLE); break; } disable_done: if (radeon_encoder_is_digital(encoder)) { if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) r600_hdmi_disable(encoder); dig = radeon_encoder->enc_priv; dig->dig_encoder = -1; } radeon_encoder->active_device = 0; } /* these are handled by the primary encoders */ static void radeon_atom_ext_prepare(struct drm_encoder *encoder) { } static void radeon_atom_ext_commit(struct drm_encoder *encoder) { } static void radeon_atom_ext_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { } static void radeon_atom_ext_disable(struct drm_encoder *encoder) { } static void radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) { } static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { .dpms = radeon_atom_ext_dpms, .mode_fixup = radeon_atom_ext_mode_fixup, .prepare = radeon_atom_ext_prepare, .mode_set = radeon_atom_ext_mode_set, .commit = radeon_atom_ext_commit, .disable = radeon_atom_ext_disable, /* no detect for TMDS/LVDS yet */ }; static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { .dpms = radeon_atom_encoder_dpms, .mode_fixup = radeon_atom_mode_fixup, .prepare = radeon_atom_encoder_prepare, .mode_set = radeon_atom_encoder_mode_set, .commit = radeon_atom_encoder_commit, .disable = radeon_atom_encoder_disable, .detect = radeon_atom_dig_detect, }; static const struct 
drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { .dpms = radeon_atom_encoder_dpms, .mode_fixup = radeon_atom_mode_fixup, .prepare = radeon_atom_encoder_prepare, .mode_set = radeon_atom_encoder_mode_set, .commit = radeon_atom_encoder_commit, .detect = radeon_atom_dac_detect, }; void radeon_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); kfree(radeon_encoder->enc_priv); drm_encoder_cleanup(encoder); kfree(radeon_encoder); } static const struct drm_encoder_funcs radeon_atom_enc_funcs = { .destroy = radeon_enc_destroy, }; struct radeon_encoder_atom_dac * radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) { struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); if (!dac) return NULL; dac->tv_std = radeon_atombios_get_tv_info(rdev); return dac; } struct radeon_encoder_atom_dig * radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) { int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); if (!dig) return NULL; /* coherent mode by default */ dig->coherent_mode = true; dig->dig_encoder = -1; if (encoder_enum == 2) dig->linkb = true; else dig->linkb = false; return dig; } void radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device, u16 caps) { struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; /* see if we already added it */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->encoder_enum == encoder_enum) { radeon_encoder->devices |= supported_device; return; } } /* add a new one */ radeon_encoder = kzalloc(sizeof(struct 
radeon_encoder), GFP_KERNEL); if (!radeon_encoder) return; encoder = &radeon_encoder->base; switch (rdev->num_crtc) { case 1: encoder->possible_crtcs = 0x1; break; case 2: default: encoder->possible_crtcs = 0x3; break; case 4: encoder->possible_crtcs = 0xf; break; case 6: encoder->possible_crtcs = 0x3f; break; } radeon_encoder->enc_priv = NULL; radeon_encoder->encoder_enum = encoder_enum; radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; radeon_encoder->devices = supported_device; radeon_encoder->rmx_type = RMX_OFF; radeon_encoder->underscan_type = UNDERSCAN_OFF; radeon_encoder->is_ext_encoder = false; radeon_encoder->caps = caps; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: case ENCODER_OBJECT_ID_INTERNAL_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: case ENCODER_OBJECT_ID_INTERNAL_LVTM1: if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { radeon_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); } else { drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); } drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC); radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); break; case 
ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { radeon_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); } else { drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); } drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); break; case ENCODER_OBJECT_ID_SI170B: case ENCODER_OBJECT_ID_CH7303: case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: case ENCODER_OBJECT_ID_TITFP513: case ENCODER_OBJECT_ID_VT1623: case ENCODER_OBJECT_ID_HDMI_SI1930: case ENCODER_OBJECT_ID_TRAVIS: case ENCODER_OBJECT_ID_NUTMEG: /* these are handled by the primary encoders */ radeon_encoder->is_ext_encoder = true; if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); else drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); break; } }
gpl-2.0
FreeProjectAce/protou_kernel
drivers/watchdog/acquirewdt.c
3183
8927
/* * Acquire Single Board Computer Watchdog Timer driver * * Based on wdt.c. Original copyright messages: * * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide * warranty for any of this software. This material is provided * "AS-IS" and at no charge. * * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk> * * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com> * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT * Can't add timeout - driver doesn't allow changing value */ /* * Theory of Operation: * The Watch-Dog Timer is provided to ensure that standalone * Systems can always recover from catastrophic conditions that * caused the CPU to crash. This condition may have occurred by * external EMI or a software bug. When the CPU stops working * correctly, hardware on the board will either perform a hardware * reset (cold boot) or a non-maskable interrupt (NMI) to bring the * system back to a known state. * * The Watch-Dog Timer is controlled by two I/O Ports. * 443 hex - Read - Enable or refresh the Watch-Dog Timer * 043 hex - Read - Disable the Watch-Dog Timer * * To enable the Watch-Dog Timer, a read from I/O port 443h must * be performed. This will enable and activate the countdown timer * which will eventually time out and either reset the CPU or cause * an NMI depending on the setting of a jumper. To ensure that this * reset condition does not occur, the Watch-Dog Timer must be * periodically refreshed by reading the same I/O port 443h. * The Watch-Dog Timer is disabled by reading I/O port 043h. * * The Watch-Dog Timer Time-Out Period is set via jumpers. * It can be 1, 2, 10, 20, 110 or 220 seconds. 
*/ /* * Includes, defines, variables, module parameters, ... */ /* Includes */ #include <linux/module.h> /* For module specific items */ #include <linux/moduleparam.h> /* For new moduleparam's */ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV (WATCHDOG_MINOR) */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/fs.h> /* For file operations */ #include <linux/ioport.h> /* For io-port access */ #include <linux/platform_device.h> /* For platform_driver framework */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ #include <linux/io.h> /* For inb/outb/... */ /* Module information */ #define DRV_NAME "acquirewdt" #define PFX DRV_NAME ": " #define WATCHDOG_NAME "Acquire WDT" /* There is no way to see what the correct time-out period is */ #define WATCHDOG_HEARTBEAT 0 /* internal variables */ /* the watchdog platform device */ static struct platform_device *acq_platform_device; static unsigned long acq_is_open; static char expect_close; /* module parameters */ /* You must set this - there is no sane way to probe for this board. */ static int wdt_stop = 0x43; module_param(wdt_stop, int, 0); MODULE_PARM_DESC(wdt_stop, "Acquire WDT 'stop' io port (default 0x43)"); /* You must set this - there is no sane way to probe for this board. 
*/ static int wdt_start = 0x443; module_param(wdt_start, int, 0); MODULE_PARM_DESC(wdt_start, "Acquire WDT 'start' io port (default 0x443)"); static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * Watchdog Operations */ static void acq_keepalive(void) { /* Write a watchdog value */ inb_p(wdt_start); } static void acq_stop(void) { /* Turn the card off */ inb_p(wdt_stop); } /* * /dev/watchdog handling */ static ssize_t acq_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (count) { if (!nowayout) { size_t i; /* note: just in case someone wrote the magic character five months ago... */ expect_close = 0; /* scan to see whether or not we got the magic character */ for (i = 0; i != count; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } /* Well, anyhow someone wrote to us, we should return that favour */ acq_keepalive(); } return count; } static long acq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int options, retval = -EINVAL; void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = WATCHDOG_NAME, }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: { if (get_user(options, p)) return -EFAULT; if (options & WDIOS_DISABLECARD) { acq_stop(); retval = 0; } if (options & WDIOS_ENABLECARD) { acq_keepalive(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: acq_keepalive(); return 0; case WDIOC_GETTIMEOUT: return put_user(WATCHDOG_HEARTBEAT, p); default: return -ENOTTY; } } static int acq_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &acq_is_open)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Activate */ acq_keepalive(); return nonseekable_open(inode, file); } static int acq_close(struct inode *inode, struct file *file) { if (expect_close == 42) { acq_stop(); } else { printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); acq_keepalive(); } clear_bit(0, &acq_is_open); expect_close = 0; return 0; } /* * Kernel Interfaces */ static const struct file_operations acq_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = acq_write, .unlocked_ioctl = acq_ioctl, .open = acq_open, .release = acq_close, }; static struct miscdevice acq_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &acq_fops, }; /* * Init & exit routines */ static int __devinit acq_probe(struct platform_device *dev) { int ret; if (wdt_stop != wdt_start) { if (!request_region(wdt_stop, 1, WATCHDOG_NAME)) { printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", wdt_stop); ret = -EIO; goto out; } } if (!request_region(wdt_start, 1, WATCHDOG_NAME)) { printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", wdt_start); ret = -EIO; goto unreg_stop; } ret = misc_register(&acq_miscdev); if (ret != 0) { printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto unreg_regions; } printk(KERN_INFO PFX "initialized. 
(nowayout=%d)\n", nowayout); return 0; unreg_regions: release_region(wdt_start, 1); unreg_stop: if (wdt_stop != wdt_start) release_region(wdt_stop, 1); out: return ret; } static int __devexit acq_remove(struct platform_device *dev) { misc_deregister(&acq_miscdev); release_region(wdt_start, 1); if (wdt_stop != wdt_start) release_region(wdt_stop, 1); return 0; } static void acq_shutdown(struct platform_device *dev) { /* Turn the WDT off if we have a soft shutdown */ acq_stop(); } static struct platform_driver acquirewdt_driver = { .probe = acq_probe, .remove = __devexit_p(acq_remove), .shutdown = acq_shutdown, .driver = { .owner = THIS_MODULE, .name = DRV_NAME, }, }; static int __init acq_init(void) { int err; printk(KERN_INFO "WDT driver for Acquire single board computer initialising.\n"); err = platform_driver_register(&acquirewdt_driver); if (err) return err; acq_platform_device = platform_device_register_simple(DRV_NAME, -1, NULL, 0); if (IS_ERR(acq_platform_device)) { err = PTR_ERR(acq_platform_device); goto unreg_platform_driver; } return 0; unreg_platform_driver: platform_driver_unregister(&acquirewdt_driver); return err; } static void __exit acq_exit(void) { platform_device_unregister(acq_platform_device); platform_driver_unregister(&acquirewdt_driver); printk(KERN_INFO PFX "Watchdog Module Unloaded.\n"); } module_init(acq_init); module_exit(acq_exit); MODULE_AUTHOR("David Woodhouse"); MODULE_DESCRIPTION("Acquire Inc. Single Board Computer Watchdog Timer driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
stap75/android_kernel_huawei_viva
arch/mips/pmc-sierra/yosemite/ht-irq.c
3183
1874
/* * Copyright 2003 PMC-Sierra * Author: Manish Lachwani (lachwani@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/pci.h> /* * HT Bus fixup for the Titan * XXX IRQ values need to change based on the board layout */ void __init titan_ht_pcibios_fixup_bus(struct pci_bus *bus) { struct pci_bus *current_bus = bus; struct pci_dev *devices; struct list_head *devices_link; list_for_each(devices_link, &(current_bus->devices)) { devices = pci_dev_b(devices_link); if (devices == NULL) continue; } /* * PLX and SPKT related changes go here */ }
gpl-2.0
laufersteppenwolf/android_kernel_samsung_lt02
net/sched/act_api.c
4719
24047
/* * net/sched/act_api.c Packet action API. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Author: Jamal Hadi Salim * * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/err.h> #include <linux/module.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/sch_generic.h> #include <net/act_api.h> #include <net/netlink.h> void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) { unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); struct tcf_common **p1p; for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) { if (*p1p == p) { write_lock_bh(hinfo->lock); *p1p = p->tcfc_next; write_unlock_bh(hinfo->lock); gen_kill_estimator(&p->tcfc_bstats, &p->tcfc_rate_est); /* * gen_estimator est_timer() might access p->tcfc_lock * or bstats, wait a RCU grace period before freeing p */ kfree_rcu(p, tcfc_rcu); return; } } WARN_ON(1); } EXPORT_SYMBOL(tcf_hash_destroy); int tcf_hash_release(struct tcf_common *p, int bind, struct tcf_hashinfo *hinfo) { int ret = 0; if (p) { if (bind) p->tcfc_bindcnt--; p->tcfc_refcnt--; if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) { tcf_hash_destroy(p, hinfo); ret = 1; } } return ret; } EXPORT_SYMBOL(tcf_hash_release); static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, struct tc_action *a, struct tcf_hashinfo *hinfo) { struct tcf_common *p; int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; struct nlattr *nest; read_lock_bh(hinfo->lock); s_i = cb->args[0]; for (i = 0; i < (hinfo->hmask + 1); i++) { p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; for (; p; p = p->tcfc_next) { index++; if (index < s_i) continue; a->priv = 
p; a->order = n_i; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_1(skb, a, 0, 0); if (err < 0) { index--; nlmsg_trim(skb, nest); goto done; } nla_nest_end(skb, nest); n_i++; if (n_i >= TCA_ACT_MAX_PRIO) goto done; } } done: read_unlock_bh(hinfo->lock); if (n_i) cb->args[0] += n_i; return n_i; nla_put_failure: nla_nest_cancel(skb, nest); goto done; } static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, struct tcf_hashinfo *hinfo) { struct tcf_common *p, *s_p; struct nlattr *nest; int i = 0, n_i = 0; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); for (i = 0; i < (hinfo->hmask + 1); i++) { p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; while (p != NULL) { s_p = p->tcfc_next; if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) module_put(a->ops->owner); n_i++; p = s_p; } } NLA_PUT_U32(skb, TCA_FCNT, n_i); nla_nest_end(skb, nest); return n_i; nla_put_failure: nla_nest_cancel(skb, nest); return -EINVAL; } int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, int type, struct tc_action *a) { struct tcf_hashinfo *hinfo = a->ops->hinfo; if (type == RTM_DELACTION) { return tcf_del_walker(skb, a, hinfo); } else if (type == RTM_GETACTION) { return tcf_dump_walker(skb, cb, a, hinfo); } else { WARN(1, "tcf_generic_walker: unknown action %d\n", type); return -EINVAL; } } EXPORT_SYMBOL(tcf_generic_walker); struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo) { struct tcf_common *p; read_lock_bh(hinfo->lock); for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p; p = p->tcfc_next) { if (p->tcfc_index == index) break; } read_unlock_bh(hinfo->lock); return p; } EXPORT_SYMBOL(tcf_hash_lookup); u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo) { u32 val = *idx_gen; do { if (++val == 0) val = 1; } while (tcf_hash_lookup(val, hinfo)); return (*idx_gen = val); } 
EXPORT_SYMBOL(tcf_hash_new_index); int tcf_hash_search(struct tc_action *a, u32 index) { struct tcf_hashinfo *hinfo = a->ops->hinfo; struct tcf_common *p = tcf_hash_lookup(index, hinfo); if (p) { a->priv = p; return 1; } return 0; } EXPORT_SYMBOL(tcf_hash_search); struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind, struct tcf_hashinfo *hinfo) { struct tcf_common *p = NULL; if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) { if (bind) p->tcfc_bindcnt++; p->tcfc_refcnt++; a->priv = p; } return p; } EXPORT_SYMBOL(tcf_hash_check); struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, int size, int bind, u32 *idx_gen, struct tcf_hashinfo *hinfo) { struct tcf_common *p = kzalloc(size, GFP_KERNEL); if (unlikely(!p)) return ERR_PTR(-ENOMEM); p->tcfc_refcnt = 1; if (bind) p->tcfc_bindcnt = 1; spin_lock_init(&p->tcfc_lock); p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo); p->tcfc_tm.install = jiffies; p->tcfc_tm.lastuse = jiffies; if (est) { int err = gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est, &p->tcfc_lock, est); if (err) { kfree(p); return ERR_PTR(err); } } a->priv = (void *) p; return p; } EXPORT_SYMBOL(tcf_hash_create); void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo) { unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); write_lock_bh(hinfo->lock); p->tcfc_next = hinfo->htab[h]; hinfo->htab[h] = p; write_unlock_bh(hinfo->lock); } EXPORT_SYMBOL(tcf_hash_insert); static struct tc_action_ops *act_base = NULL; static DEFINE_RWLOCK(act_mod_lock); int tcf_register_action(struct tc_action_ops *act) { struct tc_action_ops *a, **ap; write_lock(&act_mod_lock); for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) { if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) { write_unlock(&act_mod_lock); return -EEXIST; } } act->next = NULL; *ap = act; write_unlock(&act_mod_lock); return 0; } EXPORT_SYMBOL(tcf_register_action); int 
tcf_unregister_action(struct tc_action_ops *act) { struct tc_action_ops *a, **ap; int err = -ENOENT; write_lock(&act_mod_lock); for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) if (a == act) break; if (a) { *ap = a->next; a->next = NULL; err = 0; } write_unlock(&act_mod_lock); return err; } EXPORT_SYMBOL(tcf_unregister_action); /* lookup by name */ static struct tc_action_ops *tc_lookup_action_n(char *kind) { struct tc_action_ops *a = NULL; if (kind) { read_lock(&act_mod_lock); for (a = act_base; a; a = a->next) { if (strcmp(kind, a->kind) == 0) { if (!try_module_get(a->owner)) { read_unlock(&act_mod_lock); return NULL; } break; } } read_unlock(&act_mod_lock); } return a; } /* lookup by nlattr */ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind) { struct tc_action_ops *a = NULL; if (kind) { read_lock(&act_mod_lock); for (a = act_base; a; a = a->next) { if (nla_strcmp(kind, a->kind) == 0) { if (!try_module_get(a->owner)) { read_unlock(&act_mod_lock); return NULL; } break; } } read_unlock(&act_mod_lock); } return a; } #if 0 /* lookup by id */ static struct tc_action_ops *tc_lookup_action_id(u32 type) { struct tc_action_ops *a = NULL; if (type) { read_lock(&act_mod_lock); for (a = act_base; a; a = a->next) { if (a->type == type) { if (!try_module_get(a->owner)) { read_unlock(&act_mod_lock); return NULL; } break; } } read_unlock(&act_mod_lock); } return a; } #endif int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act, struct tcf_result *res) { const struct tc_action *a; int ret = -1; if (skb->tc_verd & TC_NCLS) { skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); ret = TC_ACT_OK; goto exec_done; } while ((a = act) != NULL) { repeat: if (a->ops && a->ops->act) { ret = a->ops->act(skb, a, res); if (TC_MUNGED & skb->tc_verd) { /* copied already, allow trampling */ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd); } if (ret == TC_ACT_REPEAT) goto repeat; /* we need a ttl - JHS */ if (ret != TC_ACT_PIPE) 
goto exec_done; } act = a->next; } exec_done: return ret; } EXPORT_SYMBOL(tcf_action_exec); void tcf_action_destroy(struct tc_action *act, int bind) { struct tc_action *a; for (a = act; a; a = act) { if (a->ops && a->ops->cleanup) { if (a->ops->cleanup(a, bind) == ACT_P_DELETED) module_put(a->ops->owner); act = act->next; kfree(a); } else { /*FIXME: Remove later - catch insertion bugs*/ WARN(1, "tcf_action_destroy: BUG? destroying NULL ops\n"); act = act->next; kfree(a); } } } int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { int err = -EINVAL; if (a->ops == NULL || a->ops->dump == NULL) return err; return a->ops->dump(skb, a, bind, ref); } int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { int err = -EINVAL; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; if (a->ops == NULL || a->ops->dump == NULL) return err; NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); if (tcf_action_copy_stats(skb, a, 0)) goto nla_put_failure; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_old(skb, a, bind, ref); if (err > 0) { nla_nest_end(skb, nest); return err; } nla_put_failure: nlmsg_trim(skb, b); return -1; } EXPORT_SYMBOL(tcf_action_dump_1); int tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref) { struct tc_action *a; int err = -EINVAL; struct nlattr *nest; while ((a = act) != NULL) { act = a->next; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_1(skb, a, bind, ref); if (err < 0) goto errout; nla_nest_end(skb, nest); } return 0; nla_put_failure: err = -EINVAL; errout: nla_nest_cancel(skb, nest); return err; } struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind) { struct tc_action *a; struct tc_action_ops *a_o; char act_name[IFNAMSIZ]; struct nlattr *tb[TCA_ACT_MAX + 1]; struct nlattr *kind; int err; if (name == 
NULL) { err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; kind = tb[TCA_ACT_KIND]; if (kind == NULL) goto err_out; if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) goto err_out; } else { err = -EINVAL; if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) goto err_out; } a_o = tc_lookup_action_n(act_name); if (a_o == NULL) { #ifdef CONFIG_MODULES rtnl_unlock(); request_module("act_%s", act_name); rtnl_lock(); a_o = tc_lookup_action_n(act_name); /* We dropped the RTNL semaphore in order to * perform the module load. So, even if we * succeeded in loading the module we have to * tell the caller to replay the request. We * indicate this using -EAGAIN. */ if (a_o != NULL) { err = -EAGAIN; goto err_mod; } #endif err = -ENOENT; goto err_out; } err = -ENOMEM; a = kzalloc(sizeof(*a), GFP_KERNEL); if (a == NULL) goto err_mod; /* backward compatibility for policer */ if (name == NULL) err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind); else err = a_o->init(nla, est, a, ovr, bind); if (err < 0) goto err_free; /* module count goes up only when brand new policy is created * if it exists and is only bound to in a_o->init() then * ACT_P_CREATED is not returned (a zero is). 
*/ if (err != ACT_P_CREATED) module_put(a_o->owner); a->ops = a_o; return a; err_free: kfree(a); err_mod: module_put(a_o->owner); err_out: return ERR_PTR(err); } struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind) { struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *head = NULL, *act, *act_prev = NULL; int err; int i; err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); if (err < 0) return ERR_PTR(err); for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { act = tcf_action_init_1(tb[i], est, name, ovr, bind); if (IS_ERR(act)) goto err; act->order = i; if (head == NULL) head = act; else act_prev->next = act; act_prev = act; } return head; err: if (head != NULL) tcf_action_destroy(head, bind); return act; } int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, int compat_mode) { int err = 0; struct gnet_dump d; struct tcf_act_hdr *h = a->priv; if (h == NULL) goto errout; /* compat_mode being true specifies a call that is supposed * to add additional backward compatibility statistic TLVs. 
*/ if (compat_mode) { if (a->type == TCA_OLD_COMPAT) err = gnet_stats_start_copy_compat(skb, 0, TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d); else return 0; } else err = gnet_stats_start_copy(skb, TCA_ACT_STATS, &h->tcf_lock, &d); if (err < 0) goto errout; if (a->ops != NULL && a->ops->get_stats != NULL) if (a->ops->get_stats(skb, a) < 0) goto errout; if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 || gnet_stats_copy_rate_est(&d, &h->tcf_bstats, &h->tcf_rate_est) < 0 || gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0) goto errout; if (gnet_stats_finish_copy(&d) < 0) goto errout; return 0; errout: return -1; } static int tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq, u16 flags, int event, int bind, int ref) { struct tcamsg *t; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); t = NLMSG_DATA(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto nla_put_failure; if (tcf_action_dump(skb, a, bind, ref) < 0) goto nla_put_failure; nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nla_put_failure: nlmsg_failure: nlmsg_trim(skb, b); return -1; } static int act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n, struct tc_action *a, int event) { struct sk_buff *skb; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) { kfree_skb(skb); return -EINVAL; } return rtnl_unicast(skb, net, pid); } static struct tc_action * tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid) { struct nlattr *tb[TCA_ACT_MAX + 1]; struct tc_action *a; int index; int err; err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; if (tb[TCA_ACT_INDEX] == NULL || nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) goto err_out; index = 
nla_get_u32(tb[TCA_ACT_INDEX]); err = -ENOMEM; a = kzalloc(sizeof(struct tc_action), GFP_KERNEL); if (a == NULL) goto err_out; err = -EINVAL; a->ops = tc_lookup_action(tb[TCA_ACT_KIND]); if (a->ops == NULL) goto err_free; if (a->ops->lookup == NULL) goto err_mod; err = -ENOENT; if (a->ops->lookup(a, index) == 0) goto err_mod; module_put(a->ops->owner); return a; err_mod: module_put(a->ops->owner); err_free: kfree(a); err_out: return ERR_PTR(err); } static void cleanup_a(struct tc_action *act) { struct tc_action *a; for (a = act; a; a = act) { act = a->next; kfree(a); } } static struct tc_action *create_a(int i) { struct tc_action *act; act = kzalloc(sizeof(*act), GFP_KERNEL); if (act == NULL) { pr_debug("create_a: failed to alloc!\n"); return NULL; } act->order = i; return act; } static int tca_action_flush(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 pid) { struct sk_buff *skb; unsigned char *b; struct nlmsghdr *nlh; struct tcamsg *t; struct netlink_callback dcb; struct nlattr *nest; struct nlattr *tb[TCA_ACT_MAX + 1]; struct nlattr *kind; struct tc_action *a = create_a(0); int err = -ENOMEM; if (a == NULL) { pr_debug("tca_action_flush: couldnt create tc_action\n"); return err; } skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { pr_debug("tca_action_flush: failed skb alloc\n"); kfree(a); return err; } b = skb_tail_pointer(skb); err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; kind = tb[TCA_ACT_KIND]; a->ops = tc_lookup_action(kind); if (a->ops == NULL) goto err_out; nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t)); t = NLMSG_DATA(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto nla_put_failure; err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); if (err < 0) goto nla_put_failure; if (err == 0) goto noflush_out; nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; nlh->nlmsg_flags |= 
NLM_F_ROOT; module_put(a->ops->owner); kfree(a); err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); if (err > 0) return 0; return err; nla_put_failure: nlmsg_failure: module_put(a->ops->owner); err_out: noflush_out: kfree_skb(skb); kfree(a); return err; } static int tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) { int i, ret; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *head = NULL, *act, *act_prev = NULL; ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); if (ret < 0) return ret; if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { if (tb[1] != NULL) return tca_action_flush(net, tb[1], n, pid); else return -EINVAL; } for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { act = tcf_action_get_1(tb[i], n, pid); if (IS_ERR(act)) { ret = PTR_ERR(act); goto err; } act->order = i; if (head == NULL) head = act; else act_prev->next = act; act_prev = act; } if (event == RTM_GETACTION) ret = act_get_notify(net, pid, n, head, event); else { /* delete */ struct sk_buff *skb; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { ret = -ENOBUFS; goto err; } if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event, 0, 1) <= 0) { kfree_skb(skb); ret = -EINVAL; goto err; } /* now do the delete */ tcf_action_destroy(head, 0); ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); if (ret > 0) return 0; return ret; } err: cleanup_a(head); return ret; } static int tcf_add_notify(struct net *net, struct tc_action *a, u32 pid, u32 seq, int event, u16 flags) { struct tcamsg *t; struct nlmsghdr *nlh; struct sk_buff *skb; struct nlattr *nest; unsigned char *b; int err = 0; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; b = skb_tail_pointer(skb); nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); t = NLMSG_DATA(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) 
goto nla_put_failure; if (tcf_action_dump(skb, a, 0, 0) < 0) goto nla_put_failure; nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; NETLINK_CB(skb).dst_group = RTNLGRP_TC; err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO); if (err > 0) err = 0; return err; nla_put_failure: nlmsg_failure: kfree_skb(skb); return -1; } static int tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr) { int ret = 0; struct tc_action *act; struct tc_action *a; u32 seq = n->nlmsg_seq; act = tcf_action_init(nla, NULL, NULL, ovr, 0); if (act == NULL) goto done; if (IS_ERR(act)) { ret = PTR_ERR(act); goto done; } /* dump then free all the actions after update; inserted policy * stays intact */ ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); for (a = act; a; a = act) { act = a->next; kfree(a); } done: return ret; } static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_ACT_MAX + 1]; u32 pid = skb ? 
NETLINK_CB(skb).pid : 0; int ret = 0, ovr = 0; ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); if (ret < 0) return ret; if (tca[TCA_ACT_TAB] == NULL) { pr_notice("tc_ctl_action: received NO action attribs\n"); return -EINVAL; } /* n->nlmsg_flags & NLM_F_CREATE */ switch (n->nlmsg_type) { case RTM_NEWACTION: /* we are going to assume all other flags * imply create only if it doesn't exist * Note that CREATE | EXCL implies that * but since we want avoid ambiguity (eg when flags * is zero) then just set this */ if (n->nlmsg_flags & NLM_F_REPLACE) ovr = 1; replay: ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr); if (ret == -EAGAIN) goto replay; break; case RTM_DELACTION: ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, pid, RTM_DELACTION); break; case RTM_GETACTION: ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, pid, RTM_GETACTION); break; default: BUG(); } return ret; } static struct nlattr * find_dump_kind(const struct nlmsghdr *n) { struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1]; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct nlattr *nla[TCAA_MAX + 1]; struct nlattr *kind; if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0) return NULL; tb1 = nla[TCA_ACT_TAB]; if (tb1 == NULL) return NULL; if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL) < 0) return NULL; if (tb[1] == NULL) return NULL; if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]), nla_len(tb[1]), NULL) < 0) return NULL; kind = tb2[TCA_ACT_KIND]; return kind; } static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) { struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; struct tc_action_ops *a_o; struct tc_action a; int ret = 0; struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); struct nlattr *kind = find_dump_kind(cb->nlh); if (kind == NULL) { pr_info("tc_dump_action: action bad kind\n"); return 0; } a_o = tc_lookup_action(kind); if (a_o == NULL) return 0; memset(&a, 0, 
sizeof(struct tc_action)); a.ops = a_o; if (a_o->walk == NULL) { WARN(1, "tc_dump_action: %s !capable of dumping table\n", a_o->kind); goto nla_put_failure; } nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*t)); t = NLMSG_DATA(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto nla_put_failure; ret = a_o->walk(skb, cb, RTM_GETACTION, &a); if (ret < 0) goto nla_put_failure; if (ret > 0) { nla_nest_end(skb, nest); ret = skb->len; } else nla_nest_cancel(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; if (NETLINK_CB(cb->skb).pid && ret) nlh->nlmsg_flags |= NLM_F_MULTI; module_put(a_o->owner); return skb->len; nla_put_failure: nlmsg_failure: module_put(a_o->owner); nlmsg_trim(skb, b); return skb->len; } static int __init tc_action_init(void) { rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action, NULL); return 0; } subsys_initcall(tc_action_init);
gpl-2.0
yajnab/android_kernel_samsung_msm8625
drivers/tty/vt/selection.c
4975
9507
/* * This module exports the functions: * * 'int set_selection(struct tiocl_selection __user *, struct tty_struct *)' * 'void clear_selection(void)' * 'int paste_selection(struct tty_struct *)' * 'int sel_loadlut(char __user *)' * * Now that /dev/vcs exists, most of this can disappear again. */ #include <linux/module.h> #include <linux/tty.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/types.h> #include <asm/uaccess.h> #include <linux/kbd_kern.h> #include <linux/vt_kern.h> #include <linux/consolemap.h> #include <linux/selection.h> #include <linux/tiocl.h> #include <linux/console.h> /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */ #define isspace(c) ((c) == ' ') extern void poke_blanked_console(void); /* FIXME: all this needs locking */ /* Variables for selection control. */ /* Use a dynamic buffer, instead of static (Dec 1994) */ struct vc_data *sel_cons; /* must not be deallocated */ static int use_unicode; static volatile int sel_start = -1; /* cleared by clear_selection */ static int sel_end; static int sel_buffer_lth; static char *sel_buffer; /* clear_selection, highlight and highlight_pointer can be called from interrupt (via scrollback/front) */ /* set reverse video on characters s-e of console with selection. */ static inline void highlight(const int s, const int e) { invert_screen(sel_cons, s, e-s+2, 1); } /* use complementary color to show the pointer */ static inline void highlight_pointer(const int where) { complement_pos(sel_cons, where); } static u16 sel_pos(int n) { return inverse_translate(sel_cons, screen_glyph(sel_cons, n), use_unicode); } /** * clear_selection - remove current selection * * Remove the current selection highlight, if any from the console * holding the selection. The caller must hold the console lock. 
*/ void clear_selection(void) { highlight_pointer(-1); /* hide the pointer */ if (sel_start != -1) { highlight(sel_start, sel_end); sel_start = -1; } } /* * User settable table: what characters are to be considered alphabetic? * 256 bits. Locked by the console lock. */ static u32 inwordLut[8]={ 0x00000000, /* control chars */ 0x03FF0000, /* digits */ 0x87FFFFFE, /* uppercase and '_' */ 0x07FFFFFE, /* lowercase */ 0x00000000, 0x00000000, 0xFF7FFFFF, /* latin-1 accented letters, not multiplication sign */ 0xFF7FFFFF /* latin-1 accented letters, not division sign */ }; static inline int inword(const u16 c) { return c > 0xff || (( inwordLut[c>>5] >> (c & 0x1F) ) & 1); } /** * set loadlut - load the LUT table * @p: user table * * Load the LUT table from user space. The caller must hold the console * lock. Make a temporary copy so a partial update doesn't make a mess. */ int sel_loadlut(char __user *p) { u32 tmplut[8]; if (copy_from_user(tmplut, (u32 __user *)(p+4), 32)) return -EFAULT; memcpy(inwordLut, tmplut, 32); return 0; } /* does screen address p correspond to character at LH/RH edge of screen? */ static inline int atedge(const int p, int size_row) { return (!(p % size_row) || !((p + 2) % size_row)); } /* constrain v such that v <= u */ static inline unsigned short limit(const unsigned short v, const unsigned short u) { return (v > u) ? u : v; } /* stores the char in UTF8 and returns the number of bytes used (1-3) */ static int store_utf8(u16 c, char *p) { if (c < 0x80) { /* 0******* */ p[0] = c; return 1; } else if (c < 0x800) { /* 110***** 10****** */ p[0] = 0xc0 | (c >> 6); p[1] = 0x80 | (c & 0x3f); return 2; } else { /* 1110**** 10****** 10****** */ p[0] = 0xe0 | (c >> 12); p[1] = 0x80 | ((c >> 6) & 0x3f); p[2] = 0x80 | (c & 0x3f); return 3; } } /** * set_selection - set the current selection. * @sel: user selection info * @tty: the console tty * * Invoked by the ioctl handle for the vt layer. * * The entire selection process is managed under the console_lock. 
It's * a lot under the lock but its hardly a performance path */ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty) { struct vc_data *vc = vc_cons[fg_console].d; int sel_mode, new_sel_start, new_sel_end, spc; char *bp, *obp; int i, ps, pe, multiplier; u16 c; int mode; poke_blanked_console(); { unsigned short xs, ys, xe, ye; if (!access_ok(VERIFY_READ, sel, sizeof(*sel))) return -EFAULT; __get_user(xs, &sel->xs); __get_user(ys, &sel->ys); __get_user(xe, &sel->xe); __get_user(ye, &sel->ye); __get_user(sel_mode, &sel->sel_mode); xs--; ys--; xe--; ye--; xs = limit(xs, vc->vc_cols - 1); ys = limit(ys, vc->vc_rows - 1); xe = limit(xe, vc->vc_cols - 1); ye = limit(ye, vc->vc_rows - 1); ps = ys * vc->vc_size_row + (xs << 1); pe = ye * vc->vc_size_row + (xe << 1); if (sel_mode == TIOCL_SELCLEAR) { /* useful for screendump without selection highlights */ clear_selection(); return 0; } if (mouse_reporting() && (sel_mode & TIOCL_SELMOUSEREPORT)) { mouse_report(tty, sel_mode & TIOCL_SELBUTTONMASK, xs, ys); return 0; } } if (ps > pe) /* make sel_start <= sel_end */ { int tmp = ps; ps = pe; pe = tmp; } if (sel_cons != vc_cons[fg_console].d) { clear_selection(); sel_cons = vc_cons[fg_console].d; } mode = vt_do_kdgkbmode(fg_console); if (mode == K_UNICODE) use_unicode = 1; else use_unicode = 0; switch (sel_mode) { case TIOCL_SELCHAR: /* character-by-character selection */ new_sel_start = ps; new_sel_end = pe; break; case TIOCL_SELWORD: /* word-by-word selection */ spc = isspace(sel_pos(ps)); for (new_sel_start = ps; ; ps -= 2) { if ((spc && !isspace(sel_pos(ps))) || (!spc && !inword(sel_pos(ps)))) break; new_sel_start = ps; if (!(ps % vc->vc_size_row)) break; } spc = isspace(sel_pos(pe)); for (new_sel_end = pe; ; pe += 2) { if ((spc && !isspace(sel_pos(pe))) || (!spc && !inword(sel_pos(pe)))) break; new_sel_end = pe; if (!((pe + 2) % vc->vc_size_row)) break; } break; case TIOCL_SELLINE: /* line-by-line selection */ new_sel_start = ps - ps % 
vc->vc_size_row; new_sel_end = pe + vc->vc_size_row - pe % vc->vc_size_row - 2; break; case TIOCL_SELPOINTER: highlight_pointer(pe); return 0; default: return -EINVAL; } /* remove the pointer */ highlight_pointer(-1); /* select to end of line if on trailing space */ if (new_sel_end > new_sel_start && !atedge(new_sel_end, vc->vc_size_row) && isspace(sel_pos(new_sel_end))) { for (pe = new_sel_end + 2; ; pe += 2) if (!isspace(sel_pos(pe)) || atedge(pe, vc->vc_size_row)) break; if (isspace(sel_pos(pe))) new_sel_end = pe; } if (sel_start == -1) /* no current selection */ highlight(new_sel_start, new_sel_end); else if (new_sel_start == sel_start) { if (new_sel_end == sel_end) /* no action required */ return 0; else if (new_sel_end > sel_end) /* extend to right */ highlight(sel_end + 2, new_sel_end); else /* contract from right */ highlight(new_sel_end + 2, sel_end); } else if (new_sel_end == sel_end) { if (new_sel_start < sel_start) /* extend to left */ highlight(new_sel_start, sel_start - 2); else /* contract from left */ highlight(sel_start, new_sel_start - 2); } else /* some other case; start selection from scratch */ { clear_selection(); highlight(new_sel_start, new_sel_end); } sel_start = new_sel_start; sel_end = new_sel_end; /* Allocate a new buffer before freeing the old one ... */ multiplier = use_unicode ? 3 : 1; /* chars can take up to 3 bytes */ bp = kmalloc(((sel_end-sel_start)/2+1)*multiplier, GFP_KERNEL); if (!bp) { printk(KERN_WARNING "selection: kmalloc() failed\n"); clear_selection(); return -ENOMEM; } kfree(sel_buffer); sel_buffer = bp; obp = bp; for (i = sel_start; i <= sel_end; i += 2) { c = sel_pos(i); if (use_unicode) bp += store_utf8(c, bp); else *bp++ = c; if (!isspace(c)) obp = bp; if (! ((i + 2) % vc->vc_size_row)) { /* strip trailing blanks from line and add newline, unless non-space at end of line. 
*/ if (obp != bp) { bp = obp; *bp++ = '\r'; } obp = bp; } } sel_buffer_lth = bp - sel_buffer; return 0; } /* Insert the contents of the selection buffer into the * queue of the tty associated with the current console. * Invoked by ioctl(). * * Locking: called without locks. Calls the ldisc wrongly with * unsafe methods, */ int paste_selection(struct tty_struct *tty) { struct vc_data *vc = tty->driver_data; int pasted = 0; unsigned int count; struct tty_ldisc *ld; DECLARE_WAITQUEUE(wait, current); console_lock(); poke_blanked_console(); console_unlock(); /* FIXME: wtf is this supposed to achieve ? */ ld = tty_ldisc_ref(tty); if (!ld) ld = tty_ldisc_ref_wait(tty); /* FIXME: this is completely unsafe */ add_wait_queue(&vc->paste_wait, &wait); while (sel_buffer && sel_buffer_lth > pasted) { set_current_state(TASK_INTERRUPTIBLE); if (test_bit(TTY_THROTTLED, &tty->flags)) { schedule(); continue; } count = sel_buffer_lth - pasted; count = min(count, tty->receive_room); tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted, NULL, count); pasted += count; } remove_wait_queue(&vc->paste_wait, &wait); __set_current_state(TASK_RUNNING); tty_ldisc_deref(ld); return 0; }
gpl-2.0
RadonX-ROM/paradox_kernel_oneplus_msm8974
arch/frv/mm/pgalloc.c
5231
4014
/* pgalloc.c: page directory & page table allocation * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/quicklist.h> #include <asm/pgalloc.h> #include <asm/page.h> #include <asm/cacheflush.h> pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE))); pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); if (pte) clear_page(pte); return pte; } pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { struct page *page; #ifdef CONFIG_HIGHPTE page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0); #else page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); #endif if (page) { clear_highpage(page); pgtable_page_ctor(page); flush_dcache_page(page); } return page; } void __set_pmd(pmd_t *pmdptr, unsigned long pmd) { unsigned long *__ste_p = pmdptr->ste; int loop; if (!pmd) { memset(__ste_p, 0, PME_SIZE); } else { BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe)); for (loop = PME_SIZE; loop > 0; loop -= 4) { *__ste_p++ = pmd; pmd += __frv_PT_SIZE; } } frv_dcache_writeback((unsigned long) pmdptr, (unsigned long) (pmdptr + 1)); } /* * List of all pgd's needed for non-PAE so it can invalidate entries * in both cached and uncached pgd's; not needed for PAE since the * kernel pmd is shared. If PAE were not to share the pmd a similar * tactic would be needed. This is essentially codepath-based locking * against pageattr.c; it is the unique case in which a valid change * of kernel pagetables can't be lazily synchronized by vmalloc faults. 
* vmalloc faults work because attached pagetables are never freed. * If the locking proves to be non-performant, a ticketing scheme with * checks at dup_mmap(), exec(), and other mmlist addition points * could be used. The locking scheme was chosen on the basis of * manfred's recommendations and having no core impact whatsoever. * -- wli */ DEFINE_SPINLOCK(pgd_lock); struct page *pgd_list; static inline void pgd_list_add(pgd_t *pgd) { struct page *page = virt_to_page(pgd); page->index = (unsigned long) pgd_list; if (pgd_list) set_page_private(pgd_list, (unsigned long) &page->index); pgd_list = page; set_page_private(page, (unsigned long)&pgd_list); } static inline void pgd_list_del(pgd_t *pgd) { struct page *next, **pprev, *page = virt_to_page(pgd); next = (struct page *) page->index; pprev = (struct page **) page_private(page); *pprev = next; if (next) set_page_private(next, (unsigned long) pprev); } void pgd_ctor(void *pgd) { unsigned long flags; if (PTRS_PER_PMD == 1) spin_lock_irqsave(&pgd_lock, flags); memcpy((pgd_t *) pgd + USER_PGDS_IN_LAST_PML4, swapper_pg_dir + USER_PGDS_IN_LAST_PML4, (PTRS_PER_PGD - USER_PGDS_IN_LAST_PML4) * sizeof(pgd_t)); if (PTRS_PER_PMD > 1) return; pgd_list_add(pgd); spin_unlock_irqrestore(&pgd_lock, flags); memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t)); } /* never called when PTRS_PER_PMD > 1 */ void pgd_dtor(void *pgd) { unsigned long flags; /* can be called from interrupt context */ spin_lock_irqsave(&pgd_lock, flags); pgd_list_del(pgd); spin_unlock_irqrestore(&pgd_lock, flags); } pgd_t *pgd_alloc(struct mm_struct *mm) { return quicklist_alloc(0, GFP_KERNEL, pgd_ctor); } void pgd_free(struct mm_struct *mm, pgd_t *pgd) { /* in the non-PAE case, clear_page_tables() clears user pgd entries */ quicklist_free(0, pgd_dtor, pgd); } void __init pgtable_cache_init(void) { } void check_pgt_cache(void) { quicklist_trim(0, pgd_dtor, 25, 16); }
gpl-2.0
javelinanddart/UBER-L
drivers/staging/rtl8192u/r819xU_firmware.c
7791
12298
/************************************************************************************************** * Procedure: Init boot code/firmware code/data session * * Description: This routine will initialize firmware. If any error occurs during the initialization * process, the routine shall terminate immediately and return fail. * NIC driver should call NdisOpenFile only from MiniportInitialize. * * Arguments: The pointer of the adapter * Returns: * NDIS_STATUS_FAILURE - the following initialization process should be terminated * NDIS_STATUS_SUCCESS - if firmware initialization process success **************************************************************************************************/ #include "r8192U.h" #include "r8192U_hw.h" #include "r819xU_firmware_img.h" #include "r819xU_firmware.h" #include <linux/firmware.h> void firmware_init_param(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); rt_firmware *pfirmware = priv->pFirmware; pfirmware->cmdpacket_frag_thresold = GET_COMMAND_PACKET_FRAG_THRESHOLD(MAX_TRANSMIT_BUFFER_SIZE); } /* * segment the img and use the ptr and length to remember info on each segment * */ bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buffer_len) { struct r8192_priv *priv = ieee80211_priv(dev); bool rt_status = true; u16 frag_threshold; u16 frag_length, frag_offset = 0; //u16 total_size; int i; rt_firmware *pfirmware = priv->pFirmware; struct sk_buff *skb; unsigned char *seg_ptr; cb_desc *tcb_desc; u8 bLastIniPkt; firmware_init_param(dev); //Fragmentation might be required frag_threshold = pfirmware->cmdpacket_frag_thresold; do { if((buffer_len - frag_offset) > frag_threshold) { frag_length = frag_threshold ; bLastIniPkt = 0; } else { frag_length = buffer_len - frag_offset; bLastIniPkt = 1; } /* Allocate skb buffer to contain firmware info and tx descriptor info * add 4 to avoid packet appending overflow. 
* */ #ifdef RTL8192U skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4); #else skb = dev_alloc_skb(frag_length + 4); #endif memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev)); tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->queue_index = TXCMD_QUEUE; tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT; tcb_desc->bLastIniPkt = bLastIniPkt; #ifdef RTL8192U skb_reserve(skb, USB_HWDESC_HEADER_LEN); #endif seg_ptr = skb->data; /* * Transform from little endian to big endian * and pending zero */ for(i=0 ; i < frag_length; i+=4) { *seg_ptr++ = ((i+0)<frag_length)?code_virtual_address[i+3]:0; *seg_ptr++ = ((i+1)<frag_length)?code_virtual_address[i+2]:0; *seg_ptr++ = ((i+2)<frag_length)?code_virtual_address[i+1]:0; *seg_ptr++ = ((i+3)<frag_length)?code_virtual_address[i+0]:0; } tcb_desc->txbuf_size= (u16)i; skb_put(skb, i); if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)|| (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\ (priv->ieee80211->queue_stop) ) { RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n"); skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb); } else { priv->ieee80211->softmac_hard_start_xmit(skb,dev); } code_virtual_address += frag_length; frag_offset += frag_length; }while(frag_offset < buffer_len); return rt_status; } bool fwSendNullPacket( struct net_device *dev, u32 Length ) { bool rtStatus = true; struct r8192_priv *priv = ieee80211_priv(dev); struct sk_buff *skb; cb_desc *tcb_desc; unsigned char *ptr_buf; bool bLastInitPacket = false; //PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK); //Get TCB and local buffer from common pool. 
(It is shared by CmdQ, MgntQ, and USB coalesce DataQ) skb = dev_alloc_skb(Length+ 4); memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev)); tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->queue_index = TXCMD_QUEUE; tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT; tcb_desc->bLastIniPkt = bLastInitPacket; ptr_buf = skb_put(skb, Length); memset(ptr_buf,0,Length); tcb_desc->txbuf_size= (u16)Length; if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)|| (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\ (priv->ieee80211->queue_stop) ) { RT_TRACE(COMP_FIRMWARE,"===================NULL packet==================================> tx full!\n"); skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb); } else { priv->ieee80211->softmac_hard_start_xmit(skb,dev); } //PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK); return rtStatus; } //----------------------------------------------------------------------------- // Procedure: Check whether main code is download OK. If OK, turn on CPU // // Description: CPU register locates in different page against general register. 
// Switch to CPU register in the begin and switch back before return // // // Arguments: The pointer of the adapter // // Returns: // NDIS_STATUS_FAILURE - the following initialization process should be terminated // NDIS_STATUS_SUCCESS - if firmware initialization process success //----------------------------------------------------------------------------- bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev) { bool rt_status = true; int check_putcodeOK_time = 200000, check_bootOk_time = 200000; u32 CPU_status = 0; /* Check whether put code OK */ do { CPU_status = read_nic_dword(dev, CPU_GEN); if(CPU_status&CPU_GEN_PUT_CODE_OK) break; }while(check_putcodeOK_time--); if(!(CPU_status&CPU_GEN_PUT_CODE_OK)) { RT_TRACE(COMP_ERR, "Download Firmware: Put code fail!\n"); goto CPUCheckMainCodeOKAndTurnOnCPU_Fail; } else { RT_TRACE(COMP_FIRMWARE, "Download Firmware: Put code ok!\n"); } /* Turn On CPU */ CPU_status = read_nic_dword(dev, CPU_GEN); write_nic_byte(dev, CPU_GEN, (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff)); mdelay(1000); /* Check whether CPU boot OK */ do { CPU_status = read_nic_dword(dev, CPU_GEN); if(CPU_status&CPU_GEN_BOOT_RDY) break; }while(check_bootOk_time--); if(!(CPU_status&CPU_GEN_BOOT_RDY)) { goto CPUCheckMainCodeOKAndTurnOnCPU_Fail; } else { RT_TRACE(COMP_FIRMWARE, "Download Firmware: Boot ready!\n"); } return rt_status; CPUCheckMainCodeOKAndTurnOnCPU_Fail: RT_TRACE(COMP_ERR, "ERR in %s()\n", __FUNCTION__); rt_status = FALSE; return rt_status; } bool CPUcheck_firmware_ready(struct net_device *dev) { bool rt_status = true; int check_time = 200000; u32 CPU_status = 0; /* Check Firmware Ready */ do { CPU_status = read_nic_dword(dev, CPU_GEN); if(CPU_status&CPU_GEN_FIRM_RDY) break; }while(check_time--); if(!(CPU_status&CPU_GEN_FIRM_RDY)) goto CPUCheckFirmwareReady_Fail; else RT_TRACE(COMP_FIRMWARE, "Download Firmware: Firmware ready!\n"); return rt_status; CPUCheckFirmwareReady_Fail: RT_TRACE(COMP_ERR, "ERR in %s()\n", __FUNCTION__); rt_status = 
false; return rt_status; } bool init_firmware(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); bool rt_status = TRUE; u32 file_length = 0; u8 *mapped_file = NULL; u32 init_step = 0; opt_rst_type_e rst_opt = OPT_SYSTEM_RESET; firmware_init_step_e starting_state = FW_INIT_STEP0_BOOT; rt_firmware *pfirmware = priv->pFirmware; const struct firmware *fw_entry; const char *fw_name[3] = { "RTL8192U/boot.img", "RTL8192U/main.img", "RTL8192U/data.img"}; int rc; RT_TRACE(COMP_FIRMWARE, " PlatformInitFirmware()==>\n"); if (pfirmware->firmware_status == FW_STATUS_0_INIT ) { /* it is called by reset */ rst_opt = OPT_SYSTEM_RESET; starting_state = FW_INIT_STEP0_BOOT; // TODO: system reset }else if(pfirmware->firmware_status == FW_STATUS_5_READY) { /* it is called by Initialize */ rst_opt = OPT_FIRMWARE_RESET; starting_state = FW_INIT_STEP2_DATA; }else { RT_TRACE(COMP_FIRMWARE, "PlatformInitFirmware: undefined firmware state\n"); } /* * Download boot, main, and data image for System reset. * Download data image for firmware reseta */ for(init_step = starting_state; init_step <= FW_INIT_STEP2_DATA; init_step++) { /* * Open Image file, and map file to contineous memory if open file success. * or read image file from array. 
Default load from IMG file */ if(rst_opt == OPT_SYSTEM_RESET) { rc = request_firmware(&fw_entry, fw_name[init_step],&priv->udev->dev); if(rc < 0 ) { RT_TRACE(COMP_ERR, "request firmware fail!\n"); goto download_firmware_fail; } if(fw_entry->size > sizeof(pfirmware->firmware_buf)) { RT_TRACE(COMP_ERR, "img file size exceed the container buffer fail!\n"); goto download_firmware_fail; } if(init_step != FW_INIT_STEP1_MAIN) { memcpy(pfirmware->firmware_buf,fw_entry->data,fw_entry->size); mapped_file = pfirmware->firmware_buf; file_length = fw_entry->size; } else { #ifdef RTL8190P memcpy(pfirmware->firmware_buf,fw_entry->data,fw_entry->size); mapped_file = pfirmware->firmware_buf; file_length = fw_entry->size; #else memset(pfirmware->firmware_buf,0,128); memcpy(&pfirmware->firmware_buf[128],fw_entry->data,fw_entry->size); mapped_file = pfirmware->firmware_buf; file_length = fw_entry->size + 128; #endif } pfirmware->firmware_buf_size = file_length; }else if(rst_opt == OPT_FIRMWARE_RESET ) { /* we only need to download data.img here */ mapped_file = pfirmware->firmware_buf; file_length = pfirmware->firmware_buf_size; } /* Download image file */ /* The firmware download process is just as following, * 1. that is each packet will be segmented and inserted to the wait queue. * 2. each packet segment will be put in the skb_buff packet. * 3. each skb_buff packet data content will already include the firmware info * and Tx descriptor info * */ rt_status = fw_download_code(dev,mapped_file,file_length); if(rst_opt == OPT_SYSTEM_RESET) { release_firmware(fw_entry); } if(rt_status != TRUE) { goto download_firmware_fail; } switch(init_step) { case FW_INIT_STEP0_BOOT: /* Download boot * initialize command descriptor. 
* will set polling bit when firmware code is also configured */ pfirmware->firmware_status = FW_STATUS_1_MOVE_BOOT_CODE; #ifdef RTL8190P // To initialize IMEM, CPU move code from 0x80000080, hence, we send 0x80 byte packet rt_status = fwSendNullPacket(dev, RTL8190_CPU_START_OFFSET); if(rt_status != true) { RT_TRACE(COMP_INIT, "fwSendNullPacket() fail ! \n"); goto download_firmware_fail; } #endif //mdelay(1000); /* * To initialize IMEM, CPU move code from 0x80000080, * hence, we send 0x80 byte packet */ break; case FW_INIT_STEP1_MAIN: /* Download firmware code. Wait until Boot Ready and Turn on CPU */ pfirmware->firmware_status = FW_STATUS_2_MOVE_MAIN_CODE; /* Check Put Code OK and Turn On CPU */ rt_status = CPUcheck_maincodeok_turnonCPU(dev); if(rt_status != TRUE) { RT_TRACE(COMP_ERR, "CPUcheck_maincodeok_turnonCPU fail!\n"); goto download_firmware_fail; } pfirmware->firmware_status = FW_STATUS_3_TURNON_CPU; break; case FW_INIT_STEP2_DATA: /* download initial data code */ pfirmware->firmware_status = FW_STATUS_4_MOVE_DATA_CODE; mdelay(1); rt_status = CPUcheck_firmware_ready(dev); if(rt_status != TRUE) { RT_TRACE(COMP_ERR, "CPUcheck_firmware_ready fail(%d)!\n",rt_status); goto download_firmware_fail; } /* wait until data code is initialized ready.*/ pfirmware->firmware_status = FW_STATUS_5_READY; break; } } RT_TRACE(COMP_FIRMWARE, "Firmware Download Success\n"); //assert(pfirmware->firmware_status == FW_STATUS_5_READY, ("Firmware Download Fail\n")); return rt_status; download_firmware_fail: RT_TRACE(COMP_ERR, "ERR in %s()\n", __FUNCTION__); rt_status = FALSE; return rt_status; } MODULE_FIRMWARE("RTL8192U/boot.img"); MODULE_FIRMWARE("RTL8192U/main.img"); MODULE_FIRMWARE("RTL8192U/data.img");
gpl-2.0
bshiznit/android_kernel_htc_flounder
arch/sh/kernel/cpu/clock.c
11887
1152
/* * arch/sh/kernel/cpu/clock.c - SuperH clock framework * * Copyright (C) 2005 - 2009 Paul Mundt * * This clock framework is derived from the OMAP version by: * * Copyright (C) 2004 - 2008 Nokia Corporation * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> * * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/clk.h> #include <asm/clock.h> #include <asm/machvec.h> int __init clk_init(void) { int ret; ret = arch_clk_init(); if (unlikely(ret)) { pr_err("%s: CPU clock registration failed.\n", __func__); return ret; } if (sh_mv.mv_clk_init) { ret = sh_mv.mv_clk_init(); if (unlikely(ret)) { pr_err("%s: machvec clock initialization failed.\n", __func__); return ret; } } /* Kick the child clocks.. */ recalculate_root_clocks(); /* Enable the necessary init clocks */ clk_enable_init_clocks(); return ret; }
gpl-2.0
codesnake/linux-amlogic
net/irda/irnet/irnet_irda.c
12911
56923
/* * IrNET protocol module : Synchronous PPP over an IrDA socket. * * Jean II - HPL `00 - <jt@hpl.hp.com> * * This file implement the IRDA interface of IrNET. * Basically, we sit on top of IrTTP. We set up IrTTP, IrIAS properly, * and exchange frames with IrTTP. */ #include "irnet_irda.h" /* Private header */ #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <asm/unaligned.h> /* * PPP disconnect work: we need to make sure we're in * process context when calling ppp_unregister_channel(). */ static void irnet_ppp_disconnect(struct work_struct *work) { irnet_socket * self = container_of(work, irnet_socket, disconnect_work); if (self == NULL) return; /* * If we were connected, cleanup & close the PPP * channel, which will kill pppd (hangup) and the rest. */ if (self->ppp_open && !self->ttp_open && !self->ttp_connect) { ppp_unregister_channel(&self->chan); self->ppp_open = 0; } } /************************* CONTROL CHANNEL *************************/ /* * When ppp is not active, /dev/irnet act as a control channel. * Writing allow to set up the IrDA destination of the IrNET channel, * and any application may be read events happening on IrNET... */ /*------------------------------------------------------------------*/ /* * Post an event to the control channel... * Put the event in the log, and then wait all process blocked on read * so they can read the log... */ static void irnet_post_event(irnet_socket * ap, irnet_event event, __u32 saddr, __u32 daddr, char * name, __u16 hints) { int index; /* In the log */ DENTER(CTRL_TRACE, "(ap=0x%p, event=%d, daddr=%08x, name=``%s'')\n", ap, event, daddr, name); /* Protect this section via spinlock. * Note : as we are the only event producer, we only need to exclude * ourself when touching the log, which is nice and easy. 
*/ spin_lock_bh(&irnet_events.spinlock); /* Copy the event in the log */ index = irnet_events.index; irnet_events.log[index].event = event; irnet_events.log[index].daddr = daddr; irnet_events.log[index].saddr = saddr; /* Try to copy IrDA nickname */ if(name) strcpy(irnet_events.log[index].name, name); else irnet_events.log[index].name[0] = '\0'; /* Copy hints */ irnet_events.log[index].hints.word = hints; /* Try to get ppp unit number */ if((ap != (irnet_socket *) NULL) && (ap->ppp_open)) irnet_events.log[index].unit = ppp_unit_number(&ap->chan); else irnet_events.log[index].unit = -1; /* Increment the index * Note that we increment the index only after the event is written, * to make sure that the readers don't get garbage... */ irnet_events.index = (index + 1) % IRNET_MAX_EVENTS; DEBUG(CTRL_INFO, "New event index is %d\n", irnet_events.index); /* Spin lock end */ spin_unlock_bh(&irnet_events.spinlock); /* Now : wake up everybody waiting for events... */ wake_up_interruptible_all(&irnet_events.rwait); DEXIT(CTRL_TRACE, "\n"); } /************************* IRDA SUBROUTINES *************************/ /* * These are a bunch of subroutines called from other functions * down there, mostly common code or to improve readability... * * Note : we duplicate quite heavily some routines of af_irda.c, * because our input structure (self) is quite different * (struct irnet instead of struct irda_sock), which make sharing * the same code impossible (at least, without templates). */ /*------------------------------------------------------------------*/ /* * Function irda_open_tsap (self) * * Open local Transport Service Access Point (TSAP) * * Create a IrTTP instance for us and set all the IrTTP callbacks. 
*/ static inline int irnet_open_tsap(irnet_socket * self) { notify_t notify; /* Callback structure */ DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self); DABORT(self->tsap != NULL, -EBUSY, IRDA_SR_ERROR, "Already busy !\n"); /* Initialize IrTTP callbacks to be used by the IrDA stack */ irda_notify_init(&notify); notify.connect_confirm = irnet_connect_confirm; notify.connect_indication = irnet_connect_indication; notify.disconnect_indication = irnet_disconnect_indication; notify.data_indication = irnet_data_indication; /*notify.udata_indication = NULL;*/ notify.flow_indication = irnet_flow_indication; notify.status_indication = irnet_status_indication; notify.instance = self; strlcpy(notify.name, IRNET_NOTIFY_NAME, sizeof(notify.name)); /* Open an IrTTP instance */ self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); DABORT(self->tsap == NULL, -ENOMEM, IRDA_SR_ERROR, "Unable to allocate TSAP !\n"); /* Remember which TSAP selector we actually got */ self->stsap_sel = self->tsap->stsap_sel; DEXIT(IRDA_SR_TRACE, " - tsap=0x%p, sel=0x%X\n", self->tsap, self->stsap_sel); return 0; } /*------------------------------------------------------------------*/ /* * Function irnet_ias_to_tsap (self, result, value) * * Examine an IAS object and extract TSAP * * We do an IAP query to find the TSAP associated with the IrNET service. * When IrIAP pass us the result of the query, this function look at * the return values to check for failures and extract the TSAP if * possible. 
* Also deallocate value * The failure is in self->errno * Return TSAP or -1 */ static inline __u8 irnet_ias_to_tsap(irnet_socket * self, int result, struct ias_value * value) { __u8 dtsap_sel = 0; /* TSAP we are looking for */ DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self); /* By default, no error */ self->errno = 0; /* Check if request succeeded */ switch(result) { /* Standard errors : service not available */ case IAS_CLASS_UNKNOWN: case IAS_ATTRIB_UNKNOWN: DEBUG(IRDA_SR_INFO, "IAS object doesn't exist ! (%d)\n", result); self->errno = -EADDRNOTAVAIL; break; /* Other errors, most likely IrDA stack failure */ default : DEBUG(IRDA_SR_INFO, "IAS query failed ! (%d)\n", result); self->errno = -EHOSTUNREACH; break; /* Success : we got what we wanted */ case IAS_SUCCESS: break; } /* Check what was returned to us */ if(value != NULL) { /* What type of argument have we got ? */ switch(value->type) { case IAS_INTEGER: DEBUG(IRDA_SR_INFO, "result=%d\n", value->t.integer); if(value->t.integer != -1) /* Get the remote TSAP selector */ dtsap_sel = value->t.integer; else self->errno = -EADDRNOTAVAIL; break; default: self->errno = -EADDRNOTAVAIL; DERROR(IRDA_SR_ERROR, "bad type ! (0x%X)\n", value->type); break; } /* Cleanup */ irias_delete_value(value); } else /* value == NULL */ { /* Nothing returned to us - usually result != SUCCESS */ if(!(self->errno)) { DERROR(IRDA_SR_ERROR, "IrDA bug : result == SUCCESS && value == NULL\n"); self->errno = -EHOSTUNREACH; } } DEXIT(IRDA_SR_TRACE, "\n"); /* Return the TSAP */ return dtsap_sel; } /*------------------------------------------------------------------*/ /* * Function irnet_find_lsap_sel (self) * * Try to lookup LSAP selector in remote LM-IAS * * Basically, we start a IAP query, and then go to sleep. When the query * return, irnet_getvalue_confirm will wake us up, and we can examine the * result of the query... * Note that in some case, the query fail even before we go to sleep, * creating some races... 
*/ static inline int irnet_find_lsap_sel(irnet_socket * self) { DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self); /* This should not happen */ DABORT(self->iriap, -EBUSY, IRDA_SR_ERROR, "busy with a previous query.\n"); /* Create an IAP instance, will be closed in irnet_getvalue_confirm() */ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, irnet_getvalue_confirm); /* Treat unexpected signals as disconnect */ self->errno = -EHOSTUNREACH; /* Query remote LM-IAS */ iriap_getvaluebyclass_request(self->iriap, self->rsaddr, self->daddr, IRNET_SERVICE_NAME, IRNET_IAS_VALUE); /* The above request is non-blocking. * After a while, IrDA will call us back in irnet_getvalue_confirm() * We will then call irnet_ias_to_tsap() and finish the * connection procedure */ DEXIT(IRDA_SR_TRACE, "\n"); return 0; } /*------------------------------------------------------------------*/ /* * Function irnet_connect_tsap (self) * * Initialise the TTP socket and initiate TTP connection * */ static inline int irnet_connect_tsap(irnet_socket * self) { int err; DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self); /* Open a local TSAP (an IrTTP instance) */ err = irnet_open_tsap(self); if(err != 0) { clear_bit(0, &self->ttp_connect); DERROR(IRDA_SR_ERROR, "connect aborted!\n"); return err; } /* Connect to remote device */ err = irttp_connect_request(self->tsap, self->dtsap_sel, self->rsaddr, self->daddr, NULL, self->max_sdu_size_rx, NULL); if(err != 0) { clear_bit(0, &self->ttp_connect); DERROR(IRDA_SR_ERROR, "connect aborted!\n"); return err; } /* The above call is non-blocking. * After a while, the IrDA stack will either call us back in * irnet_connect_confirm() or irnet_disconnect_indication() * See you there ;-) */ DEXIT(IRDA_SR_TRACE, "\n"); return err; } /*------------------------------------------------------------------*/ /* * Function irnet_discover_next_daddr (self) * * Query the IrNET TSAP of the next device in the log. * * Used in the TSAP discovery procedure. 
 */
static inline int
irnet_discover_next_daddr(irnet_socket *	self)
{
  /* Close the last instance of IrIAP, and open a new one.
   * We can't reuse the IrIAP instance in the IrIAP callback */
  if(self->iriap)
    {
      iriap_close(self->iriap);
      self->iriap = NULL;
    }
  /* Create a new IAP instance */
  self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
			   irnet_discovervalue_confirm);
  if(self->iriap == NULL)
    return -ENOMEM;

  /* Next discovery - before the call to avoid races */
  self->disco_index++;

  /* Check if we have one more address to try */
  if(self->disco_index < self->disco_number)
    {
      /* Query remote LM-IAS */
      iriap_getvaluebyclass_request(self->iriap,
				    self->discoveries[self->disco_index].saddr,
				    self->discoveries[self->disco_index].daddr,
				    IRNET_SERVICE_NAME, IRNET_IAS_VALUE);
      /* The above request is non-blocking.
       * After a while, IrDA will call us back in irnet_discovervalue_confirm()
       * We will then call irnet_ias_to_tsap() and come back here again... */
      return 0;
    }
  else
    /* No more addresses in the log : tell the caller to wrap up */
    return 1;
}

/*------------------------------------------------------------------*/
/*
 * Function irnet_discover_daddr_and_lsap_sel (self)
 *
 *    This try to find a device with the requested service.
 *
 * Initiate a TSAP discovery procedure.
 * It basically look into the discovery log. For each address in the list,
 * it queries the LM-IAS of the device to find if this device offer
 * the requested service.
 * If there is more than one node supporting the service, we complain
 * to the user (it should move devices around).
 * If we find one node which have the requested TSAP, we connect to it.
 *
 * This function just start the whole procedure. It request the discovery
 * log and submit the first IAS query.
 * The bulk of the job is handled in irnet_discovervalue_confirm()
 *
 * Note : this procedure fails if there is more than one device in range
 * on the same dongle, because IrLMP doesn't disconnect the LAP when the
 * last LSAP is closed. Moreover, we would need to wait the LAP
 * disconnection...
 */
static inline int
irnet_discover_daddr_and_lsap_sel(irnet_socket *	self)
{
  int	ret;

  DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self);

  /* Ask lmp for the current discovery log (caller owns the copy,
   * freed here on failure or later in the callback path) */
  self->discoveries = irlmp_get_discoveries(&self->disco_number, self->mask,
					    DISCOVERY_DEFAULT_SLOTS);

  /* Check if the we got some results */
  if(self->discoveries == NULL)
    {
      self->disco_number = -1;
      clear_bit(0, &self->ttp_connect);
      DRETURN(-ENETUNREACH, IRDA_SR_INFO, "No Cachelog...\n");
    }
  DEBUG(IRDA_SR_INFO, "Got the log (0x%p), size is %d\n",
	self->discoveries, self->disco_number);

  /* Start with the first discovery */
  self->disco_index = -1;
  self->daddr = DEV_ADDR_ANY;

  /* This will fail if the log is empty - this is non-blocking */
  ret = irnet_discover_next_daddr(self);
  if(ret)
    {
      /* Close IAP */
      if(self->iriap)
	iriap_close(self->iriap);
      self->iriap = NULL;

      /* Cleanup our copy of the discovery log */
      kfree(self->discoveries);
      self->discoveries = NULL;

      clear_bit(0, &self->ttp_connect);
      DRETURN(-ENETUNREACH, IRDA_SR_INFO, "Cachelog empty...\n");
    }

  /* Follow me in irnet_discovervalue_confirm() */

  DEXIT(IRDA_SR_TRACE, "\n");
  return 0;
}

/*------------------------------------------------------------------*/
/*
 * Function irnet_dname_to_daddr (self)
 *
 *    Convert an IrDA nickname to a valid IrDA address
 *
 * It basically look into the discovery log until there is a match.
 */
static inline int
irnet_dname_to_daddr(irnet_socket *	self)
{
  struct irda_device_info *discoveries;	/* Copy of the discovery log */
  int	number;			/* Number of nodes in the log */
  int	i;

  DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self);

  /* Ask lmp for the current discovery log */
  discoveries = irlmp_get_discoveries(&number, 0xffff,
				      DISCOVERY_DEFAULT_SLOTS);
  /* Check if the we got some results */
  if(discoveries == NULL)
    DRETURN(-ENETUNREACH, IRDA_SR_INFO, "Cachelog empty...\n");

  /*
   * Now, check all discovered devices (if any), and connect
   * client only about the services that the client is
   * interested in...
   */
  for(i = 0; i < number; i++)
    {
      /* Does the name match ? */
      if(!strncmp(discoveries[i].info, self->rname, NICKNAME_MAX_LEN))
	{
	  /* Yes !!! Get it.. */
	  self->daddr = discoveries[i].daddr;
	  DEBUG(IRDA_SR_INFO, "discovered device ``%s'' at address 0x%08x.\n",
		self->rname, self->daddr);
	  kfree(discoveries);
	  DEXIT(IRDA_SR_TRACE, "\n");
	  return 0;
	}
    }
  /* No luck ! */
  DEBUG(IRDA_SR_INFO, "cannot discover device ``%s'' !!!\n", self->rname);
  kfree(discoveries);
  return -EADDRNOTAVAIL;
}


/************************* SOCKET ROUTINES *************************/
/*
 * This are the main operations on IrNET sockets, basically to create
 * and destroy IrNET sockets. These are called from the PPP part...
 */

/*------------------------------------------------------------------*/
/*
 * Create a IrNET instance : just initialise some parameters...
 */
int
irda_irnet_create(irnet_socket *	self)
{
  DENTER(IRDA_SOCK_TRACE, "(self=0x%p)\n", self);

  self->magic = IRNET_MAGIC;	/* Paranoia */

  self->ttp_open = 0;		/* Prevent higher layer from accessing IrTTP */
  self->ttp_connect = 0;	/* Not connecting yet */
  self->rname[0] = '\0';	/* May be set via control channel */
  self->rdaddr = DEV_ADDR_ANY;	/* May be set via control channel */
  self->rsaddr = DEV_ADDR_ANY;	/* May be set via control channel */
  self->daddr = DEV_ADDR_ANY;	/* Until we get connected */
  self->saddr = DEV_ADDR_ANY;	/* Until we get connected */
  self->max_sdu_size_rx = TTP_SAR_UNBOUND;

  /* Register as a client with IrLMP */
  self->ckey = irlmp_register_client(0, NULL, NULL, NULL);
#ifdef DISCOVERY_NOMASK
  self->mask = 0xffff;		/* For W2k compatibility */
#else /* DISCOVERY_NOMASK */
  self->mask = irlmp_service_to_hint(S_LAN);
#endif /* DISCOVERY_NOMASK */
  self->tx_flow = FLOW_START;	/* Flow control from IrTTP */

  INIT_WORK(&self->disconnect_work, irnet_ppp_disconnect);

  DEXIT(IRDA_SOCK_TRACE, "\n");
  return 0;
}

/*------------------------------------------------------------------*/
/*
 * Connect to the other side :
 *	o convert device name to an address
 *
 *	o find the socket number (dlsap)
 *	o Establish the connection
 *
 * Note : We no longer mimic af_irda. The IAS query for finding the TSAP
 * is done asynchronously, like the TTP connection. This allow us to
 * call this function from any context (not only process).
 * The downside is that following what's happening in there is tricky
 * because it involve various functions all over the place...
 */
int
irda_irnet_connect(irnet_socket *	self)
{
  int		err;

  DENTER(IRDA_SOCK_TRACE, "(self=0x%p)\n", self);

  /* Check if we are already trying to connect.
   * Because irda_irnet_connect() can be called directly by pppd plus
   * packet retries in ppp_generic and connect may take time, plus we may
   * race with irnet_connect_indication(), we need to be careful there... */
  if(test_and_set_bit(0, &self->ttp_connect))
    DRETURN(-EBUSY, IRDA_SOCK_INFO, "Already connecting...\n");
  if((self->iriap != NULL) || (self->tsap != NULL))
    DERROR(IRDA_SOCK_ERROR, "Socket not cleaned up...\n");

  /* Insert ourselves in the hashbin so that the IrNET server can find us.
   * Notes : 4th arg is string of 32 char max and must be null terminated
   * When 4th arg is used (string), 3rd arg isn't (int)
   * Can't re-insert (MUST remove first) so check for that...
   */
  if((irnet_server.running) && (self->q.q_next == NULL))
    {
      spin_lock_bh(&irnet_server.spinlock);
      hashbin_insert(irnet_server.list, (irda_queue_t *) self,
		     0, self->rname);
      spin_unlock_bh(&irnet_server.spinlock);
      DEBUG(IRDA_SOCK_INFO, "Inserted ``%s'' in hashbin...\n", self->rname);
    }

  /* If we don't have anything (no address, no name) */
  if((self->rdaddr == DEV_ADDR_ANY) && (self->rname[0] == '\0'))
    {
      /* Try to find a suitable address */
      if((err = irnet_discover_daddr_and_lsap_sel(self)) != 0)
	DRETURN(err, IRDA_SOCK_INFO, "auto-connect failed!\n");
      /* In most cases, the call above is non-blocking */
    }
  else
    {
      /* If we have only the name (no address), try to get an address */
      if(self->rdaddr == DEV_ADDR_ANY)
	{
	  if((err = irnet_dname_to_daddr(self)) != 0)
	    DRETURN(err, IRDA_SOCK_INFO, "name connect failed!\n");
	}
      else
	/* Use the requested destination address */
	self->daddr = self->rdaddr;

      /* Query remote LM-IAS to find LSAP selector */
      irnet_find_lsap_sel(self);
      /* The above call is non blocking */
    }

  /* At this point, we are waiting for the IrDA stack to call us back,
   * or we have already failed.
   * We will finish the connection procedure in irnet_connect_tsap().
   */
  DEXIT(IRDA_SOCK_TRACE, "\n");
  return 0;
}

/*------------------------------------------------------------------*/
/*
 * Function irda_irnet_destroy(self)
 *
 *    Destroy irnet instance
 *
 * Note : this need to be called from a process context.
 */
void
irda_irnet_destroy(irnet_socket *	self)
{
  DENTER(IRDA_SOCK_TRACE, "(self=0x%p)\n", self);
  if(self == NULL)
    return;

  /* Remove ourselves from hashbin (if we are queued in hashbin)
   * Note : `irnet_server.running' protect us from calls in hashbin_delete() */
  if((irnet_server.running) && (self->q.q_next != NULL))
    {
      struct irnet_socket *	entry;
      DEBUG(IRDA_SOCK_INFO, "Removing from hash..\n");
      spin_lock_bh(&irnet_server.spinlock);
      entry = hashbin_remove_this(irnet_server.list, (irda_queue_t *) self);
      self->q.q_next = NULL;
      spin_unlock_bh(&irnet_server.spinlock);
      DASSERT(entry == self, , IRDA_SOCK_ERROR, "Can't remove from hash.\n");
    }

  /* If we were connected, post a message */
  if(test_bit(0, &self->ttp_open))
    {
      /* Note : as the disconnect comes from ppp_generic, the unit number
       * doesn't exist anymore when we post the event, so we need to pass
       * NULL as the first arg... */
      irnet_post_event(NULL, IRNET_DISCONNECT_TO,
		       self->saddr, self->daddr, self->rname, 0);
    }

  /* Prevent various IrDA callbacks from messing up things
   * Need to be first */
  clear_bit(0, &self->ttp_connect);

  /* Prevent higher layer from accessing IrTTP */
  clear_bit(0, &self->ttp_open);

  /* Unregister with IrLMP */
  irlmp_unregister_client(self->ckey);

  /* Unregister with LM-IAS */
  if(self->iriap)
    {
      iriap_close(self->iriap);
      self->iriap = NULL;
    }

  /* Cleanup eventual discoveries from connection attempt or control channel */
  if(self->discoveries != NULL)
    {
      /* Cleanup our copy of the discovery log */
      kfree(self->discoveries);
      self->discoveries = NULL;
    }

  /* Close our IrTTP connection */
  if(self->tsap)
    {
      DEBUG(IRDA_SOCK_INFO, "Closing our TTP connection.\n");
      irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
      irttp_close_tsap(self->tsap);
      self->tsap = NULL;
    }
  self->stsap_sel = 0;

  DEXIT(IRDA_SOCK_TRACE, "\n");
}


/************************** SERVER SOCKET **************************/
/*
 * The IrNET service is composed of one server socket and a variable
 * number of regular IrNET sockets.
 * The server socket is supposed to
 * handle incoming connections and redirect them to one IrNET sockets.
 * It's a superset of the regular IrNET socket, but has a very distinct
 * behaviour...
 */

/*------------------------------------------------------------------*/
/*
 * Function irnet_daddr_to_dname (self)
 *
 *    Convert an IrDA address to a IrDA nickname
 *
 * It basically look into the discovery log until there is a match.
 */
static inline int
irnet_daddr_to_dname(irnet_socket *	self)
{
  struct irda_device_info *discoveries;	/* Copy of the discovery log */
  int	number;			/* Number of nodes in the log */
  int	i;

  DENTER(IRDA_SERV_TRACE, "(self=0x%p)\n", self);

  /* Ask lmp for the current discovery log */
  discoveries = irlmp_get_discoveries(&number, 0xffff,
				      DISCOVERY_DEFAULT_SLOTS);
  /* Check if the we got some results */
  if (discoveries == NULL)
    DRETURN(-ENETUNREACH, IRDA_SERV_INFO, "Cachelog empty...\n");

  /* Now, check all discovered devices (if any) */
  for(i = 0; i < number; i++)
    {
      /* Does the name match ? */
      if(discoveries[i].daddr == self->daddr)
	{
	  /* Yes !!! Get it.. */
	  strlcpy(self->rname, discoveries[i].info, sizeof(self->rname));
	  self->rname[sizeof(self->rname) - 1] = '\0';
	  DEBUG(IRDA_SERV_INFO, "Device 0x%08x is in fact ``%s''.\n",
		self->daddr, self->rname);
	  kfree(discoveries);
	  DEXIT(IRDA_SERV_TRACE, "\n");
	  return 0;
	}
    }
  /* No luck ! */
  DEXIT(IRDA_SERV_INFO, ": cannot discover device 0x%08x !!!\n", self->daddr);
  kfree(discoveries);
  return -EADDRNOTAVAIL;
}

/*------------------------------------------------------------------*/
/*
 * Function irda_find_socket (self)
 *
 *    Find the correct IrNET socket
 *
 * Look into the list of IrNET sockets and finds one with the right
 * properties...
 */
static inline irnet_socket *
irnet_find_socket(irnet_socket *	self)
{
  irnet_socket *	new = (irnet_socket *) NULL;
  int			err;

  DENTER(IRDA_SERV_TRACE, "(self=0x%p)\n", self);

  /* Get the addresses of the requester */
  self->daddr = irttp_get_daddr(self->tsap);
  self->saddr = irttp_get_saddr(self->tsap);

  /* Try to get the IrDA nickname of the requester */
  err = irnet_daddr_to_dname(self);

  /* Protect access to the instance list
   * (lock is released before returning) */
  spin_lock_bh(&irnet_server.spinlock);

  /* So now, try to get an socket having specifically
   * requested that nickname */
  if(err == 0)
    {
      new = (irnet_socket *) hashbin_find(irnet_server.list,
					  0, self->rname);
      if(new)
	DEBUG(IRDA_SERV_INFO, "Socket 0x%p matches rname ``%s''.\n",
	      new, new->rname);
    }

  /* If no name matches, try to find an socket by the destination address */
  /* It can be either the requested destination address (set via the
   * control channel), or the current destination address if the
   * socket is in the middle of a connection request */
  if(new == (irnet_socket *) NULL)
    {
      new = (irnet_socket *) hashbin_get_first(irnet_server.list);
      while(new !=(irnet_socket *) NULL)
	{
	  /* Does it have the same address ? */
	  if((new->rdaddr == self->daddr) || (new->daddr == self->daddr))
	    {
	      /* Yes !!! Get it.. */
	      DEBUG(IRDA_SERV_INFO, "Socket 0x%p matches daddr %#08x.\n",
		    new, self->daddr);
	      break;
	    }
	  new = (irnet_socket *) hashbin_get_next(irnet_server.list);
	}
    }

  /* If we don't have any socket, get the first unconnected socket */
  if(new == (irnet_socket *) NULL)
    {
      new = (irnet_socket *) hashbin_get_first(irnet_server.list);
      while(new !=(irnet_socket *) NULL)
	{
	  /* Is it available ? */
	  if(!(test_bit(0, &new->ttp_open)) && (new->rdaddr == DEV_ADDR_ANY) &&
	     (new->rname[0] == '\0') && (new->ppp_open))
	    {
	      /* Yes !!! Get it.. */
	      DEBUG(IRDA_SERV_INFO, "Socket 0x%p is free.\n",
		    new);
	      break;
	    }
	  new = (irnet_socket *) hashbin_get_next(irnet_server.list);
	}
    }

  /* Spin lock end */
  spin_unlock_bh(&irnet_server.spinlock);

  DEXIT(IRDA_SERV_TRACE, " - new = 0x%p\n", new);
  /* May be NULL if nothing matched */
  return new;
}

/*------------------------------------------------------------------*/
/*
 * Function irda_connect_socket (self)
 *
 *    Connect an incoming connection to the socket
 *
 */
static inline int
irnet_connect_socket(irnet_socket *	server,
		     irnet_socket *	new,
		     struct qos_info *	qos,
		     __u32		max_sdu_size,
		     __u8		max_header_size)
{
  DENTER(IRDA_SERV_TRACE, "(server=0x%p, new=0x%p)\n",
	 server, new);

  /* Now attach up the new socket */
  new->tsap = irttp_dup(server->tsap, new);
  DABORT(new->tsap == NULL, -1, IRDA_SERV_ERROR, "dup failed!\n");

  /* Set up all the relevant parameters on the new socket */
  new->stsap_sel = new->tsap->stsap_sel;
  new->dtsap_sel = new->tsap->dtsap_sel;
  new->saddr = irttp_get_saddr(new->tsap);
  new->daddr = irttp_get_daddr(new->tsap);

  new->max_header_size = max_header_size;
  new->max_sdu_size_tx = max_sdu_size;
  new->max_data_size   = max_sdu_size;
#ifdef STREAM_COMPAT
  /* If we want to receive "stream sockets" */
  if(max_sdu_size == 0)
    new->max_data_size = irttp_get_max_seg_size(new->tsap);
#endif /* STREAM_COMPAT */

  /* Clean up the original one to keep it in listen state */
  irttp_listen(server->tsap);

  /* Send a connection response on the new socket */
  irttp_connect_response(new->tsap, new->max_sdu_size_rx, NULL);

  /* Allow PPP to send its junk over the new socket... */
  set_bit(0, &new->ttp_open);

  /* Not connecting anymore, and clean up last possible remains
   * of connection attempts on the socket */
  clear_bit(0, &new->ttp_connect);
  if(new->iriap)
    {
      iriap_close(new->iriap);
      new->iriap = NULL;
    }
  if(new->discoveries != NULL)
    {
      kfree(new->discoveries);
      new->discoveries = NULL;
    }

#ifdef CONNECT_INDIC_KICK
  /* As currently we don't block packets in ppp_irnet_send() while passive,
   * this is not really needed...
   * Also, not doing it give IrDA a chance to finish the setup properly
   * before being swamped with packets... */
  ppp_output_wakeup(&new->chan);
#endif /* CONNECT_INDIC_KICK */

  /* Notify the control channel */
  irnet_post_event(new, IRNET_CONNECT_FROM,
		   new->saddr, new->daddr, server->rname, 0);

  DEXIT(IRDA_SERV_TRACE, "\n");
  return 0;
}

/*------------------------------------------------------------------*/
/*
 * Function irda_disconnect_server (self)
 *
 *    Cleanup the server socket when the incoming connection abort
 *
 */
static inline void
irnet_disconnect_server(irnet_socket *	self,
			struct sk_buff *skb)
{
  DENTER(IRDA_SERV_TRACE, "(self=0x%p)\n", self);

  /* Put the received packet in the black hole */
  kfree_skb(skb);

#ifdef FAIL_SEND_DISCONNECT
  /* Tell the other party we don't want to be connected */
  /* Hum... Is it the right thing to do ? And do we need to send
   * a connect response before ? It looks ok without this... */
  irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
#endif /* FAIL_SEND_DISCONNECT */

  /* Notify the control channel (see irnet_find_socket()) */
  irnet_post_event(NULL, IRNET_REQUEST_FROM,
		   self->saddr, self->daddr, self->rname, 0);

  /* Clean up the server to keep it in listen state */
  irttp_listen(self->tsap);

  DEXIT(IRDA_SERV_TRACE, "\n");
}

/*------------------------------------------------------------------*/
/*
 * Function irda_setup_server (self)
 *
 *    Create a IrTTP server and set it up...
 *
 * Register the IrLAN hint bit, create a IrTTP instance for us,
 * set all the IrTTP callbacks and create an IrIAS entry...
 */
static inline int
irnet_setup_server(void)
{
  __u16		hints;

  DENTER(IRDA_SERV_TRACE, "()\n");

  /* Initialise the regular socket part of the server */
  irda_irnet_create(&irnet_server.s);

  /* Open a local TSAP (an IrTTP instance) for the server */
  /* NOTE(review): return value of irnet_open_tsap() is ignored here -
   * TODO confirm a failure at module init is acceptable/impossible */
  irnet_open_tsap(&irnet_server.s);

  /* PPP part setup */
  irnet_server.s.ppp_open = 0;
  irnet_server.s.chan.private = NULL;
  irnet_server.s.file = NULL;

  /* Get the hint bit corresponding to IrLAN */
  /* Note : we overload the IrLAN hint bit. As it is only a "hint", and as
   * we provide roughly the same functionality as IrLAN, this is ok.
   * In fact, the situation is similar as JetSend overloading the Obex hint
   */
  hints = irlmp_service_to_hint(S_LAN);

#ifdef ADVERTISE_HINT
  /* Register with IrLMP as a service (advertise our hint bit) */
  irnet_server.skey = irlmp_register_service(hints);
#endif /* ADVERTISE_HINT */

  /* Register with LM-IAS (so that people can connect to us) */
  irnet_server.ias_obj = irias_new_object(IRNET_SERVICE_NAME, jiffies);
  irias_add_integer_attrib(irnet_server.ias_obj, IRNET_IAS_VALUE,
			   irnet_server.s.stsap_sel, IAS_KERNEL_ATTR);
  irias_insert_object(irnet_server.ias_obj);

#ifdef DISCOVERY_EVENTS
  /* Tell IrLMP we want to be notified of newly discovered nodes */
  irlmp_update_client(irnet_server.s.ckey, hints,
		      irnet_discovery_indication, irnet_expiry_indication,
		      (void *) &irnet_server.s);
#endif

  DEXIT(IRDA_SERV_TRACE, " - self=0x%p\n", &irnet_server.s);
  return 0;
}

/*------------------------------------------------------------------*/
/*
 * Function irda_destroy_server (self)
 *
 *    Destroy the IrTTP server...
 *
 * Reverse of the previous function...
 */
static inline void
irnet_destroy_server(void)
{
  DENTER(IRDA_SERV_TRACE, "()\n");

#ifdef ADVERTISE_HINT
  /* Unregister with IrLMP */
  irlmp_unregister_service(irnet_server.skey);
#endif /* ADVERTISE_HINT */

  /* Unregister with LM-IAS */
  if(irnet_server.ias_obj)
    irias_delete_object(irnet_server.ias_obj);

  /* Cleanup the socket part */
  irda_irnet_destroy(&irnet_server.s);

  DEXIT(IRDA_SERV_TRACE, "\n");
}


/************************ IRDA-TTP CALLBACKS ************************/
/*
 * When we create a IrTTP instance, we pass to it a set of callbacks
 * that IrTTP will call in case of various events.
 * We take care of those events here.
 */

/*------------------------------------------------------------------*/
/*
 * Function irnet_data_indication (instance, sap, skb)
 *
 *    Received some data from TinyTP. Just queue it on the receive queue
 *
 */
static int
irnet_data_indication(void *	instance,
		      void *	sap,
		      struct sk_buff *skb)
{
  irnet_socket *	ap = (irnet_socket *) instance;
  unsigned char *	p;
  int			code = 0;

  DENTER(IRDA_TCB_TRACE, "(self/ap=0x%p, skb=0x%p)\n",
	 ap, skb);
  DASSERT(skb != NULL, 0, IRDA_CB_ERROR, "skb is NULL !!!\n");

  /* Check is ppp is ready to receive our packet */
  if(!ap->ppp_open)
    {
      DERROR(IRDA_CB_ERROR, "PPP not ready, dropping packet...\n");
      /* When we return error, TTP will need to requeue the skb and
       * will stop the sender. IrTTP will stall until we send it a
       * flow control request... */
      return -ENOMEM;
    }

  /* strip address/control field if present */
  p = skb->data;
  if((p[0] == PPP_ALLSTATIONS) && (p[1] == PPP_UI))
    {
      /* chop off address/control */
      if(skb->len < 3)
	goto err_exit;
      p = skb_pull(skb, 2);
    }

  /* decompress protocol field if compressed */
  if(p[0] & 1)
    {
      /* protocol is compressed */
      skb_push(skb, 1)[0] = 0;
    }
  else
    if(skb->len < 2)
      goto err_exit;

  /* pass to generic ppp layer */
  /* Note : how do I know if ppp can accept or not the packet ? This is
   * essential if I want to manage flow control smoothly... */
  ppp_input(&ap->chan, skb);

  DEXIT(IRDA_TCB_TRACE, "\n");
  return 0;

 err_exit:
  DERROR(IRDA_CB_ERROR, "Packet too small, dropping...\n");
  kfree_skb(skb);
  ppp_input_error(&ap->chan, code);
  return 0;	/* Don't return an error code, only for flow control... */
}

/*------------------------------------------------------------------*/
/*
 * Function irnet_disconnect_indication (instance, sap, reason, skb)
 *
 *    Connection has been closed. Check reason to find out why
 *
 * Note : there are many cases where we come here :
 *	o attempted to connect, timeout
 *	o connected, link is broken, LAP has timeout
 *	o connected, other side close the link
 *	o connection request on the server not handled
 */
static void
irnet_disconnect_indication(void *	instance,
			    void *	sap,
			    LM_REASON	reason,
			    struct sk_buff *skb)
{
  irnet_socket *	self = (irnet_socket *) instance;
  int			test_open;
  int			test_connect;

  DENTER(IRDA_TCB_TRACE, "(self=0x%p)\n", self);
  DASSERT(self != NULL, , IRDA_CB_ERROR, "Self is NULL !!!\n");

  /* Don't care about it, but let's not leak it */
  if(skb)
    dev_kfree_skb(skb);

  /* Prevent higher layer from accessing IrTTP */
  test_open = test_and_clear_bit(0, &self->ttp_open);
  /* Not connecting anymore...
   * (note : TSAP is open, so IAP callbacks are no longer pending...) */
  test_connect = test_and_clear_bit(0, &self->ttp_connect);

  /* If both self->ttp_open and self->ttp_connect are NULL, it mean that we
   * have a race condition with irda_irnet_destroy() or
   * irnet_connect_indication(), so don't mess up tsap...
   */
  if(!(test_open || test_connect))
    {
      DERROR(IRDA_CB_ERROR, "Race condition detected...\n");
      return;
    }

  /* If we were active, notify the control channel */
  if(test_open)
    irnet_post_event(self, IRNET_DISCONNECT_FROM,
		     self->saddr, self->daddr, self->rname, 0);
  else
    /* If we were trying to connect, notify the control channel */
    if((self->tsap) && (self != &irnet_server.s))
      irnet_post_event(self, IRNET_NOANSWER_FROM,
		       self->saddr, self->daddr, self->rname, 0);

  /* Close our IrTTP connection, cleanup tsap */
  if((self->tsap) && (self != &irnet_server.s))
    {
      DEBUG(IRDA_CB_INFO, "Closing our TTP connection.\n");
      irttp_close_tsap(self->tsap);
      self->tsap = NULL;
    }
  /* Cleanup the socket in case we want to reconnect in ppp_output_wakeup() */
  self->stsap_sel = 0;
  self->daddr = DEV_ADDR_ANY;
  self->tx_flow = FLOW_START;

  /* Deal with the ppp instance if it's still alive */
  if(self->ppp_open)
    {
      if(test_open)
	{
	  /* ppp_unregister_channel() wants a user context. */
	  schedule_work(&self->disconnect_work);
	}
      else
	{
	  /* If we were trying to connect, flush (drain) ppp_generic
	   * Tx queue (most often we have blocked it), which will
	   * trigger an other attempt to connect. If we are passive,
	   * this will empty the Tx queue after last try. */
	  ppp_output_wakeup(&self->chan);
	}
    }

  DEXIT(IRDA_TCB_TRACE, "\n");
}

/*------------------------------------------------------------------*/
/*
 * Function irnet_connect_confirm (instance, sap, qos, max_sdu_size, skb)
 *
 *    Connections has been confirmed by the remote device
 *
 */
static void
irnet_connect_confirm(void *	instance,
		      void *	sap,
		      struct qos_info *qos,
		      __u32	max_sdu_size,
		      __u8	max_header_size,
		      struct sk_buff *skb)
{
  irnet_socket *	self = (irnet_socket *) instance;

  DENTER(IRDA_TCB_TRACE, "(self=0x%p)\n", self);

  /* Check if socket is closing down (via irda_irnet_destroy()) */
  if(! test_bit(0, &self->ttp_connect))
    {
      DERROR(IRDA_CB_ERROR, "Socket no longer connecting. Ouch !\n");
      return;
    }

  /* How much header space do we need to reserve */
  self->max_header_size = max_header_size;

  /* IrTTP max SDU size in transmit direction */
  self->max_sdu_size_tx = max_sdu_size;
  self->max_data_size = max_sdu_size;
#ifdef STREAM_COMPAT
  if(max_sdu_size == 0)
    self->max_data_size = irttp_get_max_seg_size(self->tsap);
#endif /* STREAM_COMPAT */

  /* At this point, IrLMP has assigned our source address */
  self->saddr = irttp_get_saddr(self->tsap);

  /* Allow higher layer to access IrTTP */
  set_bit(0, &self->ttp_open);
  clear_bit(0, &self->ttp_connect);	/* Not racy, IrDA traffic is serial */
  /* Give a kick in the ass of ppp_generic so that he sends us some data */
  ppp_output_wakeup(&self->chan);

  /* Check size of received packet */
  if(skb->len > 0)
    {
#ifdef PASS_CONNECT_PACKETS
      DEBUG(IRDA_CB_INFO, "Passing connect packet to PPP.\n");
      /* Try to pass it to PPP */
      irnet_data_indication(instance, sap, skb);
#else /* PASS_CONNECT_PACKETS */
      DERROR(IRDA_CB_ERROR, "Dropping non empty packet.\n");
      kfree_skb(skb);	/* Note : will be optimised with other kfree... */
#endif /* PASS_CONNECT_PACKETS */
    }
  else
    kfree_skb(skb);

  /* Notify the control channel */
  irnet_post_event(self, IRNET_CONNECT_TO,
		   self->saddr, self->daddr, self->rname, 0);

  DEXIT(IRDA_TCB_TRACE, "\n");
}

/*------------------------------------------------------------------*/
/*
 * Function irnet_flow_indication (instance, sap, flow)
 *
 *    Used by TinyTP to tell us if it can accept more data or not
 *
 */
static void
irnet_flow_indication(void *	instance,
		      void *	sap,
		      LOCAL_FLOW flow)
{
  irnet_socket *	self = (irnet_socket *) instance;
  LOCAL_FLOW		oldflow = self->tx_flow;

  DENTER(IRDA_TCB_TRACE, "(self=0x%p, flow=%d)\n", self, flow);

  /* Update our state */
  self->tx_flow = flow;

  /* Check what IrTTP want us to do... */
  switch(flow)
    {
    case FLOW_START:
      DEBUG(IRDA_CB_INFO, "IrTTP wants us to start again\n");
      /* Check if we really need to wake up PPP */
      if(oldflow == FLOW_STOP)
	ppp_output_wakeup(&self->chan);
      else
	DEBUG(IRDA_CB_INFO, "But we were already transmitting !!!\n");
      break;
    case FLOW_STOP:
      DEBUG(IRDA_CB_INFO, "IrTTP wants us to slow down\n");
      break;
    default:
      DEBUG(IRDA_CB_INFO, "Unknown flow command!\n");
      break;
    }

  DEXIT(IRDA_TCB_TRACE, "\n");
}

/*------------------------------------------------------------------*/
/*
 * Function irnet_status_indication (instance, sap, reason, skb)
 *
 *    Link (IrLAP) status report.
 *
 */
static void
irnet_status_indication(void *	instance,
			LINK_STATUS link,
			LOCK_STATUS lock)
{
  irnet_socket *	self = (irnet_socket *) instance;

  DENTER(IRDA_TCB_TRACE, "(self=0x%p)\n", self);
  DASSERT(self != NULL, , IRDA_CB_ERROR, "Self is NULL !!!\n");

  /* We can only get this event if we are connected */
  switch(link)
    {
    case STATUS_NO_ACTIVITY:
      irnet_post_event(self, IRNET_BLOCKED_LINK,
		       self->saddr, self->daddr, self->rname, 0);
      break;
    default:
      DEBUG(IRDA_CB_INFO, "Unknown status...\n");
    }

  DEXIT(IRDA_TCB_TRACE, "\n");
}

/*------------------------------------------------------------------*/
/*
 * Function irnet_connect_indication(instance, sap, qos, max_sdu_size, userdata)
 *
 *    Incoming connection
 *
 * In theory, this function is called only on the server socket.
 * Some other node is attempting to connect to the IrNET service, and has
 * sent a connection request on our server socket.
 * We just redirect the connection to the relevant IrNET socket.
 *
 * Note : we also make sure that between 2 irnet nodes, there can
 * exist only one irnet connection.
 */
static void
irnet_connect_indication(void *		instance,
			 void *		sap,
			 struct qos_info *qos,
			 __u32		max_sdu_size,
			 __u8		max_header_size,
			 struct sk_buff *skb)
{
  irnet_socket *	server = &irnet_server.s;
  irnet_socket *	new = (irnet_socket *) NULL;

  DENTER(IRDA_TCB_TRACE, "(server=0x%p)\n", server);
  DASSERT(instance == &irnet_server, , IRDA_CB_ERROR,
	  "Invalid instance (0x%p) !!!\n", instance);
  DASSERT(sap == irnet_server.s.tsap, , IRDA_CB_ERROR, "Invalid sap !!!\n");

  /* Try to find the most appropriate IrNET socket */
  new = irnet_find_socket(server);

  /* After all this hard work, do we have an socket ? */
  if(new == (irnet_socket *) NULL)
    {
      DEXIT(IRDA_CB_INFO, ": No socket waiting for this connection.\n");
      irnet_disconnect_server(server, skb);
      return;
    }

  /* Is the socket already busy ? */
  if(test_bit(0, &new->ttp_open))
    {
      DEXIT(IRDA_CB_INFO, ": Socket already connected.\n");
      irnet_disconnect_server(server, skb);
      return;
    }

  /* The following code is a bit tricky, so need comments ;-)
   */
  /* If ttp_connect is set, the socket is trying to connect to the other
   * end and may have sent a IrTTP connection request and is waiting for
   * a connection response (that may never come).
   * Now, the pain is that the socket may have opened a tsap and is
   * waiting on it, while the other end is trying to connect to it on
   * another tsap.
   * Because IrNET can be peer to peer, we need to workaround this.
   * Furthermore, the way the irnetd script is implemented, the
   * target will create a second IrNET connection back to the
   * originator and expect the originator to bind this new connection
   * to the original PPPD instance.
   * And of course, if we don't use irnetd, we can have a race when
   * both side try to connect simultaneously, which could leave both
   * connections half closed (yuck).
   * Conclusions :
   *	1) The "originator" must accept the new connection and get rid
   *	   of the old one so that irnetd works
   *	2) One side must deny the new connection to avoid races,
   *	   but both side must agree on which side it is...
   * Most often, the originator is primary at the LAP layer.
   * Jean II
   */
  /* Now, let's look at the way I wrote the test...
   * We need to clear up the ttp_connect flag atomically to prevent
   * irnet_disconnect_indication() to mess up the tsap we are going to close.
   * We want to clear the ttp_connect flag only if we close the tsap,
   * otherwise we will never close it, so we need to check for primary
   * *before* doing the test on the flag.
   * And of course, ALLOW_SIMULT_CONNECT can disable this entirely...
   * Jean II
   */

  /* Socket already connecting ? On primary ? */
  if(0
#ifdef ALLOW_SIMULT_CONNECT
     || ((irttp_is_primary(server->tsap) == 1) &&	/* primary */
	 (test_and_clear_bit(0, &new->ttp_connect)))
#endif /* ALLOW_SIMULT_CONNECT */
     )
    {
      DERROR(IRDA_CB_ERROR, "Socket already connecting, but going to reuse it !\n");

      /* Cleanup the old TSAP if necessary - IrIAP will be cleaned up later */
      if(new->tsap != NULL)
	{
	  /* Close the old connection the new socket was attempting,
	   * so that we can hook it up to the new connection.
	   * It's now safe to do it... */
	  irttp_close_tsap(new->tsap);
	  new->tsap = NULL;
	}
    }
  else
    {
      /* Three options :
       * 1) socket was not connecting or connected : ttp_connect should be 0.
       * 2) we don't want to connect the socket because we are secondary or
       * ALLOW_SIMULT_CONNECT is undefined. ttp_connect should be 1.
       * 3) we are half way in irnet_disconnect_indication(), and it's a
       * nice race condition... Fortunately, we can detect that by checking
       * if tsap is still alive. On the other hand, we can't be in
       * irda_irnet_destroy() otherwise we would not have found this
       * socket in the hashbin.
       * Jean II */
      if((test_bit(0, &new->ttp_connect)) || (new->tsap != NULL))
	{
	  /* Don't mess this socket, somebody else is in charge... */
	  DERROR(IRDA_CB_ERROR, "Race condition detected, socket in use, abort connect...\n");
	  irnet_disconnect_server(server, skb);
	  return;
	}
    }

  /* So : at this point, we have a socket, and it is idle. Good ! */
  irnet_connect_socket(server, new, qos, max_sdu_size, max_header_size);

  /* Check size of received packet */
  if(skb->len > 0)
    {
#ifdef PASS_CONNECT_PACKETS
      DEBUG(IRDA_CB_INFO, "Passing connect packet to PPP.\n");
      /* Try to pass it to PPP */
      irnet_data_indication(new, new->tsap, skb);
#else /* PASS_CONNECT_PACKETS */
      DERROR(IRDA_CB_ERROR, "Dropping non empty packet.\n");
      kfree_skb(skb);	/* Note : will be optimised with other kfree... */
#endif /* PASS_CONNECT_PACKETS */
    }
  else
    kfree_skb(skb);

  DEXIT(IRDA_TCB_TRACE, "\n");
}


/********************** IRDA-IAS/LMP CALLBACKS **********************/
/*
 * These are the callbacks called by other layers of the IrDA stack,
 * mainly LMP for discovery and IAS for name queries.
 */

/*------------------------------------------------------------------*/
/*
 * Function irnet_getvalue_confirm (result, obj_id, value, priv)
 *
 *    Got answer from remote LM-IAS, just connect
 *
 * This is the reply to a IAS query we were doing to find the TSAP of
 * the device we want to connect to.
 * If we have found a valid TSAP, just initiate the TTP connection
 * on this TSAP.
 */
static void
irnet_getvalue_confirm(int	result,
		       __u16	obj_id,
		       struct ias_value *value,
		       void *	priv)
{
  irnet_socket *	self = (irnet_socket *) priv;

  DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self);
  DASSERT(self != NULL, , IRDA_OCB_ERROR, "Self is NULL !!!\n");

  /* Check if already connected (via irnet_connect_socket())
   * or socket is closing down (via irda_irnet_destroy()) */
  if(! test_bit(0, &self->ttp_connect))
    {
      DERROR(IRDA_OCB_ERROR, "Socket no longer connecting. Ouch !\n");
      return;
    }

  /* We probably don't need to make any more queries */
  iriap_close(self->iriap);
  self->iriap = NULL;

  /* Post process the IAS reply */
  self->dtsap_sel = irnet_ias_to_tsap(self, result, value);

  /* If error, just go out */
  if(self->errno)
    {
      clear_bit(0, &self->ttp_connect);
      DERROR(IRDA_OCB_ERROR, "IAS connect failed ! (0x%X)\n", self->errno);
      return;
    }

  DEBUG(IRDA_OCB_INFO, "daddr = %08x, lsap = %d, starting IrTTP connection\n",
	self->daddr, self->dtsap_sel);

  /* Start up TTP - non blocking */
  irnet_connect_tsap(self);

  DEXIT(IRDA_OCB_TRACE, "\n");
}

/*------------------------------------------------------------------*/
/*
 * Function irnet_discovervalue_confirm (result, obj_id, value, priv)
 *
 *    Handle the TSAP discovery procedure state machine.
 *    Got answer from remote LM-IAS, try next device
 *
 * We are doing a TSAP discovery procedure, and we got an answer to
 * a IAS query we were doing to find the TSAP on one of the address
 * in the discovery log.
 *
 * If we have found a valid TSAP for the first time, save it. If it's
 * not the first time we found one, complain.
 *
 * If we have more addresses in the log, just initiate a new query.
 * Note that those query may fail (see irnet_discover_daddr_and_lsap_sel())
 *
 * Otherwise, wrap up the procedure (cleanup), check if we have found
 * any device and connect to it.
 */
static void
irnet_discovervalue_confirm(int		result,
			    __u16	obj_id,
			    struct ias_value *value,
			    void *	priv)
{
  irnet_socket *	self = (irnet_socket *) priv;
  __u8			dtsap_sel;		/* TSAP we are looking for */

  DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self);
  DASSERT(self != NULL, , IRDA_OCB_ERROR, "Self is NULL !!!\n");

  /* Check if already connected (via irnet_connect_socket())
   * or socket is closing down (via irda_irnet_destroy()) */
  if(! test_bit(0, &self->ttp_connect))
    {
      DERROR(IRDA_OCB_ERROR, "Socket no longer connecting. Ouch !\n");
      return;
    }

  /* Post process the IAS reply */
  dtsap_sel = irnet_ias_to_tsap(self, result, value);

  /* Have we got something ?
*/ if(self->errno == 0) { /* We found the requested service */ if(self->daddr != DEV_ADDR_ANY) { DERROR(IRDA_OCB_ERROR, "More than one device in range supports IrNET...\n"); } else { /* First time we found that one, save it ! */ self->daddr = self->discoveries[self->disco_index].daddr; self->dtsap_sel = dtsap_sel; } } /* If no failure */ if((self->errno == -EADDRNOTAVAIL) || (self->errno == 0)) { int ret; /* Search the next node */ ret = irnet_discover_next_daddr(self); if(!ret) { /* In this case, the above request was non-blocking. * We will return here after a while... */ return; } /* In this case, we have processed the last discovery item */ } /* No more queries to be done (failure or last one) */ /* We probably don't need to make any more queries */ iriap_close(self->iriap); self->iriap = NULL; /* No more items : remove the log and signal termination */ DEBUG(IRDA_OCB_INFO, "Cleaning up log (0x%p)\n", self->discoveries); if(self->discoveries != NULL) { /* Cleanup our copy of the discovery log */ kfree(self->discoveries); self->discoveries = NULL; } self->disco_number = -1; /* Check out what we found */ if(self->daddr == DEV_ADDR_ANY) { self->daddr = DEV_ADDR_ANY; clear_bit(0, &self->ttp_connect); DEXIT(IRDA_OCB_TRACE, ": cannot discover IrNET in any device !!!\n"); return; } /* We have a valid address - just connect */ DEBUG(IRDA_OCB_INFO, "daddr = %08x, lsap = %d, starting IrTTP connection\n", self->daddr, self->dtsap_sel); /* Start up TTP - non blocking */ irnet_connect_tsap(self); DEXIT(IRDA_OCB_TRACE, "\n"); } #ifdef DISCOVERY_EVENTS /*------------------------------------------------------------------*/ /* * Function irnet_discovery_indication (discovery) * * Got a discovery indication from IrLMP, post an event * * Note : IrLMP take care of matching the hint mask for us, and also * check if it is a "new" node for us... 
* * As IrLMP filter on the IrLAN hint bit, we get both IrLAN and IrNET * nodes, so it's only at connection time that we will know if the * node support IrNET, IrLAN or both. The other solution is to check * in IAS the PNP ids and service name. * Note : even if a node support IrNET (or IrLAN), it's no guarantee * that we will be able to connect to it, the node might already be * busy... * * One last thing : in some case, this function will trigger duplicate * discovery events. On the other hand, we should catch all * discoveries properly (i.e. not miss one). Filtering duplicate here * is to messy, so we leave that to user space... */ static void irnet_discovery_indication(discinfo_t * discovery, DISCOVERY_MODE mode, void * priv) { irnet_socket * self = &irnet_server.s; DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self); DASSERT(priv == &irnet_server, , IRDA_OCB_ERROR, "Invalid instance (0x%p) !!!\n", priv); DEBUG(IRDA_OCB_INFO, "Discovered new IrNET/IrLAN node %s...\n", discovery->info); /* Notify the control channel */ irnet_post_event(NULL, IRNET_DISCOVER, discovery->saddr, discovery->daddr, discovery->info, get_unaligned((__u16 *)discovery->hints)); DEXIT(IRDA_OCB_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Function irnet_expiry_indication (expiry) * * Got a expiry indication from IrLMP, post an event * * Note : IrLMP take care of matching the hint mask for us, we only * check if it is a "new" node... 
*/ static void irnet_expiry_indication(discinfo_t * expiry, DISCOVERY_MODE mode, void * priv) { irnet_socket * self = &irnet_server.s; DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self); DASSERT(priv == &irnet_server, , IRDA_OCB_ERROR, "Invalid instance (0x%p) !!!\n", priv); DEBUG(IRDA_OCB_INFO, "IrNET/IrLAN node %s expired...\n", expiry->info); /* Notify the control channel */ irnet_post_event(NULL, IRNET_EXPIRE, expiry->saddr, expiry->daddr, expiry->info, get_unaligned((__u16 *)expiry->hints)); DEXIT(IRDA_OCB_TRACE, "\n"); } #endif /* DISCOVERY_EVENTS */ /*********************** PROC ENTRY CALLBACKS ***********************/ /* * We create a instance in the /proc filesystem, and here we take care * of that... */ #ifdef CONFIG_PROC_FS static int irnet_proc_show(struct seq_file *m, void *v) { irnet_socket * self; char * state; int i = 0; /* Get the IrNET server information... */ seq_printf(m, "IrNET server - "); seq_printf(m, "IrDA state: %s, ", (irnet_server.running ? "running" : "dead")); seq_printf(m, "stsap_sel: %02x, ", irnet_server.s.stsap_sel); seq_printf(m, "dtsap_sel: %02x\n", irnet_server.s.dtsap_sel); /* Do we need to continue ? */ if(!irnet_server.running) return 0; /* Protect access to the instance list */ spin_lock_bh(&irnet_server.spinlock); /* Get the sockets one by one... */ self = (irnet_socket *) hashbin_get_first(irnet_server.list); while(self != NULL) { /* Start printing info about the socket. */ seq_printf(m, "\nIrNET socket %d - ", i++); /* First, get the requested configuration */ seq_printf(m, "Requested IrDA name: \"%s\", ", self->rname); seq_printf(m, "daddr: %08x, ", self->rdaddr); seq_printf(m, "saddr: %08x\n", self->rsaddr); /* Second, get all the PPP info */ seq_printf(m, " PPP state: %s", (self->ppp_open ? 
"registered" : "unregistered")); if(self->ppp_open) { seq_printf(m, ", unit: ppp%d", ppp_unit_number(&self->chan)); seq_printf(m, ", channel: %d", ppp_channel_index(&self->chan)); seq_printf(m, ", mru: %d", self->mru); /* Maybe add self->flags ? Later... */ } /* Then, get all the IrDA specific info... */ if(self->ttp_open) state = "connected"; else if(self->tsap != NULL) state = "connecting"; else if(self->iriap != NULL) state = "searching"; else if(self->ttp_connect) state = "weird"; else state = "idle"; seq_printf(m, "\n IrDA state: %s, ", state); seq_printf(m, "daddr: %08x, ", self->daddr); seq_printf(m, "stsap_sel: %02x, ", self->stsap_sel); seq_printf(m, "dtsap_sel: %02x\n", self->dtsap_sel); /* Next socket, please... */ self = (irnet_socket *) hashbin_get_next(irnet_server.list); } /* Spin lock end */ spin_unlock_bh(&irnet_server.spinlock); return 0; } static int irnet_proc_open(struct inode *inode, struct file *file) { return single_open(file, irnet_proc_show, NULL); } static const struct file_operations irnet_proc_fops = { .owner = THIS_MODULE, .open = irnet_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif /* PROC_FS */ /********************** CONFIGURATION/CLEANUP **********************/ /* * Initialisation and teardown of the IrDA part, called at module * insertion and removal... */ /*------------------------------------------------------------------*/ /* * Prepare the IrNET layer for operation... 
*/
int __init
irda_irnet_init(void)
{
  int		err = 0;

  DENTER(MODULE_TRACE, "()\n");

  /* Pure paranoia - should be redundant */
  memset(&irnet_server, 0, sizeof(struct irnet_root));

  /* Setup start of irnet instance list */
  irnet_server.list = hashbin_new(HB_NOLOCK);
  DABORT(irnet_server.list == NULL, -ENOMEM,
	 MODULE_ERROR, "Can't allocate hashbin!\n");
  /* Init spinlock for instance list */
  spin_lock_init(&irnet_server.spinlock);

  /* Initialise control channel */
  init_waitqueue_head(&irnet_events.rwait);
  irnet_events.index = 0;
  /* Init spinlock for event logging */
  spin_lock_init(&irnet_events.spinlock);

#ifdef CONFIG_PROC_FS
  /* Add a /proc file for irnet infos */
  proc_create("irnet", 0, proc_irda, &irnet_proc_fops);
#endif /* CONFIG_PROC_FS */

  /* Setup the IrNET server */
  err = irnet_setup_server();

  if(!err)
    /* We are now functional... */
    irnet_server.running = 1;
  else
    {
      /* Module load is about to fail : undo the partial init above,
       * otherwise the /proc entry and the hashbin are leaked. */
#ifdef CONFIG_PROC_FS
      remove_proc_entry("irnet", proc_irda);
#endif /* CONFIG_PROC_FS */
      /* The list is still empty at this point, no sockets to free */
      hashbin_delete(irnet_server.list, NULL);
      irnet_server.list = NULL;
    }

  DEXIT(MODULE_TRACE, "\n");
  return err;
}

/*------------------------------------------------------------------*/
/*
 * Cleanup at exit...
 */
void __exit
irda_irnet_cleanup(void)
{
  DENTER(MODULE_TRACE, "()\n");

  /* We are no longer there... */
  irnet_server.running = 0;

#ifdef CONFIG_PROC_FS
  /* Remove our /proc file */
  remove_proc_entry("irnet", proc_irda);
#endif /* CONFIG_PROC_FS */

  /* Remove our IrNET server from existence */
  irnet_destroy_server();

  /* Remove all instances of IrNET socket still present */
  hashbin_delete(irnet_server.list, (FREE_FUNC) irda_irnet_destroy);

  DEXIT(MODULE_TRACE, "\n");
}
gpl-2.0
c8813q-dev/android_kernel_huawei_c8813q
drivers/net/wan/lmc/lmc_media.c
13679
34361
/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/inet.h> #include <linux/bitops.h> #include <asm/processor.h> /* Processor type for cache alignment. */ #include <asm/io.h> #include <asm/dma.h> #include <asm/uaccess.h> #include "lmc.h" #include "lmc_var.h" #include "lmc_ioctl.h" #include "lmc_debug.h" #define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1 /* * Copyright (c) 1997-2000 LAN Media Corporation (LMC) * All rights reserved. www.lanmedia.com * * This code is written by: * Andrew Stanley-Jones (asj@cban.com) * Rob Braun (bbraun@vix.com), * Michael Graff (explorer@vix.com) and * Matt Thomas (matt@3am-software.com). * * This software may be used and distributed according to the terms * of the GNU General Public License version 2, incorporated herein by reference. */ /* * protocol independent method. */ static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *); /* * media independent methods to check on media status, link, light LEDs, * etc. 
*/ static void lmc_ds3_init (lmc_softc_t * const); static void lmc_ds3_default (lmc_softc_t * const); static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *); static void lmc_ds3_set_100ft (lmc_softc_t * const, int); static int lmc_ds3_get_link_status (lmc_softc_t * const); static void lmc_ds3_set_crc_length (lmc_softc_t * const, int); static void lmc_ds3_set_scram (lmc_softc_t * const, int); static void lmc_ds3_watchdog (lmc_softc_t * const); static void lmc_hssi_init (lmc_softc_t * const); static void lmc_hssi_default (lmc_softc_t * const); static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *); static void lmc_hssi_set_clock (lmc_softc_t * const, int); static int lmc_hssi_get_link_status (lmc_softc_t * const); static void lmc_hssi_set_link_status (lmc_softc_t * const, int); static void lmc_hssi_set_crc_length (lmc_softc_t * const, int); static void lmc_hssi_watchdog (lmc_softc_t * const); static void lmc_ssi_init (lmc_softc_t * const); static void lmc_ssi_default (lmc_softc_t * const); static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *); static void lmc_ssi_set_clock (lmc_softc_t * const, int); static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *); static int lmc_ssi_get_link_status (lmc_softc_t * const); static void lmc_ssi_set_link_status (lmc_softc_t * const, int); static void lmc_ssi_set_crc_length (lmc_softc_t * const, int); static void lmc_ssi_watchdog (lmc_softc_t * const); static void lmc_t1_init (lmc_softc_t * const); static void lmc_t1_default (lmc_softc_t * const); static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *); static int lmc_t1_get_link_status (lmc_softc_t * const); static void lmc_t1_set_circuit_type (lmc_softc_t * const, int); static void lmc_t1_set_crc_length (lmc_softc_t * const, int); static void lmc_t1_set_clock (lmc_softc_t * const, int); static void lmc_t1_watchdog (lmc_softc_t * const); static void lmc_dummy_set_1 (lmc_softc_t * const, int); static void lmc_dummy_set2_1 
(lmc_softc_t * const, lmc_ctl_t *); static inline void write_av9110_bit (lmc_softc_t *, int); static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32); lmc_media_t lmc_ds3_media = { lmc_ds3_init, /* special media init stuff */ lmc_ds3_default, /* reset to default state */ lmc_ds3_set_status, /* reset status to state provided */ lmc_dummy_set_1, /* set clock source */ lmc_dummy_set2_1, /* set line speed */ lmc_ds3_set_100ft, /* set cable length */ lmc_ds3_set_scram, /* set scrambler */ lmc_ds3_get_link_status, /* get link status */ lmc_dummy_set_1, /* set link status */ lmc_ds3_set_crc_length, /* set CRC length */ lmc_dummy_set_1, /* set T1 or E1 circuit type */ lmc_ds3_watchdog }; lmc_media_t lmc_hssi_media = { lmc_hssi_init, /* special media init stuff */ lmc_hssi_default, /* reset to default state */ lmc_hssi_set_status, /* reset status to state provided */ lmc_hssi_set_clock, /* set clock source */ lmc_dummy_set2_1, /* set line speed */ lmc_dummy_set_1, /* set cable length */ lmc_dummy_set_1, /* set scrambler */ lmc_hssi_get_link_status, /* get link status */ lmc_hssi_set_link_status, /* set link status */ lmc_hssi_set_crc_length, /* set CRC length */ lmc_dummy_set_1, /* set T1 or E1 circuit type */ lmc_hssi_watchdog }; lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */ lmc_ssi_default, /* reset to default state */ lmc_ssi_set_status, /* reset status to state provided */ lmc_ssi_set_clock, /* set clock source */ lmc_ssi_set_speed, /* set line speed */ lmc_dummy_set_1, /* set cable length */ lmc_dummy_set_1, /* set scrambler */ lmc_ssi_get_link_status, /* get link status */ lmc_ssi_set_link_status, /* set link status */ lmc_ssi_set_crc_length, /* set CRC length */ lmc_dummy_set_1, /* set T1 or E1 circuit type */ lmc_ssi_watchdog }; lmc_media_t lmc_t1_media = { lmc_t1_init, /* special media init stuff */ lmc_t1_default, /* reset to default state */ lmc_t1_set_status, /* reset status to state provided */ lmc_t1_set_clock, /* set 
clock source */ lmc_dummy_set2_1, /* set line speed */ lmc_dummy_set_1, /* set cable length */ lmc_dummy_set_1, /* set scrambler */ lmc_t1_get_link_status, /* get link status */ lmc_dummy_set_1, /* set link status */ lmc_t1_set_crc_length, /* set CRC length */ lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */ lmc_t1_watchdog }; static void lmc_dummy_set_1 (lmc_softc_t * const sc, int a) { } static void lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a) { } /* * HSSI methods */ static void lmc_hssi_init (lmc_softc_t * const sc) { sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200; lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK); } static void lmc_hssi_default (lmc_softc_t * const sc) { sc->lmc_miireg16 = LMC_MII16_LED_ALL; sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); } /* * Given a user provided state, set ourselves up to match it. This will * always reset the card if needed. 
*/ static void lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) { if (ctl == NULL) { sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source); lmc_set_protocol (sc, NULL); return; } /* * check for change in clock source */ if (ctl->clock_source && !sc->ictl.clock_source) { sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT); sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT; } else if (!ctl->clock_source && sc->ictl.clock_source) { sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); } lmc_set_protocol (sc, ctl); } /* * 1 == internal, 0 == external */ static void lmc_hssi_set_clock (lmc_softc_t * const sc, int ie) { int old; old = sc->ictl.clock_source; if (ie == LMC_CTL_CLOCK_SOURCE_EXT) { sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK; LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; if(old != ie) printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); } else { sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; if(old != ie) printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); } } /* * return hardware link status. * 0 == link is down, 1 == link is up. 
*/ static int lmc_hssi_get_link_status (lmc_softc_t * const sc) { /* * We're using the same code as SSI since * they're practically the same */ return lmc_ssi_get_link_status(sc); } static void lmc_hssi_set_link_status (lmc_softc_t * const sc, int state) { if (state == LMC_LINK_UP) sc->lmc_miireg16 |= LMC_MII16_HSSI_TA; else sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA; lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * 0 == 16bit, 1 == 32bit */ static void lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state) { if (state == LMC_CTL_CRC_LENGTH_32) { /* 32 bit */ sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; } else { /* 16 bit */ sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } static void lmc_hssi_watchdog (lmc_softc_t * const sc) { /* HSSI is blank */ } /* * DS3 methods */ /* * Set cable length */ static void lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie) { if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT) { sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO; sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT; } else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT) { sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO; sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT; } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } static void lmc_ds3_default (lmc_softc_t * const sc) { sc->lmc_miireg16 = LMC_MII16_LED_ALL; sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT); sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF); sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); } /* * Given a user provided state, set ourselves up to match it. This will * always reset the card if needed. 
*/
static void
lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
{
  /* NULL ctl : re-apply the currently stored configuration. */
  if (ctl == NULL)
    {
      sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length);
      sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff);
      lmc_set_protocol (sc, NULL);

      return;
    }

  /*
   * check for change in cable length setting
   */
  if (ctl->cable_length && !sc->ictl.cable_length)
    lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT);
  else if (!ctl->cable_length && sc->ictl.cable_length)
    lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);

  /*
   * Check for change in scrambler setting (requires reset)
   */
  if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff)
    lmc_ds3_set_scram (sc, LMC_CTL_ON);
  else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff)
    lmc_ds3_set_scram (sc, LMC_CTL_OFF);

  lmc_set_protocol (sc, ctl);
}

static void
lmc_ds3_init (lmc_softc_t * const sc)
{
  int i;

  sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245;

  /* writes zeros everywhere */
  for (i = 0; i < 21; i++)
    {
      lmc_mii_writereg (sc, 0, 17, i);
      lmc_mii_writereg (sc, 0, 18, 0);
    }

  /* set some essential bits */
  lmc_mii_writereg (sc, 0, 17, 1);
  lmc_mii_writereg (sc, 0, 18, 0x25);	/* ser, xtx */

  lmc_mii_writereg (sc, 0, 17, 5);
  lmc_mii_writereg (sc, 0, 18, 0x80);	/* emode */

  lmc_mii_writereg (sc, 0, 17, 14);
  lmc_mii_writereg (sc, 0, 18, 0x30);	/* rcgen, tcgen */

  /* clear counters and latched bits */
  for (i = 0; i < 21; i++)
    {
      lmc_mii_writereg (sc, 0, 17, i);
      lmc_mii_readreg (sc, 0, 18);
    }
}

/*
 * 1 == DS3 payload scrambled, 0 == not scrambled
 */
static void
lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
{
  if (ie == LMC_CTL_ON)
    {
      sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM;
      sc->ictl.scrambler_onoff = LMC_CTL_ON;
    }
  else
    {
      sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM;
      sc->ictl.scrambler_onoff = LMC_CTL_OFF;
    }

  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
}

/*
 * return hardware link status.
 * 0 == link is down, 1 == link is up.
 */
static int
lmc_ds3_get_link_status (lmc_softc_t * const sc)
{
  u16 link_status, link_status_11;
  int ret = 1;

  /* Framer register 7 holds the receive alarm/status bits. */
  lmc_mii_writereg (sc, 0, 17, 7);
  link_status = lmc_mii_readreg (sc, 0, 18);

  /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
   * led0 yellow = far-end adapter is in Red alarm condition
   * led1 blue   = received an Alarm Indication signal
   *               (upstream failure)
   * led2 Green  = power to adapter, Gate Array loaded & driver
   *               attached
   * led3 red    = Loss of Signal (LOS) or out of frame (OOF)
   *               conditions detected on T3 receive signal
   */

  lmc_led_on(sc, LMC_DS3_LED2);

  if ((link_status & LMC_FRAMER_REG0_DLOS) ||
      (link_status & LMC_FRAMER_REG0_OOFS)){
    ret = 0;
    if(sc->last_led_err[3] != 1){
      u16 r1;
      lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
      r1 = lmc_mii_readreg (sc, 0, 18);
      r1 &= 0xfe;
      lmc_mii_writereg(sc, 0, 18, r1);
      printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name);
    }
    lmc_led_on(sc, LMC_DS3_LED3);	/* turn on red LED */
    sc->last_led_err[3] = 1;
  }
  else {
    lmc_led_off(sc, LMC_DS3_LED3);	/* turn off red LED */
    if(sc->last_led_err[3] == 1){
      u16 r1;
      lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
      r1 = lmc_mii_readreg (sc, 0, 18);
      r1 |= 0x01;
      lmc_mii_writereg(sc, 0, 18, r1);
    }
    sc->last_led_err[3] = 0;
  }

  /* Register 0x10 carries the far-end (X-bit) indication. */
  lmc_mii_writereg(sc, 0, 17, 0x10);
  link_status_11 = lmc_mii_readreg(sc, 0, 18);
  if((link_status & LMC_FRAMER_REG0_AIS) ||
     (link_status_11 & LMC_FRAMER_REG10_XBIT)) {
    ret = 0;
    if(sc->last_led_err[0] != 1){
      printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name);
      printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name);
    }
    lmc_led_on(sc, LMC_DS3_LED0);
    sc->last_led_err[0] = 1;
  }
  else {
    lmc_led_off(sc, LMC_DS3_LED0);
    sc->last_led_err[0] = 0;
  }

  /* Register 9 : blue alarm (all ones) detection. */
  lmc_mii_writereg (sc, 0, 17, 9);
  link_status = lmc_mii_readreg (sc, 0, 18);

  if(link_status & LMC_FRAMER_REG9_RBLUE){
    ret = 0;
    if(sc->last_led_err[1] != 1){
      printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name);
    }
    lmc_led_on(sc, LMC_DS3_LED1);
    sc->last_led_err[1] = 1;
  }
  else {
    lmc_led_off(sc, LMC_DS3_LED1);
    sc->last_led_err[1] = 0;
  }

  return ret;
}

/*
 * 0 == 16bit, 1 == 32bit
 */
static void
lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state)
{
  if (state == LMC_CTL_CRC_LENGTH_32)
    {
      /* 32 bit */
      sc->lmc_miireg16 |= LMC_MII16_DS3_CRC;
      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
    }
  else
    {
      /* 16 bit */
      sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC;
      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
    }

  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
}

static void
lmc_ds3_watchdog (lmc_softc_t * const sc)
{
  /* DS3 needs no periodic maintenance. */
}


/*
 *  SSI methods
 */

static void
lmc_ssi_init(lmc_softc_t * const sc)
{
  u16 mii17;
  int cable;

  sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;

  /* The attached cable type is readable from MII register 17. */
  mii17 = lmc_mii_readreg(sc, 0, 17);

  cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
  sc->ictl.cable_type = cable;

  lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);
}

static void
lmc_ssi_default (lmc_softc_t * const sc)
{
  sc->lmc_miireg16 = LMC_MII16_LED_ALL;

  /*
   * make TXCLOCK always be an output
   */
  lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);

  sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
  sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
  sc->lmc_media->set_speed (sc, NULL);
  sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
}

/*
 * Given a user provided state, set ourselves up to match it. This will
 * always reset the card if needed.
*/ static void lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) { if (ctl == NULL) { sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source); sc->lmc_media->set_speed (sc, &sc->ictl); lmc_set_protocol (sc, NULL); return; } /* * check for change in clock source */ if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT) { sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT); sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT; } else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT) { sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; } if (ctl->clock_rate != sc->ictl.clock_rate) sc->lmc_media->set_speed (sc, ctl); lmc_set_protocol (sc, ctl); } /* * 1 == internal, 0 == external */ static void lmc_ssi_set_clock (lmc_softc_t * const sc, int ie) { int old; old = ie; if (ie == LMC_CTL_CLOCK_SOURCE_EXT) { sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; if(ie != old) printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); } else { sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK; LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; if(ie != old) printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); } } static void lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl) { lmc_ctl_t *ictl = &sc->ictl; lmc_av9110_t *av; /* original settings for clock rate of: * 100 Khz (8,25,0,0,2) were incorrect * they should have been 80,125,1,3,3 * There are 17 param combinations to produce this freq. * For 1.5 Mhz use 120,100,1,1,2 (226 param. 
combinations) */ if (ctl == NULL) { av = &ictl->cardspec.ssi; ictl->clock_rate = 1500000; av->f = ictl->clock_rate; av->n = 120; av->m = 100; av->v = 1; av->x = 1; av->r = 2; write_av9110 (sc, av->n, av->m, av->v, av->x, av->r); return; } av = &ctl->cardspec.ssi; if (av->f == 0) return; ictl->clock_rate = av->f; /* really, this is the rate we are */ ictl->cardspec.ssi = *av; write_av9110 (sc, av->n, av->m, av->v, av->x, av->r); } /* * return hardware link status. * 0 == link is down, 1 == link is up. */ static int lmc_ssi_get_link_status (lmc_softc_t * const sc) { u16 link_status; u32 ticks; int ret = 1; int hw_hdsk = 1; /* * missing CTS? Hmm. If we require CTS on, we may never get the * link to come up, so omit it in this test. * * Also, it seems that with a loopback cable, DCD isn't asserted, * so just check for things like this: * DSR _must_ be asserted. * One of DCD or CTS must be asserted. */ /* LMC 1000 (SSI) LED definitions * led0 Green = power to adapter, Gate Array loaded & * driver attached * led1 Green = DSR and DTR and RTS and CTS are set * led2 Green = Cable detected * led3 red = No timing is available from the * cable or the on-board frequency * generator. */ link_status = lmc_mii_readreg (sc, 0, 16); /* Is the transmit clock still available */ ticks = LMC_CSR_READ (sc, csr_gp_timer); ticks = 0x0000ffff - (ticks & 0x0000ffff); lmc_led_on (sc, LMC_MII16_LED0); /* ====== transmit clock determination ===== */ if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) { lmc_led_off(sc, LMC_MII16_LED3); } else if (ticks == 0 ) { /* no clock found ? 
*/ ret = 0; if (sc->last_led_err[3] != 1) { sc->extra_stats.tx_lossOfClockCnt++; printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); } sc->last_led_err[3] = 1; lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ } else { if(sc->last_led_err[3] == 1) printk(KERN_WARNING "%s: Clock Returned\n", sc->name); sc->last_led_err[3] = 0; lmc_led_off (sc, LMC_MII16_LED3); /* turn OFF red LED */ } if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */ ret = 0; hw_hdsk = 0; } #ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){ ret = 0; hw_hdsk = 0; } #endif if(hw_hdsk == 0){ if(sc->last_led_err[1] != 1) printk(KERN_WARNING "%s: DSR not asserted\n", sc->name); sc->last_led_err[1] = 1; lmc_led_off(sc, LMC_MII16_LED1); } else { if(sc->last_led_err[1] != 0) printk(KERN_WARNING "%s: DSR now asserted\n", sc->name); sc->last_led_err[1] = 0; lmc_led_on(sc, LMC_MII16_LED1); } if(ret == 1) { lmc_led_on(sc, LMC_MII16_LED2); /* Over all good status? 
*/ } return ret; } static void lmc_ssi_set_link_status (lmc_softc_t * const sc, int state) { if (state == LMC_LINK_UP) { sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS); printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS); } else { sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS); printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS); } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * 0 == 16bit, 1 == 32bit */ static void lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state) { if (state == LMC_CTL_CRC_LENGTH_32) { /* 32 bit */ sc->lmc_miireg16 |= LMC_MII16_SSI_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4; } else { /* 16 bit */ sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2; } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * These are bits to program the ssi frequency generator */ static inline void write_av9110_bit (lmc_softc_t * sc, int c) { /* * set the data bit as we need it. */ sc->lmc_gpio &= ~(LMC_GEP_CLK); if (c & 0x01) sc->lmc_gpio |= LMC_GEP_DATA; else sc->lmc_gpio &= ~(LMC_GEP_DATA); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); /* * set the clock to high */ sc->lmc_gpio |= LMC_GEP_CLK; LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); /* * set the clock to low again. */ sc->lmc_gpio &= ~(LMC_GEP_CLK); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); } static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r) { int i; #if 0 printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n", LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r); #endif sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR; sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); /* * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK * as outputs. 
*/ lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_SSI_GENERATOR)); sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); /* * a shifting we will go... */ for (i = 0; i < 7; i++) write_av9110_bit (sc, n >> i); for (i = 0; i < 7; i++) write_av9110_bit (sc, m >> i); for (i = 0; i < 1; i++) write_av9110_bit (sc, v >> i); for (i = 0; i < 2; i++) write_av9110_bit (sc, x >> i); for (i = 0; i < 2; i++) write_av9110_bit (sc, r >> i); for (i = 0; i < 5; i++) write_av9110_bit (sc, 0x17 >> i); /* * stop driving serial-related signals */ lmc_gpio_mkinput (sc, (LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_SSI_GENERATOR)); } static void lmc_ssi_watchdog(lmc_softc_t * const sc) { u16 mii17 = lmc_mii_readreg(sc, 0, 17); if (((mii17 >> 3) & 7) == 7) lmc_led_off(sc, LMC_MII16_LED2); else lmc_led_on(sc, LMC_MII16_LED2); } /* * T1 methods */ /* * The framer regs are multiplexed through MII regs 17 & 18 * write the register address to MII reg 17 and the * data to MII reg 18. */ static void lmc_t1_write (lmc_softc_t * const sc, int a, int d) { lmc_mii_writereg (sc, 0, 17, a); lmc_mii_writereg (sc, 0, 18, d); } /* Save a warning static int lmc_t1_read (lmc_softc_t * const sc, int a) { lmc_mii_writereg (sc, 0, 17, a); return lmc_mii_readreg (sc, 0, 18); } */ static void lmc_t1_init (lmc_softc_t * const sc) { u16 mii16; int i; sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; mii16 = lmc_mii_readreg (sc, 0, 16); /* reset 8370 */ mii16 &= ~LMC_MII16_T1_RST; lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST); lmc_mii_writereg (sc, 0, 16, mii16); /* set T1 or E1 line. 
Uses sc->lmcmii16 reg in function so update it */ sc->lmc_miireg16 = mii16; lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1); mii16 = sc->lmc_miireg16; lmc_t1_write (sc, 0x01, 0x1B); /* CR0 - primary control */ lmc_t1_write (sc, 0x02, 0x42); /* JAT_CR - jitter atten config */ lmc_t1_write (sc, 0x14, 0x00); /* LOOP - loopback config */ lmc_t1_write (sc, 0x15, 0x00); /* DL3_TS - external data link timeslot */ lmc_t1_write (sc, 0x18, 0xFF); /* PIO - programmable I/O */ lmc_t1_write (sc, 0x19, 0x30); /* POE - programmable OE */ lmc_t1_write (sc, 0x1A, 0x0F); /* CMUX - clock input mux */ lmc_t1_write (sc, 0x20, 0x41); /* LIU_CR - RX LIU config */ lmc_t1_write (sc, 0x22, 0x76); /* RLIU_CR - RX LIU config */ lmc_t1_write (sc, 0x40, 0x03); /* RCR0 - RX config */ lmc_t1_write (sc, 0x45, 0x00); /* RALM - RX alarm config */ lmc_t1_write (sc, 0x46, 0x05); /* LATCH - RX alarm/err/cntr latch */ lmc_t1_write (sc, 0x68, 0x40); /* TLIU_CR - TX LIU config */ lmc_t1_write (sc, 0x70, 0x0D); /* TCR0 - TX framer config */ lmc_t1_write (sc, 0x71, 0x05); /* TCR1 - TX config */ lmc_t1_write (sc, 0x72, 0x0B); /* TFRM - TX frame format */ lmc_t1_write (sc, 0x73, 0x00); /* TERROR - TX error insert */ lmc_t1_write (sc, 0x74, 0x00); /* TMAN - TX manual Sa/FEBE config */ lmc_t1_write (sc, 0x75, 0x00); /* TALM - TX alarm signal config */ lmc_t1_write (sc, 0x76, 0x00); /* TPATT - TX test pattern config */ lmc_t1_write (sc, 0x77, 0x00); /* TLB - TX inband loopback config */ lmc_t1_write (sc, 0x90, 0x05); /* CLAD_CR - clock rate adapter config */ lmc_t1_write (sc, 0x91, 0x05); /* CSEL - clad freq sel */ lmc_t1_write (sc, 0xA6, 0x00); /* DL1_CTL - DL1 control */ lmc_t1_write (sc, 0xB1, 0x00); /* DL2_CTL - DL2 control */ lmc_t1_write (sc, 0xD0, 0x47); /* SBI_CR - sys bus iface config */ lmc_t1_write (sc, 0xD1, 0x70); /* RSB_CR - RX sys bus config */ lmc_t1_write (sc, 0xD4, 0x30); /* TSB_CR - TX sys bus config */ for (i = 0; i < 32; i++) { lmc_t1_write (sc, 0x0E0 + i, 0x00); /* SBCn - sys bus 
per-channel ctl */ lmc_t1_write (sc, 0x100 + i, 0x00); /* TPCn - TX per-channel ctl */ lmc_t1_write (sc, 0x180 + i, 0x00); /* RPCn - RX per-channel ctl */ } for (i = 1; i < 25; i++) { lmc_t1_write (sc, 0x0E0 + i, 0x0D); /* SBCn - sys bus per-channel ctl */ } mii16 |= LMC_MII16_T1_XOE; lmc_mii_writereg (sc, 0, 16, mii16); sc->lmc_miireg16 = mii16; } static void lmc_t1_default (lmc_softc_t * const sc) { sc->lmc_miireg16 = LMC_MII16_LED_ALL; sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1); sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); /* Right now we can only clock from out internal source */ sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; } /* * Given a user provided state, set ourselves up to match it. This will * always reset the card if needed. */ static void lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) { if (ctl == NULL) { sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type); lmc_set_protocol (sc, NULL); return; } /* * check for change in circuit type */ if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1 && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_E1) sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_E1); else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1 && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1) sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1); lmc_set_protocol (sc, ctl); } /* * return hardware link status. * 0 == link is down, 1 == link is up. 
*/ static int lmc_t1_get_link_status (lmc_softc_t * const sc) { u16 link_status; int ret = 1; /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions * led0 yellow = far-end adapter is in Red alarm condition * led1 blue = received an Alarm Indication signal * (upstream failure) * led2 Green = power to adapter, Gate Array loaded & driver * attached * led3 red = Loss of Signal (LOS) or out of frame (OOF) * conditions detected on T3 receive signal */ lmc_trace(sc->lmc_device, "lmc_t1_get_link_status in"); lmc_led_on(sc, LMC_DS3_LED2); lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS); link_status = lmc_mii_readreg (sc, 0, 18); if (link_status & T1F_RAIS) { /* turn on blue LED */ ret = 0; if(sc->last_led_err[1] != 1){ printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name); } lmc_led_on(sc, LMC_DS3_LED1); sc->last_led_err[1] = 1; } else { if(sc->last_led_err[1] != 0){ printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name); } lmc_led_off (sc, LMC_DS3_LED1); sc->last_led_err[1] = 0; } /* * Yellow Alarm is nasty evil stuff, looks at data patterns * inside the channel and confuses it with HDLC framing * ignore all yellow alarms. 
* * Do listen to MultiFrame Yellow alarm which while implemented * different ways isn't in the channel and hence somewhat * more reliable */ if (link_status & T1F_RMYEL) { ret = 0; if(sc->last_led_err[0] != 1){ printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name); } lmc_led_on(sc, LMC_DS3_LED0); sc->last_led_err[0] = 1; } else { if(sc->last_led_err[0] != 0){ printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name); } lmc_led_off(sc, LMC_DS3_LED0); sc->last_led_err[0] = 0; } /* * Loss of signal and los of frame * Use the green bit to identify which one lit the led */ if(link_status & T1F_RLOF){ ret = 0; if(sc->last_led_err[3] != 1){ printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name); } lmc_led_on(sc, LMC_DS3_LED3); sc->last_led_err[3] = 1; } else { if(sc->last_led_err[3] != 0){ printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name); } if( ! (link_status & T1F_RLOS)) lmc_led_off(sc, LMC_DS3_LED3); sc->last_led_err[3] = 0; } if(link_status & T1F_RLOS){ ret = 0; if(sc->last_led_err[2] != 1){ printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name); } lmc_led_on(sc, LMC_DS3_LED3); sc->last_led_err[2] = 1; } else { if(sc->last_led_err[2] != 0){ printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name); } if( ! 
(link_status & T1F_RLOF)) lmc_led_off(sc, LMC_DS3_LED3); sc->last_led_err[2] = 0; } sc->lmc_xinfo.t1_alarm1_status = link_status; lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS); sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18); lmc_trace(sc->lmc_device, "lmc_t1_get_link_status out"); return ret; } /* * 1 == T1 Circuit Type , 0 == E1 Circuit Type */ static void lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie) { if (ie == LMC_CTL_CIRCUIT_TYPE_T1) { sc->lmc_miireg16 |= LMC_MII16_T1_Z; sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1; printk(KERN_INFO "%s: In T1 Mode\n", sc->name); } else { sc->lmc_miireg16 &= ~LMC_MII16_T1_Z; sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1; printk(KERN_INFO "%s: In E1 Mode\n", sc->name); } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * 0 == 16bit, 1 == 32bit */ static void lmc_t1_set_crc_length (lmc_softc_t * const sc, int state) { if (state == LMC_CTL_CRC_LENGTH_32) { /* 32 bit */ sc->lmc_miireg16 |= LMC_MII16_T1_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4; } else { /* 16 bit */ sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2; } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * 1 == internal, 0 == external */ static void lmc_t1_set_clock (lmc_softc_t * const sc, int ie) { int old; old = ie; if (ie == LMC_CTL_CLOCK_SOURCE_EXT) { sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; if(old != ie) printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); } else { sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK; LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; if(old != ie) printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); } } static void lmc_t1_watchdog (lmc_softc_t * const sc) { } static void lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl) { 
if (!ctl) sc->ictl.keepalive_onoff = LMC_CTL_ON; }
gpl-2.0
alinuredini/nova
drivers/net/wan/lmc/lmc_media.c
13679
34361
/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/inet.h> #include <linux/bitops.h> #include <asm/processor.h> /* Processor type for cache alignment. */ #include <asm/io.h> #include <asm/dma.h> #include <asm/uaccess.h> #include "lmc.h" #include "lmc_var.h" #include "lmc_ioctl.h" #include "lmc_debug.h" #define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1 /* * Copyright (c) 1997-2000 LAN Media Corporation (LMC) * All rights reserved. www.lanmedia.com * * This code is written by: * Andrew Stanley-Jones (asj@cban.com) * Rob Braun (bbraun@vix.com), * Michael Graff (explorer@vix.com) and * Matt Thomas (matt@3am-software.com). * * This software may be used and distributed according to the terms * of the GNU General Public License version 2, incorporated herein by reference. */ /* * protocol independent method. */ static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *); /* * media independent methods to check on media status, link, light LEDs, * etc. 
*/ static void lmc_ds3_init (lmc_softc_t * const); static void lmc_ds3_default (lmc_softc_t * const); static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *); static void lmc_ds3_set_100ft (lmc_softc_t * const, int); static int lmc_ds3_get_link_status (lmc_softc_t * const); static void lmc_ds3_set_crc_length (lmc_softc_t * const, int); static void lmc_ds3_set_scram (lmc_softc_t * const, int); static void lmc_ds3_watchdog (lmc_softc_t * const); static void lmc_hssi_init (lmc_softc_t * const); static void lmc_hssi_default (lmc_softc_t * const); static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *); static void lmc_hssi_set_clock (lmc_softc_t * const, int); static int lmc_hssi_get_link_status (lmc_softc_t * const); static void lmc_hssi_set_link_status (lmc_softc_t * const, int); static void lmc_hssi_set_crc_length (lmc_softc_t * const, int); static void lmc_hssi_watchdog (lmc_softc_t * const); static void lmc_ssi_init (lmc_softc_t * const); static void lmc_ssi_default (lmc_softc_t * const); static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *); static void lmc_ssi_set_clock (lmc_softc_t * const, int); static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *); static int lmc_ssi_get_link_status (lmc_softc_t * const); static void lmc_ssi_set_link_status (lmc_softc_t * const, int); static void lmc_ssi_set_crc_length (lmc_softc_t * const, int); static void lmc_ssi_watchdog (lmc_softc_t * const); static void lmc_t1_init (lmc_softc_t * const); static void lmc_t1_default (lmc_softc_t * const); static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *); static int lmc_t1_get_link_status (lmc_softc_t * const); static void lmc_t1_set_circuit_type (lmc_softc_t * const, int); static void lmc_t1_set_crc_length (lmc_softc_t * const, int); static void lmc_t1_set_clock (lmc_softc_t * const, int); static void lmc_t1_watchdog (lmc_softc_t * const); static void lmc_dummy_set_1 (lmc_softc_t * const, int); static void lmc_dummy_set2_1 
(lmc_softc_t * const, lmc_ctl_t *); static inline void write_av9110_bit (lmc_softc_t *, int); static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32); lmc_media_t lmc_ds3_media = { lmc_ds3_init, /* special media init stuff */ lmc_ds3_default, /* reset to default state */ lmc_ds3_set_status, /* reset status to state provided */ lmc_dummy_set_1, /* set clock source */ lmc_dummy_set2_1, /* set line speed */ lmc_ds3_set_100ft, /* set cable length */ lmc_ds3_set_scram, /* set scrambler */ lmc_ds3_get_link_status, /* get link status */ lmc_dummy_set_1, /* set link status */ lmc_ds3_set_crc_length, /* set CRC length */ lmc_dummy_set_1, /* set T1 or E1 circuit type */ lmc_ds3_watchdog }; lmc_media_t lmc_hssi_media = { lmc_hssi_init, /* special media init stuff */ lmc_hssi_default, /* reset to default state */ lmc_hssi_set_status, /* reset status to state provided */ lmc_hssi_set_clock, /* set clock source */ lmc_dummy_set2_1, /* set line speed */ lmc_dummy_set_1, /* set cable length */ lmc_dummy_set_1, /* set scrambler */ lmc_hssi_get_link_status, /* get link status */ lmc_hssi_set_link_status, /* set link status */ lmc_hssi_set_crc_length, /* set CRC length */ lmc_dummy_set_1, /* set T1 or E1 circuit type */ lmc_hssi_watchdog }; lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */ lmc_ssi_default, /* reset to default state */ lmc_ssi_set_status, /* reset status to state provided */ lmc_ssi_set_clock, /* set clock source */ lmc_ssi_set_speed, /* set line speed */ lmc_dummy_set_1, /* set cable length */ lmc_dummy_set_1, /* set scrambler */ lmc_ssi_get_link_status, /* get link status */ lmc_ssi_set_link_status, /* set link status */ lmc_ssi_set_crc_length, /* set CRC length */ lmc_dummy_set_1, /* set T1 or E1 circuit type */ lmc_ssi_watchdog }; lmc_media_t lmc_t1_media = { lmc_t1_init, /* special media init stuff */ lmc_t1_default, /* reset to default state */ lmc_t1_set_status, /* reset status to state provided */ lmc_t1_set_clock, /* set 
clock source */ lmc_dummy_set2_1, /* set line speed */ lmc_dummy_set_1, /* set cable length */ lmc_dummy_set_1, /* set scrambler */ lmc_t1_get_link_status, /* get link status */ lmc_dummy_set_1, /* set link status */ lmc_t1_set_crc_length, /* set CRC length */ lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */ lmc_t1_watchdog }; static void lmc_dummy_set_1 (lmc_softc_t * const sc, int a) { } static void lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a) { } /* * HSSI methods */ static void lmc_hssi_init (lmc_softc_t * const sc) { sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200; lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK); } static void lmc_hssi_default (lmc_softc_t * const sc) { sc->lmc_miireg16 = LMC_MII16_LED_ALL; sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); } /* * Given a user provided state, set ourselves up to match it. This will * always reset the card if needed. 
*/
static void lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
{
  if (ctl == NULL) {
    sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
    lmc_set_protocol (sc, NULL);
    return;
  }

  /*
   * check for change in clock source
   */
  if (ctl->clock_source && !sc->ictl.clock_source) {
    sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
  } else if (!ctl->clock_source && sc->ictl.clock_source) {
    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
    sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
  }

  lmc_set_protocol (sc, ctl);
}

/*
 * 1 == internal, 0 == external
 */
static void lmc_hssi_set_clock (lmc_softc_t * const sc, int ie)
{
  int old;

  old = sc->ictl.clock_source;
  if (ie == LMC_CTL_CLOCK_SOURCE_EXT) {
    sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK;
    LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
    sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
    if (old != ie)
      printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
  } else {
    sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK);
    LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
    sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
    if (old != ie)
      printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
  }
}

/*
 * return hardware link status.
 * 0 == link is down, 1 == link is up.
*/ static int lmc_hssi_get_link_status (lmc_softc_t * const sc) { /* * We're using the same code as SSI since * they're practically the same */ return lmc_ssi_get_link_status(sc); } static void lmc_hssi_set_link_status (lmc_softc_t * const sc, int state) { if (state == LMC_LINK_UP) sc->lmc_miireg16 |= LMC_MII16_HSSI_TA; else sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA; lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * 0 == 16bit, 1 == 32bit */ static void lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state) { if (state == LMC_CTL_CRC_LENGTH_32) { /* 32 bit */ sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; } else { /* 16 bit */ sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } static void lmc_hssi_watchdog (lmc_softc_t * const sc) { /* HSSI is blank */ } /* * DS3 methods */ /* * Set cable length */ static void lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie) { if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT) { sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO; sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT; } else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT) { sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO; sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT; } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } static void lmc_ds3_default (lmc_softc_t * const sc) { sc->lmc_miireg16 = LMC_MII16_LED_ALL; sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT); sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF); sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); } /* * Given a user provided state, set ourselves up to match it. This will * always reset the card if needed. 
*/ static void lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) { if (ctl == NULL) { sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length); sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff); lmc_set_protocol (sc, NULL); return; } /* * check for change in cable length setting */ if (ctl->cable_length && !sc->ictl.cable_length) lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT); else if (!ctl->cable_length && sc->ictl.cable_length) lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT); /* * Check for change in scrambler setting (requires reset) */ if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff) lmc_ds3_set_scram (sc, LMC_CTL_ON); else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff) lmc_ds3_set_scram (sc, LMC_CTL_OFF); lmc_set_protocol (sc, ctl); } static void lmc_ds3_init (lmc_softc_t * const sc) { int i; sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245; /* writes zeros everywhere */ for (i = 0; i < 21; i++) { lmc_mii_writereg (sc, 0, 17, i); lmc_mii_writereg (sc, 0, 18, 0); } /* set some essential bits */ lmc_mii_writereg (sc, 0, 17, 1); lmc_mii_writereg (sc, 0, 18, 0x25); /* ser, xtx */ lmc_mii_writereg (sc, 0, 17, 5); lmc_mii_writereg (sc, 0, 18, 0x80); /* emode */ lmc_mii_writereg (sc, 0, 17, 14); lmc_mii_writereg (sc, 0, 18, 0x30); /* rcgen, tcgen */ /* clear counters and latched bits */ for (i = 0; i < 21; i++) { lmc_mii_writereg (sc, 0, 17, i); lmc_mii_readreg (sc, 0, 18); } } /* * 1 == DS3 payload scrambled, 0 == not scrambled */ static void lmc_ds3_set_scram (lmc_softc_t * const sc, int ie) { if (ie == LMC_CTL_ON) { sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM; sc->ictl.scrambler_onoff = LMC_CTL_ON; } else { sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM; sc->ictl.scrambler_onoff = LMC_CTL_OFF; } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * return hardware link status. * 0 == link is down, 1 == link is up. 
*/ static int lmc_ds3_get_link_status (lmc_softc_t * const sc) { u16 link_status, link_status_11; int ret = 1; lmc_mii_writereg (sc, 0, 17, 7); link_status = lmc_mii_readreg (sc, 0, 18); /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions * led0 yellow = far-end adapter is in Red alarm condition * led1 blue = received an Alarm Indication signal * (upstream failure) * led2 Green = power to adapter, Gate Array loaded & driver * attached * led3 red = Loss of Signal (LOS) or out of frame (OOF) * conditions detected on T3 receive signal */ lmc_led_on(sc, LMC_DS3_LED2); if ((link_status & LMC_FRAMER_REG0_DLOS) || (link_status & LMC_FRAMER_REG0_OOFS)){ ret = 0; if(sc->last_led_err[3] != 1){ u16 r1; lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ r1 = lmc_mii_readreg (sc, 0, 18); r1 &= 0xfe; lmc_mii_writereg(sc, 0, 18, r1); printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name); } lmc_led_on(sc, LMC_DS3_LED3); /* turn on red LED */ sc->last_led_err[3] = 1; } else { lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ if(sc->last_led_err[3] == 1){ u16 r1; lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ r1 = lmc_mii_readreg (sc, 0, 18); r1 |= 0x01; lmc_mii_writereg(sc, 0, 18, r1); } sc->last_led_err[3] = 0; } lmc_mii_writereg(sc, 0, 17, 0x10); link_status_11 = lmc_mii_readreg(sc, 0, 18); if((link_status & LMC_FRAMER_REG0_AIS) || (link_status_11 & LMC_FRAMER_REG10_XBIT)) { ret = 0; if(sc->last_led_err[0] != 1){ printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name); printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name); } lmc_led_on(sc, LMC_DS3_LED0); sc->last_led_err[0] = 1; } else { lmc_led_off(sc, LMC_DS3_LED0); sc->last_led_err[0] = 0; } lmc_mii_writereg (sc, 0, 17, 9); link_status = lmc_mii_readreg (sc, 0, 18); if(link_status & LMC_FRAMER_REG9_RBLUE){ ret = 0; if(sc->last_led_err[1] != 1){ printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name); } 
lmc_led_on(sc, LMC_DS3_LED1); sc->last_led_err[1] = 1; } else { lmc_led_off(sc, LMC_DS3_LED1); sc->last_led_err[1] = 0; } return ret; } /* * 0 == 16bit, 1 == 32bit */ static void lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state) { if (state == LMC_CTL_CRC_LENGTH_32) { /* 32 bit */ sc->lmc_miireg16 |= LMC_MII16_DS3_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; } else { /* 16 bit */ sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } static void lmc_ds3_watchdog (lmc_softc_t * const sc) { } /* * SSI methods */ static void lmc_ssi_init(lmc_softc_t * const sc) { u16 mii17; int cable; sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; mii17 = lmc_mii_readreg(sc, 0, 17); cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; sc->ictl.cable_type = cable; lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK); } static void lmc_ssi_default (lmc_softc_t * const sc) { sc->lmc_miireg16 = LMC_MII16_LED_ALL; /* * make TXCLOCK always be an output */ lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK); sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); sc->lmc_media->set_speed (sc, NULL); sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); } /* * Given a user provided state, set ourselves up to match it. This will * always reset the card if needed. 
*/ static void lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) { if (ctl == NULL) { sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source); sc->lmc_media->set_speed (sc, &sc->ictl); lmc_set_protocol (sc, NULL); return; } /* * check for change in clock source */ if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT) { sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT); sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT; } else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT) { sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; } if (ctl->clock_rate != sc->ictl.clock_rate) sc->lmc_media->set_speed (sc, ctl); lmc_set_protocol (sc, ctl); } /* * 1 == internal, 0 == external */ static void lmc_ssi_set_clock (lmc_softc_t * const sc, int ie) { int old; old = ie; if (ie == LMC_CTL_CLOCK_SOURCE_EXT) { sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; if(ie != old) printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); } else { sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK; LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; if(ie != old) printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); } } static void lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl) { lmc_ctl_t *ictl = &sc->ictl; lmc_av9110_t *av; /* original settings for clock rate of: * 100 Khz (8,25,0,0,2) were incorrect * they should have been 80,125,1,3,3 * There are 17 param combinations to produce this freq. * For 1.5 Mhz use 120,100,1,1,2 (226 param. 
combinations) */ if (ctl == NULL) { av = &ictl->cardspec.ssi; ictl->clock_rate = 1500000; av->f = ictl->clock_rate; av->n = 120; av->m = 100; av->v = 1; av->x = 1; av->r = 2; write_av9110 (sc, av->n, av->m, av->v, av->x, av->r); return; } av = &ctl->cardspec.ssi; if (av->f == 0) return; ictl->clock_rate = av->f; /* really, this is the rate we are */ ictl->cardspec.ssi = *av; write_av9110 (sc, av->n, av->m, av->v, av->x, av->r); } /* * return hardware link status. * 0 == link is down, 1 == link is up. */ static int lmc_ssi_get_link_status (lmc_softc_t * const sc) { u16 link_status; u32 ticks; int ret = 1; int hw_hdsk = 1; /* * missing CTS? Hmm. If we require CTS on, we may never get the * link to come up, so omit it in this test. * * Also, it seems that with a loopback cable, DCD isn't asserted, * so just check for things like this: * DSR _must_ be asserted. * One of DCD or CTS must be asserted. */ /* LMC 1000 (SSI) LED definitions * led0 Green = power to adapter, Gate Array loaded & * driver attached * led1 Green = DSR and DTR and RTS and CTS are set * led2 Green = Cable detected * led3 red = No timing is available from the * cable or the on-board frequency * generator. */ link_status = lmc_mii_readreg (sc, 0, 16); /* Is the transmit clock still available */ ticks = LMC_CSR_READ (sc, csr_gp_timer); ticks = 0x0000ffff - (ticks & 0x0000ffff); lmc_led_on (sc, LMC_MII16_LED0); /* ====== transmit clock determination ===== */ if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) { lmc_led_off(sc, LMC_MII16_LED3); } else if (ticks == 0 ) { /* no clock found ? 
*/ ret = 0; if (sc->last_led_err[3] != 1) { sc->extra_stats.tx_lossOfClockCnt++; printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); } sc->last_led_err[3] = 1; lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ } else { if(sc->last_led_err[3] == 1) printk(KERN_WARNING "%s: Clock Returned\n", sc->name); sc->last_led_err[3] = 0; lmc_led_off (sc, LMC_MII16_LED3); /* turn OFF red LED */ } if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */ ret = 0; hw_hdsk = 0; } #ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){ ret = 0; hw_hdsk = 0; } #endif if(hw_hdsk == 0){ if(sc->last_led_err[1] != 1) printk(KERN_WARNING "%s: DSR not asserted\n", sc->name); sc->last_led_err[1] = 1; lmc_led_off(sc, LMC_MII16_LED1); } else { if(sc->last_led_err[1] != 0) printk(KERN_WARNING "%s: DSR now asserted\n", sc->name); sc->last_led_err[1] = 0; lmc_led_on(sc, LMC_MII16_LED1); } if(ret == 1) { lmc_led_on(sc, LMC_MII16_LED2); /* Over all good status? 
*/ } return ret; } static void lmc_ssi_set_link_status (lmc_softc_t * const sc, int state) { if (state == LMC_LINK_UP) { sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS); printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS); } else { sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS); printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS); } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * 0 == 16bit, 1 == 32bit */ static void lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state) { if (state == LMC_CTL_CRC_LENGTH_32) { /* 32 bit */ sc->lmc_miireg16 |= LMC_MII16_SSI_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4; } else { /* 16 bit */ sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2; } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * These are bits to program the ssi frequency generator */ static inline void write_av9110_bit (lmc_softc_t * sc, int c) { /* * set the data bit as we need it. */ sc->lmc_gpio &= ~(LMC_GEP_CLK); if (c & 0x01) sc->lmc_gpio |= LMC_GEP_DATA; else sc->lmc_gpio &= ~(LMC_GEP_DATA); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); /* * set the clock to high */ sc->lmc_gpio |= LMC_GEP_CLK; LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); /* * set the clock to low again. */ sc->lmc_gpio &= ~(LMC_GEP_CLK); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); } static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r) { int i; #if 0 printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n", LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r); #endif sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR; sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); /* * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK * as outputs. 
*/ lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_SSI_GENERATOR)); sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); /* * a shifting we will go... */ for (i = 0; i < 7; i++) write_av9110_bit (sc, n >> i); for (i = 0; i < 7; i++) write_av9110_bit (sc, m >> i); for (i = 0; i < 1; i++) write_av9110_bit (sc, v >> i); for (i = 0; i < 2; i++) write_av9110_bit (sc, x >> i); for (i = 0; i < 2; i++) write_av9110_bit (sc, r >> i); for (i = 0; i < 5; i++) write_av9110_bit (sc, 0x17 >> i); /* * stop driving serial-related signals */ lmc_gpio_mkinput (sc, (LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_SSI_GENERATOR)); } static void lmc_ssi_watchdog(lmc_softc_t * const sc) { u16 mii17 = lmc_mii_readreg(sc, 0, 17); if (((mii17 >> 3) & 7) == 7) lmc_led_off(sc, LMC_MII16_LED2); else lmc_led_on(sc, LMC_MII16_LED2); } /* * T1 methods */ /* * The framer regs are multiplexed through MII regs 17 & 18 * write the register address to MII reg 17 and the * data to MII reg 18. */ static void lmc_t1_write (lmc_softc_t * const sc, int a, int d) { lmc_mii_writereg (sc, 0, 17, a); lmc_mii_writereg (sc, 0, 18, d); } /* Save a warning static int lmc_t1_read (lmc_softc_t * const sc, int a) { lmc_mii_writereg (sc, 0, 17, a); return lmc_mii_readreg (sc, 0, 18); } */ static void lmc_t1_init (lmc_softc_t * const sc) { u16 mii16; int i; sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; mii16 = lmc_mii_readreg (sc, 0, 16); /* reset 8370 */ mii16 &= ~LMC_MII16_T1_RST; lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST); lmc_mii_writereg (sc, 0, 16, mii16); /* set T1 or E1 line. 
Uses sc->lmcmii16 reg in function so update it */ sc->lmc_miireg16 = mii16; lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1); mii16 = sc->lmc_miireg16; lmc_t1_write (sc, 0x01, 0x1B); /* CR0 - primary control */ lmc_t1_write (sc, 0x02, 0x42); /* JAT_CR - jitter atten config */ lmc_t1_write (sc, 0x14, 0x00); /* LOOP - loopback config */ lmc_t1_write (sc, 0x15, 0x00); /* DL3_TS - external data link timeslot */ lmc_t1_write (sc, 0x18, 0xFF); /* PIO - programmable I/O */ lmc_t1_write (sc, 0x19, 0x30); /* POE - programmable OE */ lmc_t1_write (sc, 0x1A, 0x0F); /* CMUX - clock input mux */ lmc_t1_write (sc, 0x20, 0x41); /* LIU_CR - RX LIU config */ lmc_t1_write (sc, 0x22, 0x76); /* RLIU_CR - RX LIU config */ lmc_t1_write (sc, 0x40, 0x03); /* RCR0 - RX config */ lmc_t1_write (sc, 0x45, 0x00); /* RALM - RX alarm config */ lmc_t1_write (sc, 0x46, 0x05); /* LATCH - RX alarm/err/cntr latch */ lmc_t1_write (sc, 0x68, 0x40); /* TLIU_CR - TX LIU config */ lmc_t1_write (sc, 0x70, 0x0D); /* TCR0 - TX framer config */ lmc_t1_write (sc, 0x71, 0x05); /* TCR1 - TX config */ lmc_t1_write (sc, 0x72, 0x0B); /* TFRM - TX frame format */ lmc_t1_write (sc, 0x73, 0x00); /* TERROR - TX error insert */ lmc_t1_write (sc, 0x74, 0x00); /* TMAN - TX manual Sa/FEBE config */ lmc_t1_write (sc, 0x75, 0x00); /* TALM - TX alarm signal config */ lmc_t1_write (sc, 0x76, 0x00); /* TPATT - TX test pattern config */ lmc_t1_write (sc, 0x77, 0x00); /* TLB - TX inband loopback config */ lmc_t1_write (sc, 0x90, 0x05); /* CLAD_CR - clock rate adapter config */ lmc_t1_write (sc, 0x91, 0x05); /* CSEL - clad freq sel */ lmc_t1_write (sc, 0xA6, 0x00); /* DL1_CTL - DL1 control */ lmc_t1_write (sc, 0xB1, 0x00); /* DL2_CTL - DL2 control */ lmc_t1_write (sc, 0xD0, 0x47); /* SBI_CR - sys bus iface config */ lmc_t1_write (sc, 0xD1, 0x70); /* RSB_CR - RX sys bus config */ lmc_t1_write (sc, 0xD4, 0x30); /* TSB_CR - TX sys bus config */ for (i = 0; i < 32; i++) { lmc_t1_write (sc, 0x0E0 + i, 0x00); /* SBCn - sys bus 
per-channel ctl */ lmc_t1_write (sc, 0x100 + i, 0x00); /* TPCn - TX per-channel ctl */ lmc_t1_write (sc, 0x180 + i, 0x00); /* RPCn - RX per-channel ctl */ } for (i = 1; i < 25; i++) { lmc_t1_write (sc, 0x0E0 + i, 0x0D); /* SBCn - sys bus per-channel ctl */ } mii16 |= LMC_MII16_T1_XOE; lmc_mii_writereg (sc, 0, 16, mii16); sc->lmc_miireg16 = mii16; } static void lmc_t1_default (lmc_softc_t * const sc) { sc->lmc_miireg16 = LMC_MII16_LED_ALL; sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1); sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); /* Right now we can only clock from out internal source */ sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; } /* * Given a user provided state, set ourselves up to match it. This will * always reset the card if needed. */ static void lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) { if (ctl == NULL) { sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type); lmc_set_protocol (sc, NULL); return; } /* * check for change in circuit type */ if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1 && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_E1) sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_E1); else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1 && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1) sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1); lmc_set_protocol (sc, ctl); } /* * return hardware link status. * 0 == link is down, 1 == link is up. 
*/ static int lmc_t1_get_link_status (lmc_softc_t * const sc) { u16 link_status; int ret = 1; /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions * led0 yellow = far-end adapter is in Red alarm condition * led1 blue = received an Alarm Indication signal * (upstream failure) * led2 Green = power to adapter, Gate Array loaded & driver * attached * led3 red = Loss of Signal (LOS) or out of frame (OOF) * conditions detected on T3 receive signal */ lmc_trace(sc->lmc_device, "lmc_t1_get_link_status in"); lmc_led_on(sc, LMC_DS3_LED2); lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS); link_status = lmc_mii_readreg (sc, 0, 18); if (link_status & T1F_RAIS) { /* turn on blue LED */ ret = 0; if(sc->last_led_err[1] != 1){ printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name); } lmc_led_on(sc, LMC_DS3_LED1); sc->last_led_err[1] = 1; } else { if(sc->last_led_err[1] != 0){ printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name); } lmc_led_off (sc, LMC_DS3_LED1); sc->last_led_err[1] = 0; } /* * Yellow Alarm is nasty evil stuff, looks at data patterns * inside the channel and confuses it with HDLC framing * ignore all yellow alarms. 
* * Do listen to MultiFrame Yellow alarm which while implemented * different ways isn't in the channel and hence somewhat * more reliable */ if (link_status & T1F_RMYEL) { ret = 0; if(sc->last_led_err[0] != 1){ printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name); } lmc_led_on(sc, LMC_DS3_LED0); sc->last_led_err[0] = 1; } else { if(sc->last_led_err[0] != 0){ printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name); } lmc_led_off(sc, LMC_DS3_LED0); sc->last_led_err[0] = 0; } /* * Loss of signal and los of frame * Use the green bit to identify which one lit the led */ if(link_status & T1F_RLOF){ ret = 0; if(sc->last_led_err[3] != 1){ printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name); } lmc_led_on(sc, LMC_DS3_LED3); sc->last_led_err[3] = 1; } else { if(sc->last_led_err[3] != 0){ printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name); } if( ! (link_status & T1F_RLOS)) lmc_led_off(sc, LMC_DS3_LED3); sc->last_led_err[3] = 0; } if(link_status & T1F_RLOS){ ret = 0; if(sc->last_led_err[2] != 1){ printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name); } lmc_led_on(sc, LMC_DS3_LED3); sc->last_led_err[2] = 1; } else { if(sc->last_led_err[2] != 0){ printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name); } if( ! 
(link_status & T1F_RLOF)) lmc_led_off(sc, LMC_DS3_LED3); sc->last_led_err[2] = 0; } sc->lmc_xinfo.t1_alarm1_status = link_status; lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS); sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18); lmc_trace(sc->lmc_device, "lmc_t1_get_link_status out"); return ret; } /* * 1 == T1 Circuit Type , 0 == E1 Circuit Type */ static void lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie) { if (ie == LMC_CTL_CIRCUIT_TYPE_T1) { sc->lmc_miireg16 |= LMC_MII16_T1_Z; sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1; printk(KERN_INFO "%s: In T1 Mode\n", sc->name); } else { sc->lmc_miireg16 &= ~LMC_MII16_T1_Z; sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1; printk(KERN_INFO "%s: In E1 Mode\n", sc->name); } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * 0 == 16bit, 1 == 32bit */ static void lmc_t1_set_crc_length (lmc_softc_t * const sc, int state) { if (state == LMC_CTL_CRC_LENGTH_32) { /* 32 bit */ sc->lmc_miireg16 |= LMC_MII16_T1_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4; } else { /* 16 bit */ sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC; sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2; } lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); } /* * 1 == internal, 0 == external */ static void lmc_t1_set_clock (lmc_softc_t * const sc, int ie) { int old; old = ie; if (ie == LMC_CTL_CLOCK_SOURCE_EXT) { sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK); LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; if(old != ie) printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); } else { sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK; LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; if(old != ie) printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); } } static void lmc_t1_watchdog (lmc_softc_t * const sc) { } static void lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl) { 
if (!ctl) sc->ictl.keepalive_onoff = LMC_CTL_ON; }
gpl-2.0
OMFGB/htc-kernel-msm7x30_omfgb
drivers/video/msm/mddi_client_novb9f6_5560.c
112
8075
/* * Copyright (C) 2008 HTC Corporation. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/wakelock.h> #include <linux/slab.h> #include <mach/msm_fb.h> #include <mach/debug_display.h> static DECLARE_WAIT_QUEUE_HEAD(novtec_vsync_wait); struct panel_info { struct msm_mddi_client_data *client_data; struct platform_device pdev; struct msm_panel_data panel_data; struct msmfb_callback *novtec_callback; struct wake_lock idle_lock; int novtec_got_int; int vsync_gpio; }; static struct platform_device mddi_nov_cabc = { .name = "nov_cabc", .id = 0, }; static void novtec_request_vsync(struct msm_panel_data *panel_data, struct msmfb_callback *callback) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); struct msm_mddi_client_data *client_data = panel->client_data; panel->novtec_callback = callback; if (panel->novtec_got_int) { panel->novtec_got_int = 0; client_data->activate_link(client_data); } } static void novtec_clear_vsync(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); struct msm_mddi_client_data *client_data = panel->client_data; client_data->activate_link(client_data); } static void novtec_wait_vsync(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); struct msm_mddi_client_data *client_data = panel->client_data; if 
(panel->novtec_got_int) { panel->novtec_got_int = 0; client_data->activate_link(client_data); /* clears interrupt */ } if (wait_event_timeout(novtec_vsync_wait, panel->novtec_got_int, HZ/2) == 0) PR_DISP_ERR("timeout waiting for VSYNC\n"); panel->novtec_got_int = 0; /* interrupt clears when screen dma starts */ } static int novtec_suspend(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); struct msm_mddi_client_data *client_data = panel->client_data; struct msm_mddi_bridge_platform_data *bridge_data = client_data->private_client_data; int ret; wake_lock(&panel->idle_lock); ret = bridge_data->uninit(bridge_data, client_data); wake_unlock(&panel->idle_lock); if (ret) { PR_DISP_INFO("mddi novtec client: non zero return from " "uninit\n"); return ret; } client_data->suspend(client_data); return 0; } static int novtec_resume(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); struct msm_mddi_client_data *client_data = panel->client_data; struct msm_mddi_bridge_platform_data *bridge_data = client_data->private_client_data; int ret; wake_lock(&panel->idle_lock); client_data->resume(client_data); wake_unlock(&panel->idle_lock); ret = bridge_data->init(bridge_data, client_data); if (ret) return ret; return 0; } static int novtec_blank(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); struct msm_mddi_client_data *client_data = panel->client_data; struct msm_mddi_bridge_platform_data *bridge_data = client_data->private_client_data; return bridge_data->blank(bridge_data, client_data); } static int novtec_unblank(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); struct msm_mddi_client_data *client_data = panel->client_data; struct msm_mddi_bridge_platform_data *bridge_data = 
client_data->private_client_data; return bridge_data->unblank(bridge_data, client_data); } static int novtec_recover(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); struct msm_mddi_client_data *client_data = panel->client_data; struct msm_mddi_bridge_platform_data *bridge_data = client_data->private_client_data; int ret; ret = bridge_data->init(bridge_data, client_data); if (ret) return ret; return 0; } static irqreturn_t novtec_vsync_interrupt(int irq, void *data) { struct panel_info *panel = data; panel->novtec_got_int = 1; if (panel->novtec_callback) { panel->novtec_callback->func(panel->novtec_callback); panel->novtec_callback = 0; } wake_up(&novtec_vsync_wait); return IRQ_HANDLED; } static int setup_vsync(struct panel_info *panel, int init) { int ret; int gpio = panel->vsync_gpio; unsigned int irq; if (!init) { ret = 0; goto uninit; } ret = gpio_request(gpio, "vsync"); if (ret) goto err_request_gpio_failed; ret = gpio_direction_input(gpio); if (ret) goto err_gpio_direction_input_failed; ret = irq = gpio_to_irq(gpio); if (ret < 0) goto err_get_irq_num_failed; register_gpio_int_mask(gpio, 1); ret = request_irq(irq, novtec_vsync_interrupt, IRQF_TRIGGER_RISING, "vsync", panel); if (ret) goto err_request_irq_failed; PR_DISP_INFO("vsync on gpio %d now %d\n", gpio, gpio_get_value(gpio)); return 0; uninit: free_irq(gpio_to_irq(gpio), panel); err_request_irq_failed: err_get_irq_num_failed: err_gpio_direction_input_failed: gpio_free(gpio); err_request_gpio_failed: return ret; } static int mddi_novtec_probe(struct platform_device *pdev) { int ret; struct msm_mddi_client_data *client_data = pdev->dev.platform_data; struct msm_mddi_bridge_platform_data *bridge_data = client_data->private_client_data; struct panel_data *panel_data = &bridge_data->panel_conf; struct panel_info *panel = kzalloc(sizeof(struct panel_info), GFP_KERNEL); if (!panel) return -ENOMEM; platform_set_drvdata(pdev, panel); 
PR_DISP_DEBUG("%s\n", __func__); if (panel_data->caps & MSMFB_CAP_CABC) { PR_DISP_INFO("CABC enabled\n"); mddi_nov_cabc.dev.platform_data = client_data; platform_device_register(&mddi_nov_cabc); } if (panel_data->vsync_gpio == 0) #if defined(CONFIG_ARCH_MSM7X30) panel->vsync_gpio = 30; #else panel->vsync_gpio = 98; #endif else panel->vsync_gpio = panel_data->vsync_gpio; ret = setup_vsync(panel, 1); if (ret) { dev_err(&pdev->dev, "mddi_bridge_setup_vsync failed\n"); return ret; } panel->client_data = client_data; panel->panel_data.suspend = novtec_suspend; panel->panel_data.resume = novtec_resume; panel->panel_data.wait_vsync = novtec_wait_vsync; panel->panel_data.request_vsync = novtec_request_vsync; panel->panel_data.clear_vsync = novtec_clear_vsync; panel->panel_data.blank = novtec_blank; panel->panel_data.unblank = novtec_unblank; panel->panel_data.fb_data = &bridge_data->fb_data; panel->panel_data.caps = MSMFB_CAP_PARTIAL_UPDATES; panel->panel_data.recover_vsync = novtec_recover; panel->pdev.name = "msm_panel"; panel->pdev.id = pdev->id; panel->pdev.resource = client_data->fb_resource; panel->pdev.num_resources = 1; panel->pdev.dev.platform_data = &panel->panel_data; platform_device_register(&panel->pdev); wake_lock_init(&panel->idle_lock, WAKE_LOCK_IDLE, "nov_idle_lock"); return 0; } static int mddi_novtec_remove(struct platform_device *pdev) { struct panel_info *panel = platform_get_drvdata(pdev); setup_vsync(panel, 0); kfree(panel); return 0; } static struct platform_driver mddi_client_d263_0000 = { .probe = mddi_novtec_probe, .remove = mddi_novtec_remove, .driver = { .name = "mddi_c_b9f6_5560" }, }; static int __init mddi_client_novtec_init(void) { platform_driver_register(&mddi_client_d263_0000); return 0; } module_init(mddi_client_novtec_init);
gpl-2.0
jthornber/linux-2.6
drivers/net/wireless/mwifiex/cmdevt.c
368
50237
/* * Marvell Wireless LAN device driver: commands and events * * Copyright (C) 2011-2014, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "main.h" #include "wmm.h" #include "11n.h" #include "11ac.h" /* * This function initializes a command node. * * The actual allocation of the node is not done by this function. It only * initiates a node by filling it with default parameters. Similarly, * allocation of the different buffers used (IOCTL buffer, data buffer) are * not done by this function either. */ static void mwifiex_init_cmd_node(struct mwifiex_private *priv, struct cmd_ctrl_node *cmd_node, u32 cmd_oid, void *data_buf, bool sync) { cmd_node->priv = priv; cmd_node->cmd_oid = cmd_oid; if (sync) { cmd_node->wait_q_enabled = true; cmd_node->cmd_wait_q_woken = false; cmd_node->condition = &cmd_node->cmd_wait_q_woken; } cmd_node->data_buf = data_buf; cmd_node->cmd_skb = cmd_node->skb; } /* * This function returns a command node from the free queue depending upon * availability. 
*/ static struct cmd_ctrl_node * mwifiex_get_cmd_node(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node; unsigned long flags; spin_lock_irqsave(&adapter->cmd_free_q_lock, flags); if (list_empty(&adapter->cmd_free_q)) { dev_err(adapter->dev, "GET_CMD_NODE: cmd node not available\n"); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); return NULL; } cmd_node = list_first_entry(&adapter->cmd_free_q, struct cmd_ctrl_node, list); list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); return cmd_node; } /* * This function cleans up a command node. * * The function resets the fields including the buffer pointers. * This function does not try to free the buffers. They must be * freed before calling this function. * * This function will however call the receive completion callback * in case a response buffer is still available before resetting * the pointer. */ static void mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { cmd_node->cmd_oid = 0; cmd_node->cmd_flag = 0; cmd_node->data_buf = NULL; cmd_node->wait_q_enabled = false; if (cmd_node->cmd_skb) skb_trim(cmd_node->cmd_skb, 0); if (cmd_node->resp_skb) { adapter->if_ops.cmdrsp_complete(adapter, cmd_node->resp_skb); cmd_node->resp_skb = NULL; } } /* * This function sends a host command to the firmware. * * The function copies the host command into the driver command * buffer, which will be transferred to the firmware later by the * main thread. */ static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, struct mwifiex_ds_misc_cmd *pcmd_ptr) { /* Copy the HOST command to command buffer */ memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len); dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len); return 0; } /* * This function downloads a command to the firmware. 
* * The function performs sanity tests, sets the command sequence * number and size, converts the header fields to CPU format before * sending. Afterwards, it logs the command ID and action for debugging * and sets up the command timeout timer. */ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, struct cmd_ctrl_node *cmd_node) { struct mwifiex_adapter *adapter = priv->adapter; int ret; struct host_cmd_ds_command *host_cmd; uint16_t cmd_code; uint16_t cmd_size; unsigned long flags; __le32 tmp; if (!adapter || !cmd_node) return -1; host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); /* Sanity test */ if (host_cmd == NULL || host_cmd->size == 0) { dev_err(adapter->dev, "DNLD_CMD: host_cmd is null" " or cmd size is 0, not sending\n"); if (cmd_node->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_recycle_cmd_node(adapter, cmd_node); return -1; } cmd_code = le16_to_cpu(host_cmd->command); cmd_size = le16_to_cpu(host_cmd->size); if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET && cmd_code != HostCmd_CMD_FUNC_SHUTDOWN && cmd_code != HostCmd_CMD_FUNC_INIT) { dev_err(adapter->dev, "DNLD_CMD: FW in reset state, ignore cmd %#x\n", cmd_code); if (cmd_node->wait_q_enabled) mwifiex_complete_cmd(adapter, cmd_node); mwifiex_recycle_cmd_node(adapter, cmd_node); queue_work(adapter->workqueue, &adapter->main_work); return -1; } /* Set command sequence number */ adapter->seq_num++; host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO (adapter->seq_num, cmd_node->priv->bss_num, cmd_node->priv->bss_type)); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = cmd_node; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); /* Adjust skb length */ if (cmd_node->cmd_skb->len > cmd_size) /* * cmd_size is less than sizeof(struct host_cmd_ds_command). * Trim off the unused portion. 
*/ skb_trim(cmd_node->cmd_skb, cmd_size); else if (cmd_node->cmd_skb->len < cmd_size) /* * cmd_size is larger than sizeof(struct host_cmd_ds_command) * because we have appended custom IE TLV. Increase skb length * accordingly. */ skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len); dev_dbg(adapter->dev, "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code, le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size, le16_to_cpu(host_cmd->seq_num)); if (adapter->iface_type == MWIFIEX_USB) { tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD); skb_push(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN); memcpy(cmd_node->cmd_skb->data, &tmp, MWIFIEX_TYPE_LEN); adapter->cmd_sent = true; ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_CMD_EVENT, cmd_node->cmd_skb, NULL); skb_pull(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN); if (ret == -EBUSY) cmd_node->cmd_skb = NULL; } else { skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, cmd_node->cmd_skb, NULL); skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN); } if (ret == -1) { dev_err(adapter->dev, "DNLD_CMD: host to card failed\n"); if (adapter->iface_type == MWIFIEX_USB) adapter->cmd_sent = false; if (cmd_node->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); adapter->dbg.num_cmd_host_to_card_failure++; return -1; } /* Save the last command id and action to debug log */ adapter->dbg.last_cmd_index = (adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM; adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index] = cmd_code; adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] = le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)); /* Clear BSS_NO_BITS from HostCmd */ cmd_code &= HostCmd_CMD_ID_MASK; /* Setup the timer after transmit command */ mod_timer(&adapter->cmd_timer, jiffies + 
msecs_to_jiffies(MWIFIEX_TIMER_10S)); return 0; } /* * This function downloads a sleep confirm command to the firmware. * * The function performs sanity tests, sets the command sequence * number and size, converts the header fields to CPU format before * sending. * * No responses are needed for sleep confirm command. */ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) { int ret; struct mwifiex_private *priv; struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) adapter->sleep_cfm->data; struct sk_buff *sleep_cfm_tmp; __le32 tmp; priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); adapter->seq_num++; sleep_cfm_buf->seq_num = cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO (adapter->seq_num, priv->bss_num, priv->bss_type))); dev_dbg(adapter->dev, "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", le16_to_cpu(sleep_cfm_buf->command), le16_to_cpu(sleep_cfm_buf->action), le16_to_cpu(sleep_cfm_buf->size), le16_to_cpu(sleep_cfm_buf->seq_num)); if (adapter->iface_type == MWIFIEX_USB) { sleep_cfm_tmp = dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm) + MWIFIEX_TYPE_LEN); skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm) + MWIFIEX_TYPE_LEN); tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD); memcpy(sleep_cfm_tmp->data, &tmp, MWIFIEX_TYPE_LEN); memcpy(sleep_cfm_tmp->data + MWIFIEX_TYPE_LEN, adapter->sleep_cfm->data, sizeof(struct mwifiex_opt_sleep_confirm)); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_CMD_EVENT, sleep_cfm_tmp, NULL); if (ret != -EBUSY) dev_kfree_skb_any(sleep_cfm_tmp); } else { skb_push(adapter->sleep_cfm, INTF_HEADER_LEN); ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, adapter->sleep_cfm, NULL); skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN); } if (ret == -1) { dev_err(adapter->dev, "SLEEP_CFM: failed\n"); adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++; return -1; } if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl)) /* Response is not needed for sleep confirm 
command */ adapter->ps_state = PS_STATE_SLEEP; else adapter->ps_state = PS_STATE_SLEEP_CFM; if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) && (adapter->is_hs_configured && !adapter->sleep_period.period)) { adapter->pm_wakeup_card_req = true; mwifiex_hs_activated_event(mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), true); } return ret; } /* * This function allocates the command buffers and links them to * the command free queue. * * The driver uses a pre allocated number of command buffers, which * are created at driver initializations and freed at driver cleanup. * Every command needs to obtain a command buffer from this pool before * it can be issued. The command free queue lists the command buffers * currently free to use, while the command pending queue lists the * command buffers already in use and awaiting handling. Command buffers * are returned to the free queue after use. */ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_array; u32 i; /* Allocate and initialize struct cmd_ctrl_node */ cmd_array = kcalloc(MWIFIEX_NUM_OF_CMD_BUFFER, sizeof(struct cmd_ctrl_node), GFP_KERNEL); if (!cmd_array) return -ENOMEM; adapter->cmd_pool = cmd_array; /* Allocate and initialize command buffers */ for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER); if (!cmd_array[i].skb) { dev_err(adapter->dev, "ALLOC_CMD_BUF: out of memory\n"); return -1; } } for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) mwifiex_insert_cmd_to_free_q(adapter, &cmd_array[i]); return 0; } /* * This function frees the command buffers. * * The function calls the completion callback for all the command * buffers that still have response buffers associated with them. 
*/ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_array; u32 i; /* Need to check if cmd pool is allocated or not */ if (!adapter->cmd_pool) { dev_dbg(adapter->dev, "info: FREE_CMD_BUF: cmd_pool is null\n"); return 0; } cmd_array = adapter->cmd_pool; /* Release shared memory buffers */ for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { if (cmd_array[i].skb) { dev_dbg(adapter->dev, "cmd: free cmd buffer %d\n", i); dev_kfree_skb_any(cmd_array[i].skb); } if (!cmd_array[i].resp_skb) continue; if (adapter->iface_type == MWIFIEX_USB) adapter->if_ops.cmdrsp_complete(adapter, cmd_array[i].resp_skb); else dev_kfree_skb_any(cmd_array[i].resp_skb); } /* Release struct cmd_ctrl_node */ if (adapter->cmd_pool) { dev_dbg(adapter->dev, "cmd: free cmd pool\n"); kfree(adapter->cmd_pool); adapter->cmd_pool = NULL; } return 0; } /* * This function handles events generated by firmware. * * Event body of events received from firmware are not used (though they are * saved), only the event ID is used. Some events are re-invoked by * the driver, with a new event body. * * After processing, the function calls the completion callback * for cleanup. 
*/ int mwifiex_process_event(struct mwifiex_adapter *adapter) { int ret; struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); struct sk_buff *skb = adapter->event_skb; u32 eventcause = adapter->event_cause; struct mwifiex_rxinfo *rx_info; /* Save the last event to debug log */ adapter->dbg.last_event_index = (adapter->dbg.last_event_index + 1) % DBG_CMD_NUM; adapter->dbg.last_event[adapter->dbg.last_event_index] = (u16) eventcause; /* Get BSS number and corresponding priv */ priv = mwifiex_get_priv_by_id(adapter, EVENT_GET_BSS_NUM(eventcause), EVENT_GET_BSS_TYPE(eventcause)); if (!priv) priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); /* Clear BSS_NO_BITS from event */ eventcause &= EVENT_ID_MASK; adapter->event_cause = eventcause; if (skb) { rx_info = MWIFIEX_SKB_RXCB(skb); memset(rx_info, 0, sizeof(*rx_info)); rx_info->bss_num = priv->bss_num; rx_info->bss_type = priv->bss_type; } dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause); if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) ret = mwifiex_process_uap_event(priv); else ret = mwifiex_process_sta_event(priv); adapter->event_cause = 0; adapter->event_skb = NULL; adapter->if_ops.event_complete(adapter, skb); return ret; } /* * This function prepares a command and send it to the firmware. 
* * Preparation includes - * - Sanity tests to make sure the card is still present or the FW * is not reset * - Getting a new command node from the command free queue * - Initializing the command node for default parameters * - Fill up the non-default parameters and buffer pointers * - Add the command to pending queue */ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync) { int ret; struct mwifiex_adapter *adapter = priv->adapter; struct cmd_ctrl_node *cmd_node; struct host_cmd_ds_command *cmd_ptr; if (!adapter) { pr_err("PREP_CMD: adapter is NULL\n"); return -1; } if (adapter->is_suspended) { dev_err(adapter->dev, "PREP_CMD: device in suspended state\n"); return -1; } if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) { dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n"); return -1; } if (adapter->surprise_removed) { dev_err(adapter->dev, "PREP_CMD: card is removed\n"); return -1; } if (adapter->is_cmd_timedout) { dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n"); return -1; } if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) { if (cmd_no != HostCmd_CMD_FUNC_INIT) { dev_err(adapter->dev, "PREP_CMD: FW in reset state\n"); return -1; } } /* Get a new command node */ cmd_node = mwifiex_get_cmd_node(adapter); if (!cmd_node) { dev_err(adapter->dev, "PREP_CMD: no free cmd node\n"); return -1; } /* Initialize the command node */ mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync); if (!cmd_node->cmd_skb) { dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n"); return -1; } memset(skb_put(cmd_node->cmd_skb, sizeof(struct host_cmd_ds_command)), 0, sizeof(struct host_cmd_ds_command)); cmd_ptr = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); cmd_ptr->command = cpu_to_le16(cmd_no); cmd_ptr->result = 0; /* Prepare command */ if (cmd_no) { switch (cmd_no) { case HostCmd_CMD_UAP_SYS_CONFIG: case HostCmd_CMD_UAP_BSS_START: case HostCmd_CMD_UAP_BSS_STOP: case 
HostCmd_CMD_UAP_STA_DEAUTH: ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action, cmd_oid, data_buf, cmd_ptr); break; default: ret = mwifiex_sta_prepare_cmd(priv, cmd_no, cmd_action, cmd_oid, data_buf, cmd_ptr); break; } } else { ret = mwifiex_cmd_host_cmd(priv, cmd_ptr, data_buf); cmd_node->cmd_flag |= CMD_F_HOSTCMD; } /* Return error, since the command preparation failed */ if (ret) { dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n", cmd_no); mwifiex_insert_cmd_to_free_q(adapter, cmd_node); return -1; } /* Send command */ if (cmd_no == HostCmd_CMD_802_11_SCAN || cmd_no == HostCmd_CMD_802_11_SCAN_EXT) { mwifiex_queue_scan_cmd(priv, cmd_node); } else { mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); queue_work(adapter->workqueue, &adapter->main_work); if (cmd_node->wait_q_enabled) ret = mwifiex_wait_queue_complete(adapter, cmd_node); } return ret; } /* * This function returns a command to the command free queue. * * The function also calls the completion callback if required, before * cleaning the command node and re-inserting it into the free queue. */ void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { unsigned long flags; if (!cmd_node) return; if (cmd_node->wait_q_enabled) mwifiex_complete_cmd(adapter, cmd_node); /* Clean the node */ mwifiex_clean_cmd_node(adapter, cmd_node); /* Insert node into cmd_free_q */ spin_lock_irqsave(&adapter->cmd_free_q_lock, flags); list_add_tail(&cmd_node->list, &adapter->cmd_free_q); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); } /* This function reuses a command node. 
*/ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data; mwifiex_insert_cmd_to_free_q(adapter, cmd_node); atomic_dec(&adapter->cmd_pending); dev_dbg(adapter->dev, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n", le16_to_cpu(host_cmd->command), atomic_read(&adapter->cmd_pending)); } /* * This function queues a command to the command pending queue. * * This in effect adds the command to the command list to be executed. * Exit PS command is handled specially, by placing it always to the * front of the command queue. */ void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node, u32 add_tail) { struct host_cmd_ds_command *host_cmd = NULL; u16 command; unsigned long flags; host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); if (!host_cmd) { dev_err(adapter->dev, "QUEUE_CMD: host_cmd is NULL\n"); return; } command = le16_to_cpu(host_cmd->command); /* Exit_PS command needs to be queued in the header always. */ if (command == HostCmd_CMD_802_11_PS_MODE_ENH) { struct host_cmd_ds_802_11_ps_mode_enh *pm = &host_cmd->params.psmode_enh; if ((le16_to_cpu(pm->action) == DIS_PS) || (le16_to_cpu(pm->action) == DIS_AUTO_PS)) { if (adapter->ps_state != PS_STATE_AWAKE) add_tail = false; } } spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags); if (add_tail) list_add_tail(&cmd_node->list, &adapter->cmd_pending_q); else list_add(&cmd_node->list, &adapter->cmd_pending_q); spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); atomic_inc(&adapter->cmd_pending); dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n", command, atomic_read(&adapter->cmd_pending)); } /* * This function executes the next command in command pending queue. 
* * This function will fail if a command is already in processing stage, * otherwise it will dequeue the first command from the command pending * queue and send to the firmware. * * If the device is currently in host sleep mode, any commands, except the * host sleep configuration command will de-activate the host sleep. For PS * mode, the function will put the firmware back to sleep if applicable. */ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; struct cmd_ctrl_node *cmd_node; int ret = 0; struct host_cmd_ds_command *host_cmd; unsigned long cmd_flags; unsigned long cmd_pending_q_flags; /* Check if already in processing */ if (adapter->curr_cmd) { dev_err(adapter->dev, "EXEC_NEXT_CMD: cmd in processing\n"); return -1; } spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); /* Check if any command is pending */ spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags); if (list_empty(&adapter->cmd_pending_q)) { spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, cmd_pending_q_flags); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); return 0; } cmd_node = list_first_entry(&adapter->cmd_pending_q, struct cmd_ctrl_node, list); spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, cmd_pending_q_flags); host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); priv = cmd_node->priv; if (adapter->ps_state != PS_STATE_AWAKE) { dev_err(adapter->dev, "%s: cannot send cmd in sleep state," " this should not happen\n", __func__); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); return ret; } spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags); list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, cmd_pending_q_flags); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); ret = mwifiex_dnld_cmd_to_fw(priv, cmd_node); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); /* Any command sent to the firmware when host is in sleep * 
mode should de-configure host sleep. We should skip the * host sleep configuration command itself though */ if (priv && (host_cmd->command != cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) { if (adapter->hs_activated) { adapter->is_hs_configured = false; mwifiex_hs_activated_event(priv, false); } } return ret; } /* * This function handles the command response. * * After processing, the function cleans the command node and puts * it back to the command free queue. */ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) { struct host_cmd_ds_command *resp; struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); int ret = 0; uint16_t orig_cmdresp_no; uint16_t cmdresp_no; uint16_t cmdresp_result; unsigned long flags; /* Now we got response from FW, cancel the command timer */ del_timer_sync(&adapter->cmd_timer); if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) { resp = (struct host_cmd_ds_command *) adapter->upld_buf; dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n", le16_to_cpu(resp->command)); return -1; } adapter->is_cmd_timedout = 0; resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data; if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) { dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n", le16_to_cpu(resp->command)); mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); return -1; } if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) { /* Copy original response back to response buffer */ struct mwifiex_ds_misc_cmd *hostcmd; uint16_t size = le16_to_cpu(resp->size); dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size); size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER); if (adapter->curr_cmd->data_buf) { hostcmd = adapter->curr_cmd->data_buf; hostcmd->len = size; memcpy(hostcmd->cmd, resp, size); } } orig_cmdresp_no = le16_to_cpu(resp->command); /* Get BSS 
number and corresponding priv */ priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(le16_to_cpu(resp->seq_num)), HostCmd_GET_BSS_TYPE(le16_to_cpu(resp->seq_num))); if (!priv) priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); /* Clear RET_BIT from HostCmd */ resp->command = cpu_to_le16(orig_cmdresp_no & HostCmd_CMD_ID_MASK); cmdresp_no = le16_to_cpu(resp->command); cmdresp_result = le16_to_cpu(resp->result); /* Save the last command response to debug log */ adapter->dbg.last_cmd_resp_index = (adapter->dbg.last_cmd_resp_index + 1) % DBG_CMD_NUM; adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] = orig_cmdresp_no; dev_dbg(adapter->dev, "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", orig_cmdresp_no, cmdresp_result, le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num)); if (!(orig_cmdresp_no & HostCmd_RET_BIT)) { dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n"); if (adapter->curr_cmd->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); return -1; } if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) { adapter->curr_cmd->cmd_flag &= ~CMD_F_HOSTCMD; if ((cmdresp_result == HostCmd_RESULT_OK) && (cmdresp_no == HostCmd_CMD_802_11_HS_CFG_ENH)) ret = mwifiex_ret_802_11_hs_cfg(priv, resp); } else { /* handle response */ ret = mwifiex_process_sta_cmdresp(priv, cmdresp_no, resp); } /* Check init command response */ if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) { if (ret) { dev_err(adapter->dev, "%s: cmd %#x failed during " "initialization\n", __func__, cmdresp_no); mwifiex_init_fw_complete(adapter); return -1; } else if (adapter->last_init_cmd == cmdresp_no) adapter->hw_status = MWIFIEX_HW_STATUS_INIT_DONE; } if (adapter->curr_cmd) { if (adapter->curr_cmd->wait_q_enabled) adapter->cmd_wait_q.status = ret; mwifiex_recycle_cmd_node(adapter, 
adapter->curr_cmd); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); } return ret; } /* * This function handles the timeout of command sending. * * It will re-send the same command again. */ void mwifiex_cmd_timeout_func(unsigned long function_context) { struct mwifiex_adapter *adapter = (struct mwifiex_adapter *) function_context; struct cmd_ctrl_node *cmd_node; adapter->is_cmd_timedout = 1; if (!adapter->curr_cmd) { dev_dbg(adapter->dev, "cmd: empty curr_cmd\n"); return; } cmd_node = adapter->curr_cmd; if (cmd_node) { adapter->dbg.timeout_cmd_id = adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; adapter->dbg.timeout_cmd_act = adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index]; dev_err(adapter->dev, "%s: Timeout cmd id = %#x, act = %#x\n", __func__, adapter->dbg.timeout_cmd_id, adapter->dbg.timeout_cmd_act); dev_err(adapter->dev, "num_data_h2c_failure = %d\n", adapter->dbg.num_tx_host_to_card_failure); dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n", adapter->dbg.num_cmd_host_to_card_failure); dev_err(adapter->dev, "is_cmd_timedout = %d\n", adapter->is_cmd_timedout); dev_err(adapter->dev, "num_tx_timeout = %d\n", adapter->dbg.num_tx_timeout); dev_err(adapter->dev, "last_cmd_index = %d\n", adapter->dbg.last_cmd_index); dev_err(adapter->dev, "last_cmd_id: %*ph\n", (int)sizeof(adapter->dbg.last_cmd_id), adapter->dbg.last_cmd_id); dev_err(adapter->dev, "last_cmd_act: %*ph\n", (int)sizeof(adapter->dbg.last_cmd_act), adapter->dbg.last_cmd_act); dev_err(adapter->dev, "last_cmd_resp_index = %d\n", adapter->dbg.last_cmd_resp_index); dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n", (int)sizeof(adapter->dbg.last_cmd_resp_id), adapter->dbg.last_cmd_resp_id); dev_err(adapter->dev, "last_event_index = %d\n", adapter->dbg.last_event_index); dev_err(adapter->dev, "last_event: %*ph\n", (int)sizeof(adapter->dbg.last_event), adapter->dbg.last_event); dev_err(adapter->dev, 
"data_sent=%d cmd_sent=%d\n", adapter->data_sent, adapter->cmd_sent); dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", adapter->ps_mode, adapter->ps_state); if (cmd_node->wait_q_enabled) { adapter->cmd_wait_q.status = -ETIMEDOUT; wake_up_interruptible(&adapter->cmd_wait_q.wait); mwifiex_cancel_pending_ioctl(adapter); } } if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) mwifiex_init_fw_complete(adapter); if (adapter->if_ops.fw_dump) adapter->if_ops.fw_dump(adapter); if (adapter->if_ops.card_reset) adapter->if_ops.card_reset(adapter); } /* * This function cancels all the pending commands. * * The current command, all commands in command pending queue and all scan * commands in scan pending queue are cancelled. All the completion callbacks * are called with failure status to ensure cleanup. */ void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node = NULL, *tmp_node; unsigned long flags, cmd_flags; struct mwifiex_private *priv; int i; spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); /* Cancel current cmd */ if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) { adapter->curr_cmd->wait_q_enabled = false; adapter->cmd_wait_q.status = -1; mwifiex_complete_cmd(adapter, adapter->curr_cmd); } /* Cancel all pending command */ spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags); list_for_each_entry_safe(cmd_node, tmp_node, &adapter->cmd_pending_q, list) { list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); if (cmd_node->wait_q_enabled) { adapter->cmd_wait_q.status = -1; mwifiex_complete_cmd(adapter, cmd_node); cmd_node->wait_q_enabled = false; } mwifiex_recycle_cmd_node(adapter, cmd_node); spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags); } spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); /* Cancel all pending scan command */ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); 
list_for_each_entry_safe(cmd_node, tmp_node, &adapter->scan_pending_q, list) { list_del(&cmd_node->list); cmd_node->wait_q_enabled = false; mwifiex_insert_cmd_to_free_q(adapter, cmd_node); } spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); if (adapter->scan_processing) { spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); adapter->scan_processing = false; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; if (!priv) continue; if (priv->scan_request) { dev_dbg(adapter->dev, "info: aborting scan\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; } } } } /* * This function cancels all pending commands that matches with * the given IOCTL request. * * Both the current command buffer and the pending command queue are * searched for matching IOCTL request. The completion callback of * the matched command is called with failure status to ensure cleanup. * In case of scan commands, all pending commands in scan pending queue * are cancelled. 
*/ void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL; unsigned long cmd_flags; unsigned long scan_pending_q_flags; struct mwifiex_private *priv; int i; if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) { spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); cmd_node = adapter->curr_cmd; cmd_node->wait_q_enabled = false; cmd_node->cmd_flag |= CMD_F_CANCELED; mwifiex_recycle_cmd_node(adapter, cmd_node); mwifiex_complete_cmd(adapter, adapter->curr_cmd); adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); } /* Cancel all pending scan command */ spin_lock_irqsave(&adapter->scan_pending_q_lock, scan_pending_q_flags); list_for_each_entry_safe(cmd_node, tmp_node, &adapter->scan_pending_q, list) { list_del(&cmd_node->list); cmd_node->wait_q_enabled = false; mwifiex_insert_cmd_to_free_q(adapter, cmd_node); } spin_unlock_irqrestore(&adapter->scan_pending_q_lock, scan_pending_q_flags); if (adapter->scan_processing) { spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); adapter->scan_processing = false; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; if (!priv) continue; if (priv->scan_request) { dev_dbg(adapter->dev, "info: aborting scan\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; } } } adapter->cmd_wait_q.status = -1; } /* * This function sends the sleep confirm command to firmware, if * possible. * * The sleep confirm command cannot be issued if command response, * data response or event response is awaiting handling, or if we * are in the middle of sending a command, or expecting a command * response. 
*/ void mwifiex_check_ps_cond(struct mwifiex_adapter *adapter) { if (!adapter->cmd_sent && !adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter)) mwifiex_dnld_sleep_confirm_cmd(adapter); else dev_dbg(adapter->dev, "cmd: Delay Sleep Confirm (%s%s%s)\n", (adapter->cmd_sent) ? "D" : "", (adapter->curr_cmd) ? "C" : "", (IS_CARD_RX_RCVD(adapter)) ? "R" : ""); } /* * This function sends a Host Sleep activated event to applications. * * This event is generated by the driver, with a blank event body. */ void mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated) { if (activated) { if (priv->adapter->is_hs_configured) { priv->adapter->hs_activated = true; mwifiex_update_rxreor_flags(priv->adapter, RXREOR_FORCE_NO_DROP); dev_dbg(priv->adapter->dev, "event: hs_activated\n"); priv->adapter->hs_activate_wait_q_woken = true; wake_up_interruptible( &priv->adapter->hs_activate_wait_q); } else { dev_dbg(priv->adapter->dev, "event: HS not configured\n"); } } else { dev_dbg(priv->adapter->dev, "event: hs_deactivated\n"); priv->adapter->hs_activated = false; } } /* * This function handles the command response of a Host Sleep configuration * command. * * Handling includes changing the header fields into CPU format * and setting the current host sleep activation status in driver. * * In case host sleep status change, the function generates an event to * notify the applications. 
*/ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { struct mwifiex_adapter *adapter = priv->adapter; struct host_cmd_ds_802_11_hs_cfg_enh *phs_cfg = &resp->params.opt_hs_cfg; uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && adapter->iface_type != MWIFIEX_USB) { mwifiex_hs_activated_event(priv, true); return 0; } else { dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply" " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n", resp->result, conditions, phs_cfg->params.hs_config.gpio, phs_cfg->params.hs_config.gap); } if (conditions != HS_CFG_CANCEL) { adapter->is_hs_configured = true; if (adapter->iface_type == MWIFIEX_USB) mwifiex_hs_activated_event(priv, true); } else { adapter->is_hs_configured = false; if (adapter->hs_activated) mwifiex_hs_activated_event(priv, false); } return 0; } /* * This function wakes up the adapter and generates a Host Sleep * cancel event on receiving the power up interrupt. */ void mwifiex_process_hs_config(struct mwifiex_adapter *adapter) { dev_dbg(adapter->dev, "info: %s: auto cancelling host sleep" " since there is interrupt from the firmware\n", __func__); adapter->if_ops.wakeup(adapter); adapter->hs_activated = false; adapter->is_hs_configured = false; adapter->is_suspended = false; mwifiex_hs_activated_event(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), false); } EXPORT_SYMBOL_GPL(mwifiex_process_hs_config); /* * This function handles the command response of a sleep confirm command. * * The function sets the card state to SLEEP if the response indicates success. 
*/ void mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter, u8 *pbuf, u32 upld_len) { struct host_cmd_ds_command *cmd = (struct host_cmd_ds_command *) pbuf; struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); uint16_t result = le16_to_cpu(cmd->result); uint16_t command = le16_to_cpu(cmd->command); uint16_t seq_num = le16_to_cpu(cmd->seq_num); if (!upld_len) { dev_err(adapter->dev, "%s: cmd size is 0\n", __func__); return; } dev_dbg(adapter->dev, "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", command, result, le16_to_cpu(cmd->size), seq_num); /* Get BSS number and corresponding priv */ priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num), HostCmd_GET_BSS_TYPE(seq_num)); if (!priv) priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); /* Update sequence number */ seq_num = HostCmd_GET_SEQ_NO(seq_num); /* Clear RET_BIT from HostCmd */ command &= HostCmd_CMD_ID_MASK; if (command != HostCmd_CMD_802_11_PS_MODE_ENH) { dev_err(adapter->dev, "%s: rcvd unexpected resp for cmd %#x, result = %x\n", __func__, command, result); return; } if (result) { dev_err(adapter->dev, "%s: sleep confirm cmd failed\n", __func__); adapter->pm_wakeup_card_req = false; adapter->ps_state = PS_STATE_AWAKE; return; } adapter->pm_wakeup_card_req = true; if (adapter->is_hs_configured) mwifiex_hs_activated_event(mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), true); adapter->ps_state = PS_STATE_SLEEP; cmd->command = cpu_to_le16(command); cmd->seq_num = cpu_to_le16(seq_num); } EXPORT_SYMBOL_GPL(mwifiex_process_sleep_confirm_resp); /* * This function prepares an enhanced power mode command. * * This function can be used to disable power save or to configure * power save with auto PS or STA PS or auto deep sleep. 
* * Preparation includes - * - Setting command ID, action and proper size * - Setting Power Save bitmap, PS parameters TLV, PS mode TLV, * auto deep sleep TLV (as required) * - Ensuring correct endian-ness */ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, u16 cmd_action, uint16_t ps_bitmap, struct mwifiex_ds_auto_ds *auto_ds) { struct host_cmd_ds_802_11_ps_mode_enh *psmode_enh = &cmd->params.psmode_enh; u8 *tlv; u16 cmd_size = 0; cmd->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH); if (cmd_action == DIS_AUTO_PS) { psmode_enh->action = cpu_to_le16(DIS_AUTO_PS); psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap); cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) + sizeof(psmode_enh->params.ps_bitmap)); } else if (cmd_action == GET_PS) { psmode_enh->action = cpu_to_le16(GET_PS); psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap); cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) + sizeof(psmode_enh->params.ps_bitmap)); } else if (cmd_action == EN_AUTO_PS) { psmode_enh->action = cpu_to_le16(EN_AUTO_PS); psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap); cmd_size = S_DS_GEN + sizeof(psmode_enh->action) + sizeof(psmode_enh->params.ps_bitmap); tlv = (u8 *) cmd + cmd_size; if (ps_bitmap & BITMAP_STA_PS) { struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_ie_types_ps_param *ps_tlv = (struct mwifiex_ie_types_ps_param *) tlv; struct mwifiex_ps_param *ps_mode = &ps_tlv->param; ps_tlv->header.type = cpu_to_le16(TLV_TYPE_PS_PARAM); ps_tlv->header.len = cpu_to_le16(sizeof(*ps_tlv) - sizeof(struct mwifiex_ie_types_header)); cmd_size += sizeof(*ps_tlv); tlv += sizeof(*ps_tlv); dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n"); ps_mode->null_pkt_interval = cpu_to_le16(adapter->null_pkt_interval); ps_mode->multiple_dtims = cpu_to_le16(adapter->multiple_dtim); ps_mode->bcn_miss_timeout = cpu_to_le16(adapter->bcn_miss_time_out); ps_mode->local_listen_interval = 
cpu_to_le16(adapter->local_listen_interval); ps_mode->adhoc_wake_period = cpu_to_le16(adapter->adhoc_awake_period); ps_mode->delay_to_ps = cpu_to_le16(adapter->delay_to_ps); ps_mode->mode = cpu_to_le16(adapter->enhanced_ps_mode); } if (ps_bitmap & BITMAP_AUTO_DS) { struct mwifiex_ie_types_auto_ds_param *auto_ds_tlv = (struct mwifiex_ie_types_auto_ds_param *) tlv; u16 idletime = 0; auto_ds_tlv->header.type = cpu_to_le16(TLV_TYPE_AUTO_DS_PARAM); auto_ds_tlv->header.len = cpu_to_le16(sizeof(*auto_ds_tlv) - sizeof(struct mwifiex_ie_types_header)); cmd_size += sizeof(*auto_ds_tlv); tlv += sizeof(*auto_ds_tlv); if (auto_ds) idletime = auto_ds->idle_time; dev_dbg(priv->adapter->dev, "cmd: PS Command: Enter Auto Deep Sleep\n"); auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime); } cmd->size = cpu_to_le16(cmd_size); } return 0; } /* * This function handles the command response of an enhanced power mode * command. * * Handling includes changing the header fields into CPU format * and setting the current enhanced power mode in driver. 
*/ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv, struct host_cmd_ds_command *resp, struct mwifiex_ds_pm_cfg *pm_cfg) { struct mwifiex_adapter *adapter = priv->adapter; struct host_cmd_ds_802_11_ps_mode_enh *ps_mode = &resp->params.psmode_enh; uint16_t action = le16_to_cpu(ps_mode->action); uint16_t ps_bitmap = le16_to_cpu(ps_mode->params.ps_bitmap); uint16_t auto_ps_bitmap = le16_to_cpu(ps_mode->params.ps_bitmap); dev_dbg(adapter->dev, "info: %s: PS_MODE cmd reply result=%#x action=%#X\n", __func__, resp->result, action); if (action == EN_AUTO_PS) { if (auto_ps_bitmap & BITMAP_AUTO_DS) { dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n"); priv->adapter->is_deep_sleep = true; } if (auto_ps_bitmap & BITMAP_STA_PS) { dev_dbg(adapter->dev, "cmd: Enabled STA power save\n"); if (adapter->sleep_period.period) dev_dbg(adapter->dev, "cmd: set to uapsd/pps mode\n"); } } else if (action == DIS_AUTO_PS) { if (ps_bitmap & BITMAP_AUTO_DS) { priv->adapter->is_deep_sleep = false; dev_dbg(adapter->dev, "cmd: Disabled auto deep sleep\n"); } if (ps_bitmap & BITMAP_STA_PS) { dev_dbg(adapter->dev, "cmd: Disabled STA power save\n"); if (adapter->sleep_period.period) { adapter->delay_null_pkt = false; adapter->tx_lock_flag = false; adapter->pps_uapsd_mode = false; } } } else if (action == GET_PS) { if (ps_bitmap & BITMAP_STA_PS) adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP; else adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM; dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap); if (pm_cfg) { /* This section is for get power save mode */ if (ps_bitmap & BITMAP_STA_PS) pm_cfg->param.ps_mode = 1; else pm_cfg->param.ps_mode = 0; } } return 0; } /* * This function prepares command to get hardware specifications. 
* * Preparation includes - * - Setting command ID, action and proper size * - Setting permanent address parameter * - Ensuring correct endian-ness */ int mwifiex_cmd_get_hw_spec(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd) { struct host_cmd_ds_get_hw_spec *hw_spec = &cmd->params.hw_spec; cmd->command = cpu_to_le16(HostCmd_CMD_GET_HW_SPEC); cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_get_hw_spec) + S_DS_GEN); memcpy(hw_spec->permanent_addr, priv->curr_addr, ETH_ALEN); return 0; } /* * This function handles the command response of get hardware * specifications. * * Handling includes changing the header fields into CPU format * and saving/updating the following parameters in driver - * - Firmware capability information * - Firmware band settings * - Ad-hoc start band and channel * - Ad-hoc 11n activation status * - Firmware release number * - Number of antennas * - Hardware address * - Hardware interface version * - Firmware version * - Region code * - 11n capabilities * - MCS support fields * - MP end port */ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec; struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_ie_types_header *tlv; struct hw_spec_api_rev *api_rev; u16 resp_size, api_id; int i, left_len, parsed_len = 0; adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info); if (IS_SUPPORT_MULTI_BANDS(adapter)) adapter->fw_bands = (u8) GET_FW_DEFAULT_BANDS(adapter); else adapter->fw_bands = BAND_B; adapter->config_bands = adapter->fw_bands; if (adapter->fw_bands & BAND_A) { if (adapter->fw_bands & BAND_GN) { adapter->config_bands |= BAND_AN; adapter->fw_bands |= BAND_AN; } if (adapter->fw_bands & BAND_AN) { adapter->adhoc_start_band = BAND_A | BAND_AN; adapter->adhoc_11n_enabled = true; } else { adapter->adhoc_start_band = BAND_A; } priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL_A; } else if (adapter->fw_bands & BAND_GN) { 
adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN; priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL; adapter->adhoc_11n_enabled = true; } else if (adapter->fw_bands & BAND_G) { adapter->adhoc_start_band = BAND_G | BAND_B; priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL; } else if (adapter->fw_bands & BAND_B) { adapter->adhoc_start_band = BAND_B; priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL; } adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number); adapter->fw_api_ver = (adapter->fw_release_number >> 16) & 0xff; adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna); if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) { adapter->is_hw_11ac_capable = true; /* Copy 11AC cap */ adapter->hw_dot_11ac_dev_cap = le32_to_cpu(hw_spec->dot_11ac_dev_cap); adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK; adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK; /* Copy 11AC mcs */ adapter->hw_dot_11ac_mcs_support = le32_to_cpu(hw_spec->dot_11ac_mcs_support); adapter->usr_dot_11ac_mcs_support = adapter->hw_dot_11ac_mcs_support; } else { adapter->is_hw_11ac_capable = false; } resp_size = le16_to_cpu(resp->size) - S_DS_GEN; if (resp_size > sizeof(struct host_cmd_ds_get_hw_spec)) { /* we have variable HW SPEC information */ left_len = resp_size - sizeof(struct host_cmd_ds_get_hw_spec); while (left_len > sizeof(struct mwifiex_ie_types_header)) { tlv = (void *)&hw_spec->tlvs + parsed_len; switch (le16_to_cpu(tlv->type)) { case TLV_TYPE_API_REV: api_rev = (struct hw_spec_api_rev *)tlv; api_id = le16_to_cpu(api_rev->api_id); switch (api_id) { case KEY_API_VER_ID: adapter->key_api_major_ver = api_rev->major_ver; adapter->key_api_minor_ver = api_rev->minor_ver; dev_dbg(adapter->dev, "key_api v%d.%d\n", adapter->key_api_major_ver, adapter->key_api_minor_ver); break; case FW_API_VER_ID: adapter->fw_api_ver = api_rev->major_ver; dev_dbg(adapter->dev, "Firmware api version 
%d\n", adapter->fw_api_ver); break; default: dev_warn(adapter->dev, "Unknown api_id: %d\n", api_id); break; } break; default: dev_warn(adapter->dev, "Unknown GET_HW_SPEC TLV type: %#x\n", le16_to_cpu(tlv->type)); break; } parsed_len += le16_to_cpu(tlv->len) + sizeof(struct mwifiex_ie_types_header); left_len -= le16_to_cpu(tlv->len) + sizeof(struct mwifiex_ie_types_header); } } dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n", adapter->fw_release_number); dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n", hw_spec->permanent_addr); dev_dbg(adapter->dev, "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n", le16_to_cpu(hw_spec->hw_if_version), le16_to_cpu(hw_spec->version)); ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr); adapter->region_code = le16_to_cpu(hw_spec->region_code); for (i = 0; i < MWIFIEX_MAX_REGION_CODE; i++) /* Use the region code to search for the index */ if (adapter->region_code == region_code_index[i]) break; /* If it's unidentified region code, use the default (USA) */ if (i >= MWIFIEX_MAX_REGION_CODE) { adapter->region_code = 0x10; dev_dbg(adapter->dev, "cmd: unknown region code, use default (USA)\n"); } adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap); adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support; adapter->user_dev_mcs_support = adapter->hw_dev_mcs_support; if (adapter->if_ops.update_mp_end_port) adapter->if_ops.update_mp_end_port(adapter, le16_to_cpu(hw_spec->mp_end_port)); if (adapter->fw_api_ver == MWIFIEX_FW_V15) adapter->scan_chan_gap_enabled = true; return 0; }
gpl-2.0
lukier/linux-samsung
drivers/net/wireless/adm8211.c
368
55185
/* * Linux device driver for ADMtek ADM8211 (IEEE 802.11b MAC/BBP) * * Copyright (c) 2003, Jouni Malinen <j@w1.fi> * Copyright (c) 2004-2007, Michael Wu <flamingice@sourmilk.net> * Some parts copyright (c) 2003 by David Young <dyoung@pobox.com> * and used with permission. * * Much thanks to Infineon-ADMtek for their support of this driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ #include <linux/interrupt.h> #include <linux/if.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/crc32.h> #include <linux/eeprom_93cx6.h> #include <linux/module.h> #include <net/mac80211.h> #include "adm8211.h" MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); MODULE_AUTHOR("Jouni Malinen <j@w1.fi>"); MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211"); MODULE_SUPPORTED_DEVICE("ADM8211"); MODULE_LICENSE("GPL"); static unsigned int tx_ring_size __read_mostly = 16; static unsigned int rx_ring_size __read_mostly = 16; module_param(tx_ring_size, uint, 0); module_param(rx_ring_size, uint, 0); static const struct pci_device_id adm8211_pci_id_table[] = { /* ADMtek ADM8211 */ { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */ { PCI_DEVICE(0x1200, 0x8201) }, /* ? */ { PCI_DEVICE(0x1317, 0x8201) }, /* ADM8211A */ { PCI_DEVICE(0x1317, 0x8211) }, /* ADM8211B/C */ { 0 } }; static struct ieee80211_rate adm8211_rates[] = { { .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 220, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, /* XX ?? 
*/ }; static const struct ieee80211_channel adm8211_channels[] = { { .center_freq = 2412}, { .center_freq = 2417}, { .center_freq = 2422}, { .center_freq = 2427}, { .center_freq = 2432}, { .center_freq = 2437}, { .center_freq = 2442}, { .center_freq = 2447}, { .center_freq = 2452}, { .center_freq = 2457}, { .center_freq = 2462}, { .center_freq = 2467}, { .center_freq = 2472}, { .center_freq = 2484}, }; static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom) { struct adm8211_priv *priv = eeprom->data; u32 reg = ADM8211_CSR_READ(SPR); eeprom->reg_data_in = reg & ADM8211_SPR_SDI; eeprom->reg_data_out = reg & ADM8211_SPR_SDO; eeprom->reg_data_clock = reg & ADM8211_SPR_SCLK; eeprom->reg_chip_select = reg & ADM8211_SPR_SCS; } static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom) { struct adm8211_priv *priv = eeprom->data; u32 reg = 0x4000 | ADM8211_SPR_SRS; if (eeprom->reg_data_in) reg |= ADM8211_SPR_SDI; if (eeprom->reg_data_out) reg |= ADM8211_SPR_SDO; if (eeprom->reg_data_clock) reg |= ADM8211_SPR_SCLK; if (eeprom->reg_chip_select) reg |= ADM8211_SPR_SCS; ADM8211_CSR_WRITE(SPR, reg); ADM8211_CSR_READ(SPR); /* eeprom_delay */ } static int adm8211_read_eeprom(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; unsigned int words, i; struct ieee80211_chan_range chan_range; u16 cr49; struct eeprom_93cx6 eeprom = { .data = priv, .register_read = adm8211_eeprom_register_read, .register_write = adm8211_eeprom_register_write }; if (ADM8211_CSR_READ(CSR_TEST0) & ADM8211_CSR_TEST0_EPTYP) { /* 256 * 16-bit = 512 bytes */ eeprom.width = PCI_EEPROM_WIDTH_93C66; words = 256; } else { /* 64 * 16-bit = 128 bytes */ eeprom.width = PCI_EEPROM_WIDTH_93C46; words = 64; } priv->eeprom_len = words * 2; priv->eeprom = kmalloc(priv->eeprom_len, GFP_KERNEL); if (!priv->eeprom) return -ENOMEM; eeprom_93cx6_multiread(&eeprom, 0, (__le16 *)priv->eeprom, words); cr49 = le16_to_cpu(priv->eeprom->cr49); priv->rf_type = (cr49 >> 3) & 0x7; switch 
(priv->rf_type) { case ADM8211_TYPE_INTERSIL: case ADM8211_TYPE_RFMD: case ADM8211_TYPE_MARVEL: case ADM8211_TYPE_AIROHA: case ADM8211_TYPE_ADMTEK: break; default: if (priv->pdev->revision < ADM8211_REV_CA) priv->rf_type = ADM8211_TYPE_RFMD; else priv->rf_type = ADM8211_TYPE_AIROHA; printk(KERN_WARNING "%s (adm8211): Unknown RFtype %d\n", pci_name(priv->pdev), (cr49 >> 3) & 0x7); } priv->bbp_type = cr49 & 0x7; switch (priv->bbp_type) { case ADM8211_TYPE_INTERSIL: case ADM8211_TYPE_RFMD: case ADM8211_TYPE_MARVEL: case ADM8211_TYPE_AIROHA: case ADM8211_TYPE_ADMTEK: break; default: if (priv->pdev->revision < ADM8211_REV_CA) priv->bbp_type = ADM8211_TYPE_RFMD; else priv->bbp_type = ADM8211_TYPE_ADMTEK; printk(KERN_WARNING "%s (adm8211): Unknown BBPtype: %d\n", pci_name(priv->pdev), cr49 >> 3); } if (priv->eeprom->country_code >= ARRAY_SIZE(cranges)) { printk(KERN_WARNING "%s (adm8211): Invalid country code (%d)\n", pci_name(priv->pdev), priv->eeprom->country_code); chan_range = cranges[2]; } else chan_range = cranges[priv->eeprom->country_code]; printk(KERN_DEBUG "%s (adm8211): Channel range: %d - %d\n", pci_name(priv->pdev), (int)chan_range.min, (int)chan_range.max); BUILD_BUG_ON(sizeof(priv->channels) != sizeof(adm8211_channels)); memcpy(priv->channels, adm8211_channels, sizeof(priv->channels)); priv->band.channels = priv->channels; priv->band.n_channels = ARRAY_SIZE(adm8211_channels); priv->band.bitrates = adm8211_rates; priv->band.n_bitrates = ARRAY_SIZE(adm8211_rates); for (i = 1; i <= ARRAY_SIZE(adm8211_channels); i++) if (i < chan_range.min || i > chan_range.max) priv->channels[i - 1].flags |= IEEE80211_CHAN_DISABLED; switch (priv->eeprom->specific_bbptype) { case ADM8211_BBP_RFMD3000: case ADM8211_BBP_RFMD3002: case ADM8211_BBP_ADM8011: priv->specific_bbptype = priv->eeprom->specific_bbptype; break; default: if (priv->pdev->revision < ADM8211_REV_CA) priv->specific_bbptype = ADM8211_BBP_RFMD3000; else priv->specific_bbptype = ADM8211_BBP_ADM8011; 
printk(KERN_WARNING "%s (adm8211): Unknown specific BBP: %d\n", pci_name(priv->pdev), priv->eeprom->specific_bbptype); } switch (priv->eeprom->specific_rftype) { case ADM8211_RFMD2948: case ADM8211_RFMD2958: case ADM8211_RFMD2958_RF3000_CONTROL_POWER: case ADM8211_MAX2820: case ADM8211_AL2210L: priv->transceiver_type = priv->eeprom->specific_rftype; break; default: if (priv->pdev->revision == ADM8211_REV_BA) priv->transceiver_type = ADM8211_RFMD2958_RF3000_CONTROL_POWER; else if (priv->pdev->revision == ADM8211_REV_CA) priv->transceiver_type = ADM8211_AL2210L; else if (priv->pdev->revision == ADM8211_REV_AB) priv->transceiver_type = ADM8211_RFMD2948; printk(KERN_WARNING "%s (adm8211): Unknown transceiver: %d\n", pci_name(priv->pdev), priv->eeprom->specific_rftype); break; } printk(KERN_DEBUG "%s (adm8211): RFtype=%d BBPtype=%d Specific BBP=%d " "Transceiver=%d\n", pci_name(priv->pdev), priv->rf_type, priv->bbp_type, priv->specific_bbptype, priv->transceiver_type); return 0; } static inline void adm8211_write_sram(struct ieee80211_hw *dev, u32 addr, u32 data) { struct adm8211_priv *priv = dev->priv; ADM8211_CSR_WRITE(WEPCTL, addr | ADM8211_WEPCTL_TABLE_WR | (priv->pdev->revision < ADM8211_REV_BA ? 
0 : ADM8211_WEPCTL_SEL_WEPTABLE )); ADM8211_CSR_READ(WEPCTL); msleep(1); ADM8211_CSR_WRITE(WESK, data); ADM8211_CSR_READ(WESK); msleep(1); } static void adm8211_write_sram_bytes(struct ieee80211_hw *dev, unsigned int addr, u8 *buf, unsigned int len) { struct adm8211_priv *priv = dev->priv; u32 reg = ADM8211_CSR_READ(WEPCTL); unsigned int i; if (priv->pdev->revision < ADM8211_REV_BA) { for (i = 0; i < len; i += 2) { u16 val = buf[i] | (buf[i + 1] << 8); adm8211_write_sram(dev, addr + i / 2, val); } } else { for (i = 0; i < len; i += 4) { u32 val = (buf[i + 0] << 0 ) | (buf[i + 1] << 8 ) | (buf[i + 2] << 16) | (buf[i + 3] << 24); adm8211_write_sram(dev, addr + i / 4, val); } } ADM8211_CSR_WRITE(WEPCTL, reg); } static void adm8211_clear_sram(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; u32 reg = ADM8211_CSR_READ(WEPCTL); unsigned int addr; for (addr = 0; addr < ADM8211_SRAM_SIZE; addr++) adm8211_write_sram(dev, addr, 0); ADM8211_CSR_WRITE(WEPCTL, reg); } static int adm8211_get_stats(struct ieee80211_hw *dev, struct ieee80211_low_level_stats *stats) { struct adm8211_priv *priv = dev->priv; memcpy(stats, &priv->stats, sizeof(*stats)); return 0; } static void adm8211_interrupt_tci(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; unsigned int dirty_tx; spin_lock(&priv->lock); for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) { unsigned int entry = dirty_tx % priv->tx_ring_size; u32 status = le32_to_cpu(priv->tx_ring[entry].status); struct ieee80211_tx_info *txi; struct adm8211_tx_ring_info *info; struct sk_buff *skb; if (status & TDES0_CONTROL_OWN || !(status & TDES0_CONTROL_DONE)) break; info = &priv->tx_buffers[entry]; skb = info->skb; txi = IEEE80211_SKB_CB(skb); /* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */ pci_unmap_single(priv->pdev, info->mapping, info->skb->len, PCI_DMA_TODEVICE); ieee80211_tx_info_clear_status(txi); skb_pull(skb, sizeof(struct adm8211_tx_hdr)); memcpy(skb_push(skb, 
info->hdrlen), skb->cb, info->hdrlen); if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && !(status & TDES0_STATUS_ES)) txi->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(dev, skb); info->skb = NULL; } if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2) ieee80211_wake_queue(dev, 0); priv->dirty_tx = dirty_tx; spin_unlock(&priv->lock); } static void adm8211_interrupt_rci(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; unsigned int entry = priv->cur_rx % priv->rx_ring_size; u32 status; unsigned int pktlen; struct sk_buff *skb, *newskb; unsigned int limit = priv->rx_ring_size; u8 rssi, rate; while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) { if (!limit--) break; status = le32_to_cpu(priv->rx_ring[entry].status); rate = (status & RDES0_STATUS_RXDR) >> 12; rssi = le32_to_cpu(priv->rx_ring[entry].length) & RDES1_STATUS_RSSI; pktlen = status & RDES0_STATUS_FL; if (pktlen > RX_PKT_SIZE) { if (net_ratelimit()) wiphy_debug(dev->wiphy, "frame too long (%d)\n", pktlen); pktlen = RX_PKT_SIZE; } if (!priv->soft_rx_crc && status & RDES0_STATUS_ES) { skb = NULL; /* old buffer will be reused */ /* TODO: update RX error stats */ /* TODO: check RDES0_STATUS_CRC*E */ } else if (pktlen < RX_COPY_BREAK) { skb = dev_alloc_skb(pktlen); if (skb) { pci_dma_sync_single_for_cpu( priv->pdev, priv->rx_buffers[entry].mapping, pktlen, PCI_DMA_FROMDEVICE); memcpy(skb_put(skb, pktlen), skb_tail_pointer(priv->rx_buffers[entry].skb), pktlen); pci_dma_sync_single_for_device( priv->pdev, priv->rx_buffers[entry].mapping, RX_PKT_SIZE, PCI_DMA_FROMDEVICE); } } else { newskb = dev_alloc_skb(RX_PKT_SIZE); if (newskb) { skb = priv->rx_buffers[entry].skb; skb_put(skb, pktlen); pci_unmap_single( priv->pdev, priv->rx_buffers[entry].mapping, RX_PKT_SIZE, PCI_DMA_FROMDEVICE); priv->rx_buffers[entry].skb = newskb; priv->rx_buffers[entry].mapping = pci_map_single(priv->pdev, skb_tail_pointer(newskb), RX_PKT_SIZE, PCI_DMA_FROMDEVICE); } else { skb = NULL; /* TODO: 
update rx dropped stats */ } priv->rx_ring[entry].buffer1 = cpu_to_le32(priv->rx_buffers[entry].mapping); } priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL); priv->rx_ring[entry].length = cpu_to_le32(RX_PKT_SIZE | (entry == priv->rx_ring_size - 1 ? RDES1_CONTROL_RER : 0)); if (skb) { struct ieee80211_rx_status rx_status = {0}; if (priv->pdev->revision < ADM8211_REV_CA) rx_status.signal = rssi; else rx_status.signal = 100 - rssi; rx_status.rate_idx = rate; rx_status.freq = adm8211_channels[priv->channel - 1].center_freq; rx_status.band = IEEE80211_BAND_2GHZ; memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); ieee80211_rx_irqsafe(dev, skb); } entry = (++priv->cur_rx) % priv->rx_ring_size; } /* TODO: check LPC and update stats? */ } static irqreturn_t adm8211_interrupt(int irq, void *dev_id) { #define ADM8211_INT(x) \ do { \ if (unlikely(stsr & ADM8211_STSR_ ## x)) \ wiphy_debug(dev->wiphy, "%s\n", #x); \ } while (0) struct ieee80211_hw *dev = dev_id; struct adm8211_priv *priv = dev->priv; u32 stsr = ADM8211_CSR_READ(STSR); ADM8211_CSR_WRITE(STSR, stsr); if (stsr == 0xffffffff) return IRQ_HANDLED; if (!(stsr & (ADM8211_STSR_NISS | ADM8211_STSR_AISS))) return IRQ_HANDLED; if (stsr & ADM8211_STSR_RCI) adm8211_interrupt_rci(dev); if (stsr & ADM8211_STSR_TCI) adm8211_interrupt_tci(dev); ADM8211_INT(PCF); ADM8211_INT(BCNTC); ADM8211_INT(GPINT); ADM8211_INT(ATIMTC); ADM8211_INT(TSFTF); ADM8211_INT(TSCZ); ADM8211_INT(SQL); ADM8211_INT(WEPTD); ADM8211_INT(ATIME); ADM8211_INT(TEIS); ADM8211_INT(FBE); ADM8211_INT(REIS); ADM8211_INT(GPTT); ADM8211_INT(RPS); ADM8211_INT(RDU); ADM8211_INT(TUF); ADM8211_INT(TPS); return IRQ_HANDLED; #undef ADM8211_INT } #define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\ static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \ u16 addr, u32 value) { \ struct adm8211_priv *priv = dev->priv; \ unsigned int i; \ u32 reg, bitbuf; \ \ value &= v_mask; \ addr &= a_mask; \ 
bitbuf = (value << v_shift) | (addr << a_shift); \ \ ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \ ADM8211_CSR_READ(SYNRF); \ ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \ ADM8211_CSR_READ(SYNRF); \ \ if (prewrite) { \ ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \ ADM8211_CSR_READ(SYNRF); \ } \ \ for (i = 0; i <= bits; i++) { \ if (bitbuf & (1 << (bits - i))) \ reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \ else \ reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \ \ ADM8211_CSR_WRITE(SYNRF, reg); \ ADM8211_CSR_READ(SYNRF); \ \ ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \ ADM8211_CSR_READ(SYNRF); \ ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \ ADM8211_CSR_READ(SYNRF); \ } \ \ if (postwrite == 1) { \ ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \ ADM8211_CSR_READ(SYNRF); \ } \ if (postwrite == 2) { \ ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \ ADM8211_CSR_READ(SYNRF); \ } \ \ ADM8211_CSR_WRITE(SYNRF, 0); \ ADM8211_CSR_READ(SYNRF); \ } WRITE_SYN(max2820, 0x00FFF, 0, 0x0F, 12, 15, 1, 1) WRITE_SYN(al2210l, 0xFFFFF, 4, 0x0F, 0, 23, 1, 1) WRITE_SYN(rfmd2958, 0x3FFFF, 0, 0x1F, 18, 23, 0, 1) WRITE_SYN(rfmd2948, 0x0FFFF, 4, 0x0F, 0, 21, 0, 2) #undef WRITE_SYN static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data) { struct adm8211_priv *priv = dev->priv; unsigned int timeout; u32 reg; timeout = 10; while (timeout > 0) { reg = ADM8211_CSR_READ(BBPCTL); if (!(reg & (ADM8211_BBPCTL_WR | ADM8211_BBPCTL_RD))) break; timeout--; msleep(2); } if (timeout == 0) { wiphy_debug(dev->wiphy, "adm8211_write_bbp(%d,%d) failed prewrite (reg=0x%08x)\n", addr, data, reg); return -ETIMEDOUT; } switch (priv->bbp_type) { case ADM8211_TYPE_INTERSIL: reg = ADM8211_BBPCTL_MMISEL; /* three wire interface */ break; case ADM8211_TYPE_RFMD: reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP | (0x01 << 18); break; case ADM8211_TYPE_ADMTEK: reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | 
ADM8211_BBPCTL_CCAP | (0x05 << 18); break; } reg |= ADM8211_BBPCTL_WR | (addr << 8) | data; ADM8211_CSR_WRITE(BBPCTL, reg); timeout = 10; while (timeout > 0) { reg = ADM8211_CSR_READ(BBPCTL); if (!(reg & ADM8211_BBPCTL_WR)) break; timeout--; msleep(2); } if (timeout == 0) { ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) & ~ADM8211_BBPCTL_WR); wiphy_debug(dev->wiphy, "adm8211_write_bbp(%d,%d) failed postwrite (reg=0x%08x)\n", addr, data, reg); return -ETIMEDOUT; } return 0; } static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan) { static const u32 adm8211_rfmd2958_reg5[] = {0x22BD, 0x22D2, 0x22E8, 0x22FE, 0x2314, 0x232A, 0x2340, 0x2355, 0x236B, 0x2381, 0x2397, 0x23AD, 0x23C2, 0x23F7}; static const u32 adm8211_rfmd2958_reg6[] = {0x05D17, 0x3A2E8, 0x2E8BA, 0x22E8B, 0x1745D, 0x0BA2E, 0x00000, 0x345D1, 0x28BA2, 0x1D174, 0x11745, 0x05D17, 0x3A2E8, 0x11745}; struct adm8211_priv *priv = dev->priv; u8 ant_power = priv->ant_power > 0x3F ? priv->eeprom->antenna_power[chan - 1] : priv->ant_power; u8 tx_power = priv->tx_power > 0x3F ? priv->eeprom->tx_power[chan - 1] : priv->tx_power; u8 lpf_cutoff = priv->lpf_cutoff == 0xFF ? priv->eeprom->lpf_cutoff[chan - 1] : priv->lpf_cutoff; u8 lnags_thresh = priv->lnags_threshold == 0xFF ? 
priv->eeprom->lnags_threshold[chan - 1] : priv->lnags_threshold; u32 reg; ADM8211_IDLE(); /* Program synthesizer to new channel */ switch (priv->transceiver_type) { case ADM8211_RFMD2958: case ADM8211_RFMD2958_RF3000_CONTROL_POWER: adm8211_rf_write_syn_rfmd2958(dev, 0x00, 0x04007); adm8211_rf_write_syn_rfmd2958(dev, 0x02, 0x00033); adm8211_rf_write_syn_rfmd2958(dev, 0x05, adm8211_rfmd2958_reg5[chan - 1]); adm8211_rf_write_syn_rfmd2958(dev, 0x06, adm8211_rfmd2958_reg6[chan - 1]); break; case ADM8211_RFMD2948: adm8211_rf_write_syn_rfmd2948(dev, SI4126_MAIN_CONF, SI4126_MAIN_XINDIV2); adm8211_rf_write_syn_rfmd2948(dev, SI4126_POWERDOWN, SI4126_POWERDOWN_PDIB | SI4126_POWERDOWN_PDRB); adm8211_rf_write_syn_rfmd2948(dev, SI4126_PHASE_DET_GAIN, 0); adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_N_DIV, (chan == 14 ? 2110 : (2033 + (chan * 5)))); adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_N_DIV, 1496); adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_R_DIV, 44); adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_R_DIV, 44); break; case ADM8211_MAX2820: adm8211_rf_write_syn_max2820(dev, 0x3, (chan == 14 ? 0x054 : (0x7 + (chan * 5)))); break; case ADM8211_AL2210L: adm8211_rf_write_syn_al2210l(dev, 0x0, (chan == 14 ? 0x229B4 : (0x22967 + (chan * 5)))); break; default: wiphy_debug(dev->wiphy, "unsupported transceiver type %d\n", priv->transceiver_type); break; } /* write BBP regs */ if (priv->bbp_type == ADM8211_TYPE_RFMD) { /* SMC 2635W specific? adm8211b doesn't use the 2948 though.. 
*/ /* TODO: remove if SMC 2635W doesn't need this */ if (priv->transceiver_type == ADM8211_RFMD2948) { reg = ADM8211_CSR_READ(GPIO); reg &= 0xfffc0000; reg |= ADM8211_CSR_GPIO_EN0; if (chan != 14) reg |= ADM8211_CSR_GPIO_O0; ADM8211_CSR_WRITE(GPIO, reg); } if (priv->transceiver_type == ADM8211_RFMD2958) { /* set PCNT2 */ adm8211_rf_write_syn_rfmd2958(dev, 0x0B, 0x07100); /* set PCNT1 P_DESIRED/MID_BIAS */ reg = le16_to_cpu(priv->eeprom->cr49); reg >>= 13; reg <<= 15; reg |= ant_power << 9; adm8211_rf_write_syn_rfmd2958(dev, 0x0A, reg); /* set TXRX TX_GAIN */ adm8211_rf_write_syn_rfmd2958(dev, 0x09, 0x00050 | (priv->pdev->revision < ADM8211_REV_CA ? tx_power : 0)); } else { reg = ADM8211_CSR_READ(PLCPHD); reg &= 0xff00ffff; reg |= tx_power << 18; ADM8211_CSR_WRITE(PLCPHD, reg); } ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF | ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST); ADM8211_CSR_READ(SYNRF); msleep(30); /* RF3000 BBP */ if (priv->transceiver_type != ADM8211_RFMD2958) adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, tx_power<<2); adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, lpf_cutoff); adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, lnags_thresh); adm8211_write_bbp(dev, 0x1c, priv->pdev->revision == ADM8211_REV_BA ? 
priv->eeprom->cr28 : 0); adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29); ADM8211_CSR_WRITE(SYNRF, 0); /* Nothing to do for ADMtek BBP */ } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK) wiphy_debug(dev->wiphy, "unsupported BBP type %d\n", priv->bbp_type); ADM8211_RESTORE(); /* update current channel for adhoc (and maybe AP mode) */ reg = ADM8211_CSR_READ(CAP0); reg &= ~0xF; reg |= chan; ADM8211_CSR_WRITE(CAP0, reg); return 0; } static void adm8211_update_mode(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; ADM8211_IDLE(); priv->soft_rx_crc = 0; switch (priv->mode) { case NL80211_IFTYPE_STATION: priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA); priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR; break; case NL80211_IFTYPE_ADHOC: priv->nar &= ~ADM8211_NAR_PR; priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR; /* don't trust the error bits on rev 0x20 and up in adhoc */ if (priv->pdev->revision >= ADM8211_REV_BA) priv->soft_rx_crc = 1; break; case NL80211_IFTYPE_MONITOR: priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST); priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR; break; } ADM8211_RESTORE(); } static void adm8211_hw_init_syn(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; switch (priv->transceiver_type) { case ADM8211_RFMD2958: case ADM8211_RFMD2958_RF3000_CONTROL_POWER: /* comments taken from ADMtek vendor driver */ /* Reset RF2958 after power on */ adm8211_rf_write_syn_rfmd2958(dev, 0x1F, 0x00000); /* Initialize RF VCO Core Bias to maximum */ adm8211_rf_write_syn_rfmd2958(dev, 0x0C, 0x3001F); /* Initialize IF PLL */ adm8211_rf_write_syn_rfmd2958(dev, 0x01, 0x29C03); /* Initialize IF PLL Coarse Tuning */ adm8211_rf_write_syn_rfmd2958(dev, 0x03, 0x1FF6F); /* Initialize RF PLL */ adm8211_rf_write_syn_rfmd2958(dev, 0x04, 0x29403); /* Initialize RF PLL Coarse Tuning */ adm8211_rf_write_syn_rfmd2958(dev, 0x07, 0x1456F); /* Initialize TX gain and filter BW (R9) */ adm8211_rf_write_syn_rfmd2958(dev, 0x09, (priv->transceiver_type 
== ADM8211_RFMD2958 ? 0x10050 : 0x00050)); /* Initialize CAL register */ adm8211_rf_write_syn_rfmd2958(dev, 0x08, 0x3FFF8); break; case ADM8211_MAX2820: adm8211_rf_write_syn_max2820(dev, 0x1, 0x01E); adm8211_rf_write_syn_max2820(dev, 0x2, 0x001); adm8211_rf_write_syn_max2820(dev, 0x3, 0x054); adm8211_rf_write_syn_max2820(dev, 0x4, 0x310); adm8211_rf_write_syn_max2820(dev, 0x5, 0x000); break; case ADM8211_AL2210L: adm8211_rf_write_syn_al2210l(dev, 0x0, 0x0196C); adm8211_rf_write_syn_al2210l(dev, 0x1, 0x007CB); adm8211_rf_write_syn_al2210l(dev, 0x2, 0x3582F); adm8211_rf_write_syn_al2210l(dev, 0x3, 0x010A9); adm8211_rf_write_syn_al2210l(dev, 0x4, 0x77280); adm8211_rf_write_syn_al2210l(dev, 0x5, 0x45641); adm8211_rf_write_syn_al2210l(dev, 0x6, 0xEA130); adm8211_rf_write_syn_al2210l(dev, 0x7, 0x80000); adm8211_rf_write_syn_al2210l(dev, 0x8, 0x7850F); adm8211_rf_write_syn_al2210l(dev, 0x9, 0xF900C); adm8211_rf_write_syn_al2210l(dev, 0xA, 0x00000); adm8211_rf_write_syn_al2210l(dev, 0xB, 0x00000); break; case ADM8211_RFMD2948: default: break; } } static int adm8211_hw_init_bbp(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; u32 reg; /* write addresses */ if (priv->bbp_type == ADM8211_TYPE_INTERSIL) { ADM8211_CSR_WRITE(MMIWA, 0x100E0C0A); ADM8211_CSR_WRITE(MMIRD0, 0x00007C7E); ADM8211_CSR_WRITE(MMIRD1, 0x00100000); } else if (priv->bbp_type == ADM8211_TYPE_RFMD || priv->bbp_type == ADM8211_TYPE_ADMTEK) { /* check specific BBP type */ switch (priv->specific_bbptype) { case ADM8211_BBP_RFMD3000: case ADM8211_BBP_RFMD3002: ADM8211_CSR_WRITE(MMIWA, 0x00009101); ADM8211_CSR_WRITE(MMIRD0, 0x00000301); break; case ADM8211_BBP_ADM8011: ADM8211_CSR_WRITE(MMIWA, 0x00008903); ADM8211_CSR_WRITE(MMIRD0, 0x00001716); reg = ADM8211_CSR_READ(BBPCTL); reg &= ~ADM8211_BBPCTL_TYPE; reg |= 0x5 << 18; ADM8211_CSR_WRITE(BBPCTL, reg); break; } switch (priv->pdev->revision) { case ADM8211_REV_CA: if (priv->transceiver_type == ADM8211_RFMD2958 || priv->transceiver_type == 
ADM8211_RFMD2958_RF3000_CONTROL_POWER || priv->transceiver_type == ADM8211_RFMD2948) ADM8211_CSR_WRITE(SYNCTL, 0x1 << 22); else if (priv->transceiver_type == ADM8211_MAX2820 || priv->transceiver_type == ADM8211_AL2210L) ADM8211_CSR_WRITE(SYNCTL, 0x3 << 22); break; case ADM8211_REV_BA: reg = ADM8211_CSR_READ(MMIRD1); reg &= 0x0000FFFF; reg |= 0x7e100000; ADM8211_CSR_WRITE(MMIRD1, reg); break; case ADM8211_REV_AB: case ADM8211_REV_AF: default: ADM8211_CSR_WRITE(MMIRD1, 0x7e100000); break; } /* For RFMD */ ADM8211_CSR_WRITE(MACTEST, 0x800); } adm8211_hw_init_syn(dev); /* Set RF Power control IF pin to PE1+PHYRST# */ ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF | ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST); ADM8211_CSR_READ(SYNRF); msleep(20); /* write BBP regs */ if (priv->bbp_type == ADM8211_TYPE_RFMD) { /* RF3000 BBP */ /* another set: * 11: c8 * 14: 14 * 15: 50 (chan 1..13; chan 14: d0) * 1c: 00 * 1d: 84 */ adm8211_write_bbp(dev, RF3000_CCA_CTRL, 0x80); /* antenna selection: diversity */ adm8211_write_bbp(dev, RF3000_DIVERSITY__RSSI, 0x80); adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, 0x74); adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, 0x38); adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, 0x40); if (priv->eeprom->major_version < 2) { adm8211_write_bbp(dev, 0x1c, 0x00); adm8211_write_bbp(dev, 0x1d, 0x80); } else { if (priv->pdev->revision == ADM8211_REV_BA) adm8211_write_bbp(dev, 0x1c, priv->eeprom->cr28); else adm8211_write_bbp(dev, 0x1c, 0x00); adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29); } } else if (priv->bbp_type == ADM8211_TYPE_ADMTEK) { /* reset baseband */ adm8211_write_bbp(dev, 0x00, 0xFF); /* antenna selection: diversity */ adm8211_write_bbp(dev, 0x07, 0x0A); /* TODO: find documentation for this */ switch (priv->transceiver_type) { case ADM8211_RFMD2958: case ADM8211_RFMD2958_RF3000_CONTROL_POWER: adm8211_write_bbp(dev, 0x00, 0x00); adm8211_write_bbp(dev, 0x01, 0x00); adm8211_write_bbp(dev, 0x02, 0x00); adm8211_write_bbp(dev, 0x03, 0x00); 
adm8211_write_bbp(dev, 0x06, 0x0f); adm8211_write_bbp(dev, 0x09, 0x00); adm8211_write_bbp(dev, 0x0a, 0x00); adm8211_write_bbp(dev, 0x0b, 0x00); adm8211_write_bbp(dev, 0x0c, 0x00); adm8211_write_bbp(dev, 0x0f, 0xAA); adm8211_write_bbp(dev, 0x10, 0x8c); adm8211_write_bbp(dev, 0x11, 0x43); adm8211_write_bbp(dev, 0x18, 0x40); adm8211_write_bbp(dev, 0x20, 0x23); adm8211_write_bbp(dev, 0x21, 0x02); adm8211_write_bbp(dev, 0x22, 0x28); adm8211_write_bbp(dev, 0x23, 0x30); adm8211_write_bbp(dev, 0x24, 0x2d); adm8211_write_bbp(dev, 0x28, 0x35); adm8211_write_bbp(dev, 0x2a, 0x8c); adm8211_write_bbp(dev, 0x2b, 0x81); adm8211_write_bbp(dev, 0x2c, 0x44); adm8211_write_bbp(dev, 0x2d, 0x0A); adm8211_write_bbp(dev, 0x29, 0x40); adm8211_write_bbp(dev, 0x60, 0x08); adm8211_write_bbp(dev, 0x64, 0x01); break; case ADM8211_MAX2820: adm8211_write_bbp(dev, 0x00, 0x00); adm8211_write_bbp(dev, 0x01, 0x00); adm8211_write_bbp(dev, 0x02, 0x00); adm8211_write_bbp(dev, 0x03, 0x00); adm8211_write_bbp(dev, 0x06, 0x0f); adm8211_write_bbp(dev, 0x09, 0x05); adm8211_write_bbp(dev, 0x0a, 0x02); adm8211_write_bbp(dev, 0x0b, 0x00); adm8211_write_bbp(dev, 0x0c, 0x0f); adm8211_write_bbp(dev, 0x0f, 0x55); adm8211_write_bbp(dev, 0x10, 0x8d); adm8211_write_bbp(dev, 0x11, 0x43); adm8211_write_bbp(dev, 0x18, 0x4a); adm8211_write_bbp(dev, 0x20, 0x20); adm8211_write_bbp(dev, 0x21, 0x02); adm8211_write_bbp(dev, 0x22, 0x23); adm8211_write_bbp(dev, 0x23, 0x30); adm8211_write_bbp(dev, 0x24, 0x2d); adm8211_write_bbp(dev, 0x2a, 0x8c); adm8211_write_bbp(dev, 0x2b, 0x81); adm8211_write_bbp(dev, 0x2c, 0x44); adm8211_write_bbp(dev, 0x29, 0x4a); adm8211_write_bbp(dev, 0x60, 0x2b); adm8211_write_bbp(dev, 0x64, 0x01); break; case ADM8211_AL2210L: adm8211_write_bbp(dev, 0x00, 0x00); adm8211_write_bbp(dev, 0x01, 0x00); adm8211_write_bbp(dev, 0x02, 0x00); adm8211_write_bbp(dev, 0x03, 0x00); adm8211_write_bbp(dev, 0x06, 0x0f); adm8211_write_bbp(dev, 0x07, 0x05); adm8211_write_bbp(dev, 0x08, 0x03); adm8211_write_bbp(dev, 0x09, 
0x00); adm8211_write_bbp(dev, 0x0a, 0x00); adm8211_write_bbp(dev, 0x0b, 0x00); adm8211_write_bbp(dev, 0x0c, 0x10); adm8211_write_bbp(dev, 0x0f, 0x55); adm8211_write_bbp(dev, 0x10, 0x8d); adm8211_write_bbp(dev, 0x11, 0x43); adm8211_write_bbp(dev, 0x18, 0x4a); adm8211_write_bbp(dev, 0x20, 0x20); adm8211_write_bbp(dev, 0x21, 0x02); adm8211_write_bbp(dev, 0x22, 0x23); adm8211_write_bbp(dev, 0x23, 0x30); adm8211_write_bbp(dev, 0x24, 0x2d); adm8211_write_bbp(dev, 0x2a, 0xaa); adm8211_write_bbp(dev, 0x2b, 0x81); adm8211_write_bbp(dev, 0x2c, 0x44); adm8211_write_bbp(dev, 0x29, 0xfa); adm8211_write_bbp(dev, 0x60, 0x2d); adm8211_write_bbp(dev, 0x64, 0x01); break; case ADM8211_RFMD2948: break; default: wiphy_debug(dev->wiphy, "unsupported transceiver %d\n", priv->transceiver_type); break; } } else wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type); ADM8211_CSR_WRITE(SYNRF, 0); /* Set RF CAL control source to MAC control */ reg = ADM8211_CSR_READ(SYNCTL); reg |= ADM8211_SYNCTL_SELCAL; ADM8211_CSR_WRITE(SYNCTL, reg); return 0; } /* configures hw beacons/probe responses */ static int adm8211_set_rate(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; u32 reg; int i = 0; u8 rate_buf[12] = {0}; /* write supported rates */ if (priv->pdev->revision != ADM8211_REV_BA) { rate_buf[0] = ARRAY_SIZE(adm8211_rates); for (i = 0; i < ARRAY_SIZE(adm8211_rates); i++) rate_buf[i + 1] = (adm8211_rates[i].bitrate / 5) | 0x80; } else { /* workaround for rev BA specific bug */ rate_buf[0] = 0x04; rate_buf[1] = 0x82; rate_buf[2] = 0x04; rate_buf[3] = 0x0b; rate_buf[4] = 0x16; } adm8211_write_sram_bytes(dev, ADM8211_SRAM_SUPP_RATE, rate_buf, ARRAY_SIZE(adm8211_rates) + 1); reg = ADM8211_CSR_READ(PLCPHD) & 0x00FFFFFF; /* keep bits 0-23 */ reg |= 1 << 15; /* short preamble */ reg |= 110 << 24; ADM8211_CSR_WRITE(PLCPHD, reg); /* MTMLT = 512 TU (max TX MSDU lifetime) * BCNTSIG = plcp_signal (beacon, probe resp, and atim TX rate) * SRTYLIM = 224 (short retry limit, TX header 
value is default) */ ADM8211_CSR_WRITE(TXLMT, (512 << 16) | (110 << 8) | (224 << 0)); return 0; } static void adm8211_hw_init(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; u32 reg; u8 cline; reg = ADM8211_CSR_READ(PAR); reg |= ADM8211_PAR_MRLE | ADM8211_PAR_MRME; reg &= ~(ADM8211_PAR_BAR | ADM8211_PAR_CAL); if (!pci_set_mwi(priv->pdev)) { reg |= 0x1 << 24; pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline); switch (cline) { case 0x8: reg |= (0x1 << 14); break; case 0x16: reg |= (0x2 << 14); break; case 0x32: reg |= (0x3 << 14); break; default: reg |= (0x0 << 14); break; } } ADM8211_CSR_WRITE(PAR, reg); reg = ADM8211_CSR_READ(CSR_TEST1); reg &= ~(0xF << 28); reg |= (1 << 28) | (1 << 31); ADM8211_CSR_WRITE(CSR_TEST1, reg); /* lose link after 4 lost beacons */ reg = (0x04 << 21) | ADM8211_WCSR_TSFTWE | ADM8211_WCSR_LSOE; ADM8211_CSR_WRITE(WCSR, reg); /* Disable APM, enable receive FIFO threshold, and set drain receive * threshold to store-and-forward */ reg = ADM8211_CSR_READ(CMDR); reg &= ~(ADM8211_CMDR_APM | ADM8211_CMDR_DRT); reg |= ADM8211_CMDR_RTE | ADM8211_CMDR_DRT_SF; ADM8211_CSR_WRITE(CMDR, reg); adm8211_set_rate(dev); /* 4-bit values: * PWR1UP = 8 * 2 ms * PWR0PAPE = 8 us or 5 us * PWR1PAPE = 1 us or 3 us * PWR0TRSW = 5 us * PWR1TRSW = 12 us * PWR0PE2 = 13 us * PWR1PE2 = 1 us * PWR0TXPE = 8 or 6 */ if (priv->pdev->revision < ADM8211_REV_CA) ADM8211_CSR_WRITE(TOFS2, 0x8815cd18); else ADM8211_CSR_WRITE(TOFS2, 0x8535cd16); /* Enable store and forward for transmit */ priv->nar = ADM8211_NAR_SF | ADM8211_NAR_PB; ADM8211_CSR_WRITE(NAR, priv->nar); /* Reset RF */ ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_RADIO); ADM8211_CSR_READ(SYNRF); msleep(10); ADM8211_CSR_WRITE(SYNRF, 0); ADM8211_CSR_READ(SYNRF); msleep(5); /* Set CFP Max Duration to 0x10 TU */ reg = ADM8211_CSR_READ(CFPP); reg &= ~(0xffff << 8); reg |= 0x0010 << 8; ADM8211_CSR_WRITE(CFPP, reg); /* USCNT = 0x16 (number of system clocks, 22 MHz, in 1us * TUCNT = 0x3ff - Tu counter 
1024 us */ ADM8211_CSR_WRITE(TOFS0, (0x16 << 24) | 0x3ff); /* SLOT=20 us, SIFS=110 cycles of 22 MHz (5 us), * DIFS=50 us, EIFS=100 us */ if (priv->pdev->revision < ADM8211_REV_CA) ADM8211_CSR_WRITE(IFST, (20 << 23) | (110 << 15) | (50 << 9) | 100); else ADM8211_CSR_WRITE(IFST, (20 << 23) | (24 << 15) | (50 << 9) | 100); /* PCNT = 1 (MAC idle time awake/sleep, unit S) * RMRD = 2346 * 8 + 1 us (max RX duration) */ ADM8211_CSR_WRITE(RMD, (1 << 16) | 18769); /* MART=65535 us, MIRT=256 us, TSFTOFST=0 us */ ADM8211_CSR_WRITE(RSPT, 0xffffff00); /* Initialize BBP (and SYN) */ adm8211_hw_init_bbp(dev); /* make sure interrupts are off */ ADM8211_CSR_WRITE(IER, 0); /* ACK interrupts */ ADM8211_CSR_WRITE(STSR, ADM8211_CSR_READ(STSR)); /* Setup WEP (turns it off for now) */ reg = ADM8211_CSR_READ(MACTEST); reg &= ~(7 << 20); ADM8211_CSR_WRITE(MACTEST, reg); reg = ADM8211_CSR_READ(WEPCTL); reg &= ~ADM8211_WEPCTL_WEPENABLE; reg |= ADM8211_WEPCTL_WEPRXBYP; ADM8211_CSR_WRITE(WEPCTL, reg); /* Clear the missed-packet counter. 
*/ ADM8211_CSR_READ(LPC); } static int adm8211_hw_reset(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; u32 reg, tmp; int timeout = 100; /* Power-on issue */ /* TODO: check if this is necessary */ ADM8211_CSR_WRITE(FRCTL, 0); /* Reset the chip */ tmp = ADM8211_CSR_READ(PAR); ADM8211_CSR_WRITE(PAR, ADM8211_PAR_SWR); while ((ADM8211_CSR_READ(PAR) & ADM8211_PAR_SWR) && timeout--) msleep(50); if (timeout <= 0) return -ETIMEDOUT; ADM8211_CSR_WRITE(PAR, tmp); if (priv->pdev->revision == ADM8211_REV_BA && (priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER || priv->transceiver_type == ADM8211_RFMD2958)) { reg = ADM8211_CSR_READ(CSR_TEST1); reg |= (1 << 4) | (1 << 5); ADM8211_CSR_WRITE(CSR_TEST1, reg); } else if (priv->pdev->revision == ADM8211_REV_CA) { reg = ADM8211_CSR_READ(CSR_TEST1); reg &= ~((1 << 4) | (1 << 5)); ADM8211_CSR_WRITE(CSR_TEST1, reg); } ADM8211_CSR_WRITE(FRCTL, 0); reg = ADM8211_CSR_READ(CSR_TEST0); reg |= ADM8211_CSR_TEST0_EPRLD; /* EEPROM Recall */ ADM8211_CSR_WRITE(CSR_TEST0, reg); adm8211_clear_sram(dev); return 0; } static u64 adm8211_get_tsft(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { struct adm8211_priv *priv = dev->priv; u32 tsftl; u64 tsft; tsftl = ADM8211_CSR_READ(TSFTL); tsft = ADM8211_CSR_READ(TSFTH); tsft <<= 32; tsft |= tsftl; return tsft; } static void adm8211_set_interval(struct ieee80211_hw *dev, unsigned short bi, unsigned short li) { struct adm8211_priv *priv = dev->priv; u32 reg; /* BP (beacon interval) = data->beacon_interval * LI (listen interval) = data->listen_interval (in beacon intervals) */ reg = (bi << 16) | li; ADM8211_CSR_WRITE(BPLI, reg); } static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid) { struct adm8211_priv *priv = dev->priv; u32 reg; ADM8211_CSR_WRITE(BSSID0, le32_to_cpu(*(__le32 *)bssid)); reg = ADM8211_CSR_READ(ABDA1); reg &= 0x0000ffff; reg |= (bssid[4] << 16) | (bssid[5] << 24); ADM8211_CSR_WRITE(ABDA1, reg); } static int adm8211_config(struct 
ieee80211_hw *dev, u32 changed) { struct adm8211_priv *priv = dev->priv; struct ieee80211_conf *conf = &dev->conf; int channel = ieee80211_frequency_to_channel(conf->chandef.chan->center_freq); if (channel != priv->channel) { priv->channel = channel; adm8211_rf_set_channel(dev, priv->channel); } return 0; } static void adm8211_bss_info_changed(struct ieee80211_hw *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, u32 changes) { struct adm8211_priv *priv = dev->priv; if (!(changes & BSS_CHANGED_BSSID)) return; if (!ether_addr_equal(conf->bssid, priv->bssid)) { adm8211_set_bssid(dev, conf->bssid); memcpy(priv->bssid, conf->bssid, ETH_ALEN); } } static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { unsigned int bit_nr; u32 mc_filter[2]; struct netdev_hw_addr *ha; mc_filter[1] = mc_filter[0] = 0; netdev_hw_addr_list_for_each(ha, mc_list) { bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; bit_nr &= 0x3F; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } return mc_filter[0] | ((u64)(mc_filter[1]) << 32); } static void adm8211_configure_filter(struct ieee80211_hw *dev, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { static const u8 bcast[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; struct adm8211_priv *priv = dev->priv; unsigned int new_flags; u32 mc_filter[2]; mc_filter[0] = multicast; mc_filter[1] = multicast >> 32; new_flags = 0; if (*total_flags & FIF_PROMISC_IN_BSS) { new_flags |= FIF_PROMISC_IN_BSS; priv->nar |= ADM8211_NAR_PR; priv->nar &= ~ADM8211_NAR_MM; mc_filter[1] = mc_filter[0] = ~0; } else if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) { new_flags |= FIF_ALLMULTI; priv->nar &= ~ADM8211_NAR_PR; priv->nar |= ADM8211_NAR_MM; mc_filter[1] = mc_filter[0] = ~0; } else { priv->nar &= ~(ADM8211_NAR_MM | ADM8211_NAR_PR); } ADM8211_IDLE_RX(); ADM8211_CSR_WRITE(MAR0, mc_filter[0]); ADM8211_CSR_WRITE(MAR1, mc_filter[1]); ADM8211_CSR_READ(NAR); if (priv->nar & 
ADM8211_NAR_PR) dev->flags |= IEEE80211_HW_RX_INCLUDES_FCS; else dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS; if (*total_flags & FIF_BCN_PRBRESP_PROMISC) adm8211_set_bssid(dev, bcast); else adm8211_set_bssid(dev, priv->bssid); ADM8211_RESTORE(); *total_flags = new_flags; } static int adm8211_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { struct adm8211_priv *priv = dev->priv; if (priv->mode != NL80211_IFTYPE_MONITOR) return -EOPNOTSUPP; switch (vif->type) { case NL80211_IFTYPE_STATION: priv->mode = vif->type; break; default: return -EOPNOTSUPP; } ADM8211_IDLE(); ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr)); ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4))); adm8211_update_mode(dev); ADM8211_RESTORE(); return 0; } static void adm8211_remove_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { struct adm8211_priv *priv = dev->priv; priv->mode = NL80211_IFTYPE_MONITOR; } static int adm8211_init_rings(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; struct adm8211_desc *desc = NULL; struct adm8211_rx_ring_info *rx_info; struct adm8211_tx_ring_info *tx_info; unsigned int i; for (i = 0; i < priv->rx_ring_size; i++) { desc = &priv->rx_ring[i]; desc->status = 0; desc->length = cpu_to_le32(RX_PKT_SIZE); priv->rx_buffers[i].skb = NULL; } /* Mark the end of RX ring; hw returns to base address after this * descriptor */ desc->length |= cpu_to_le32(RDES1_CONTROL_RER); for (i = 0; i < priv->rx_ring_size; i++) { desc = &priv->rx_ring[i]; rx_info = &priv->rx_buffers[i]; rx_info->skb = dev_alloc_skb(RX_PKT_SIZE); if (rx_info->skb == NULL) break; rx_info->mapping = pci_map_single(priv->pdev, skb_tail_pointer(rx_info->skb), RX_PKT_SIZE, PCI_DMA_FROMDEVICE); desc->buffer1 = cpu_to_le32(rx_info->mapping); desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL); } /* Setup TX ring. 
TX buffers descriptors will be filled in as needed */ for (i = 0; i < priv->tx_ring_size; i++) { desc = &priv->tx_ring[i]; tx_info = &priv->tx_buffers[i]; tx_info->skb = NULL; tx_info->mapping = 0; desc->status = 0; } desc->length = cpu_to_le32(TDES1_CONTROL_TER); priv->cur_rx = priv->cur_tx = priv->dirty_tx = 0; ADM8211_CSR_WRITE(RDB, priv->rx_ring_dma); ADM8211_CSR_WRITE(TDBD, priv->tx_ring_dma); return 0; } static void adm8211_free_rings(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; unsigned int i; for (i = 0; i < priv->rx_ring_size; i++) { if (!priv->rx_buffers[i].skb) continue; pci_unmap_single( priv->pdev, priv->rx_buffers[i].mapping, RX_PKT_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb(priv->rx_buffers[i].skb); } for (i = 0; i < priv->tx_ring_size; i++) { if (!priv->tx_buffers[i].skb) continue; pci_unmap_single(priv->pdev, priv->tx_buffers[i].mapping, priv->tx_buffers[i].skb->len, PCI_DMA_TODEVICE); dev_kfree_skb(priv->tx_buffers[i].skb); } } static int adm8211_start(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; int retval; /* Power up MAC and RF chips */ retval = adm8211_hw_reset(dev); if (retval) { wiphy_err(dev->wiphy, "hardware reset failed\n"); goto fail; } retval = adm8211_init_rings(dev); if (retval) { wiphy_err(dev->wiphy, "failed to initialize rings\n"); goto fail; } /* Init hardware */ adm8211_hw_init(dev); adm8211_rf_set_channel(dev, priv->channel); retval = request_irq(priv->pdev->irq, adm8211_interrupt, IRQF_SHARED, "adm8211", dev); if (retval) { wiphy_err(dev->wiphy, "failed to register IRQ handler\n"); goto fail; } ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE | ADM8211_IER_RCIE | ADM8211_IER_TCIE | ADM8211_IER_TDUIE | ADM8211_IER_GPTIE); priv->mode = NL80211_IFTYPE_MONITOR; adm8211_update_mode(dev); ADM8211_CSR_WRITE(RDR, 0); adm8211_set_interval(dev, 100, 10); return 0; fail: return retval; } static void adm8211_stop(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; priv->mode 
= NL80211_IFTYPE_UNSPECIFIED; priv->nar = 0; ADM8211_CSR_WRITE(NAR, 0); ADM8211_CSR_WRITE(IER, 0); ADM8211_CSR_READ(NAR); free_irq(priv->pdev->irq, dev); adm8211_free_rings(dev); } static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int len, int plcp_signal, int short_preamble) { /* Alternative calculation from NetBSD: */ /* IEEE 802.11b durations for DSSS PHY in microseconds */ #define IEEE80211_DUR_DS_LONG_PREAMBLE 144 #define IEEE80211_DUR_DS_SHORT_PREAMBLE 72 #define IEEE80211_DUR_DS_FAST_PLCPHDR 24 #define IEEE80211_DUR_DS_SLOW_PLCPHDR 48 #define IEEE80211_DUR_DS_SLOW_ACK 112 #define IEEE80211_DUR_DS_FAST_ACK 56 #define IEEE80211_DUR_DS_SLOW_CTS 112 #define IEEE80211_DUR_DS_FAST_CTS 56 #define IEEE80211_DUR_DS_SLOT 20 #define IEEE80211_DUR_DS_SIFS 10 int remainder; *dur = (80 * (24 + payload_len) + plcp_signal - 1) / plcp_signal; if (plcp_signal <= PLCP_SIGNAL_2M) /* 1-2Mbps WLAN: send ACK/CTS at 1Mbps */ *dur += 3 * (IEEE80211_DUR_DS_SIFS + IEEE80211_DUR_DS_SHORT_PREAMBLE + IEEE80211_DUR_DS_FAST_PLCPHDR) + IEEE80211_DUR_DS_SLOW_CTS + IEEE80211_DUR_DS_SLOW_ACK; else /* 5-11Mbps WLAN: send ACK/CTS at 2Mbps */ *dur += 3 * (IEEE80211_DUR_DS_SIFS + IEEE80211_DUR_DS_SHORT_PREAMBLE + IEEE80211_DUR_DS_FAST_PLCPHDR) + IEEE80211_DUR_DS_FAST_CTS + IEEE80211_DUR_DS_FAST_ACK; /* lengthen duration if long preamble */ if (!short_preamble) *dur += 3 * (IEEE80211_DUR_DS_LONG_PREAMBLE - IEEE80211_DUR_DS_SHORT_PREAMBLE) + 3 * (IEEE80211_DUR_DS_SLOW_PLCPHDR - IEEE80211_DUR_DS_FAST_PLCPHDR); *plcp = (80 * len) / plcp_signal; remainder = (80 * len) % plcp_signal; if (plcp_signal == PLCP_SIGNAL_11M && remainder <= 30 && remainder > 0) *plcp = (*plcp | 0x8000) + 1; else if (remainder) (*plcp)++; } /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, u16 plcp_signal, size_t hdrlen) { struct adm8211_priv *priv = dev->priv; unsigned long flags; dma_addr_t mapping; 
unsigned int entry; u32 flag; mapping = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size / 2) flag = TDES1_CONTROL_IC | TDES1_CONTROL_LS | TDES1_CONTROL_FS; else flag = TDES1_CONTROL_LS | TDES1_CONTROL_FS; if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size - 2) ieee80211_stop_queue(dev, 0); entry = priv->cur_tx % priv->tx_ring_size; priv->tx_buffers[entry].skb = skb; priv->tx_buffers[entry].mapping = mapping; priv->tx_buffers[entry].hdrlen = hdrlen; priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping); if (entry == priv->tx_ring_size - 1) flag |= TDES1_CONTROL_TER; priv->tx_ring[entry].length = cpu_to_le32(flag | skb->len); /* Set TX rate (SIGNAL field in PLCP PPDU format) */ flag = TDES0_CONTROL_OWN | (plcp_signal << 20) | 8 /* ? */; priv->tx_ring[entry].status = cpu_to_le32(flag); priv->cur_tx++; spin_unlock_irqrestore(&priv->lock, flags); /* Trigger transmit poll */ ADM8211_CSR_WRITE(TDR, 0); } /* Put adm8211_tx_hdr on skb and transmit */ static void adm8211_tx(struct ieee80211_hw *dev, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct adm8211_tx_hdr *txhdr; size_t payload_len, hdrlen; int plcp, dur, len, plcp_signal, short_preamble; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_rate *txrate = ieee80211_get_tx_rate(dev, info); u8 rc_flags; rc_flags = info->control.rates[0].flags; short_preamble = !!(rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); plcp_signal = txrate->bitrate; hdr = (struct ieee80211_hdr *)skb->data; hdrlen = ieee80211_hdrlen(hdr->frame_control); memcpy(skb->cb, skb->data, hdrlen); hdr = (struct ieee80211_hdr *)skb->cb; skb_pull(skb, hdrlen); payload_len = skb->len; txhdr = (struct adm8211_tx_hdr *) skb_push(skb, sizeof(*txhdr)); memset(txhdr, 0, sizeof(*txhdr)); memcpy(txhdr->da, ieee80211_get_DA(hdr), ETH_ALEN); txhdr->signal = plcp_signal; 
txhdr->frame_body_size = cpu_to_le16(payload_len); txhdr->frame_control = hdr->frame_control; len = hdrlen + payload_len + FCS_LEN; txhdr->frag = cpu_to_le16(0x0FFF); adm8211_calc_durations(&dur, &plcp, payload_len, len, plcp_signal, short_preamble); txhdr->plcp_frag_head_len = cpu_to_le16(plcp); txhdr->plcp_frag_tail_len = cpu_to_le16(plcp); txhdr->dur_frag_head = cpu_to_le16(dur); txhdr->dur_frag_tail = cpu_to_le16(dur); txhdr->header_control = cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER); if (short_preamble) txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE); if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS); txhdr->retry_limit = info->control.rates[0].count; adm8211_tx_raw(dev, skb, plcp_signal, hdrlen); } static int adm8211_alloc_rings(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; unsigned int ring_size; priv->rx_buffers = kmalloc(sizeof(*priv->rx_buffers) * priv->rx_ring_size + sizeof(*priv->tx_buffers) * priv->tx_ring_size, GFP_KERNEL); if (!priv->rx_buffers) return -ENOMEM; priv->tx_buffers = (void *)priv->rx_buffers + sizeof(*priv->rx_buffers) * priv->rx_ring_size; /* Allocate TX/RX descriptors */ ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size + sizeof(struct adm8211_desc) * priv->tx_ring_size; priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size, &priv->rx_ring_dma); if (!priv->rx_ring) { kfree(priv->rx_buffers); priv->rx_buffers = NULL; priv->tx_buffers = NULL; return -ENOMEM; } priv->tx_ring = priv->rx_ring + priv->rx_ring_size; priv->tx_ring_dma = priv->rx_ring_dma + sizeof(struct adm8211_desc) * priv->rx_ring_size; return 0; } static const struct ieee80211_ops adm8211_ops = { .tx = adm8211_tx, .start = adm8211_start, .stop = adm8211_stop, .add_interface = adm8211_add_interface, .remove_interface = adm8211_remove_interface, .config = adm8211_config, .bss_info_changed = adm8211_bss_info_changed, .prepare_multicast = 
adm8211_prepare_multicast, .configure_filter = adm8211_configure_filter, .get_stats = adm8211_get_stats, .get_tsf = adm8211_get_tsft }; static int adm8211_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ieee80211_hw *dev; struct adm8211_priv *priv; unsigned long mem_addr, mem_len; unsigned int io_addr, io_len; int err; u32 reg; u8 perm_addr[ETH_ALEN]; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR "%s (adm8211): Cannot enable new PCI device\n", pci_name(pdev)); return err; } io_addr = pci_resource_start(pdev, 0); io_len = pci_resource_len(pdev, 0); mem_addr = pci_resource_start(pdev, 1); mem_len = pci_resource_len(pdev, 1); if (io_len < 256 || mem_len < 1024) { printk(KERN_ERR "%s (adm8211): Too short PCI resources\n", pci_name(pdev)); goto err_disable_pdev; } /* check signature */ pci_read_config_dword(pdev, 0x80 /* CR32 */, &reg); if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) { printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n", pci_name(pdev), reg); goto err_disable_pdev; } err = pci_request_regions(pdev, "adm8211"); if (err) { printk(KERN_ERR "%s (adm8211): Cannot obtain PCI resources\n", pci_name(pdev)); return err; /* someone else grabbed it? 
don't disable it */ } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR "%s (adm8211): No suitable DMA available\n", pci_name(pdev)); goto err_free_reg; } pci_set_master(pdev); dev = ieee80211_alloc_hw(sizeof(*priv), &adm8211_ops); if (!dev) { printk(KERN_ERR "%s (adm8211): ieee80211 alloc failed\n", pci_name(pdev)); err = -ENOMEM; goto err_free_reg; } priv = dev->priv; priv->pdev = pdev; spin_lock_init(&priv->lock); SET_IEEE80211_DEV(dev, &pdev->dev); pci_set_drvdata(pdev, dev); priv->map = pci_iomap(pdev, 1, mem_len); if (!priv->map) priv->map = pci_iomap(pdev, 0, io_len); if (!priv->map) { printk(KERN_ERR "%s (adm8211): Cannot map device memory\n", pci_name(pdev)); err = -ENOMEM; goto err_free_dev; } priv->rx_ring_size = rx_ring_size; priv->tx_ring_size = tx_ring_size; if (adm8211_alloc_rings(dev)) { printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n", pci_name(pdev)); goto err_iounmap; } *(__le32 *)perm_addr = cpu_to_le32(ADM8211_CSR_READ(PAR0)); *(__le16 *)&perm_addr[4] = cpu_to_le16(ADM8211_CSR_READ(PAR1) & 0xFFFF); if (!is_valid_ether_addr(perm_addr)) { printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n", pci_name(pdev)); eth_random_addr(perm_addr); } SET_IEEE80211_PERM_ADDR(dev, perm_addr); dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr); /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */ dev->flags = IEEE80211_HW_SIGNAL_UNSPEC; dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); dev->max_signal = 100; /* FIXME: find better value */ dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */ priv->retry_limit = 3; priv->ant_power = 0x40; priv->tx_power = 0x40; priv->lpf_cutoff = 0xFF; priv->lnags_threshold = 0xFF; priv->mode = NL80211_IFTYPE_UNSPECIFIED; /* Power-on issue. 
EEPROM won't read correctly without */ if (pdev->revision >= ADM8211_REV_BA) { ADM8211_CSR_WRITE(FRCTL, 0); ADM8211_CSR_READ(FRCTL); ADM8211_CSR_WRITE(FRCTL, 1); ADM8211_CSR_READ(FRCTL); msleep(100); } err = adm8211_read_eeprom(dev); if (err) { printk(KERN_ERR "%s (adm8211): Can't alloc eeprom buffer\n", pci_name(pdev)); goto err_free_desc; } priv->channel = 1; dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; err = ieee80211_register_hw(dev); if (err) { printk(KERN_ERR "%s (adm8211): Cannot register device\n", pci_name(pdev)); goto err_free_eeprom; } wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n", dev->wiphy->perm_addr, pdev->revision); return 0; err_free_eeprom: kfree(priv->eeprom); err_free_desc: pci_free_consistent(pdev, sizeof(struct adm8211_desc) * priv->rx_ring_size + sizeof(struct adm8211_desc) * priv->tx_ring_size, priv->rx_ring, priv->rx_ring_dma); kfree(priv->rx_buffers); err_iounmap: pci_iounmap(pdev, priv->map); err_free_dev: ieee80211_free_hw(dev); err_free_reg: pci_release_regions(pdev); err_disable_pdev: pci_disable_device(pdev); return err; } static void adm8211_remove(struct pci_dev *pdev) { struct ieee80211_hw *dev = pci_get_drvdata(pdev); struct adm8211_priv *priv; if (!dev) return; ieee80211_unregister_hw(dev); priv = dev->priv; pci_free_consistent(pdev, sizeof(struct adm8211_desc) * priv->rx_ring_size + sizeof(struct adm8211_desc) * priv->tx_ring_size, priv->rx_ring, priv->rx_ring_dma); kfree(priv->rx_buffers); kfree(priv->eeprom); pci_iounmap(pdev, priv->map); pci_release_regions(pdev); pci_disable_device(pdev); ieee80211_free_hw(dev); } #ifdef CONFIG_PM static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state) { pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int adm8211_resume(struct pci_dev *pdev) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); return 0; } #endif /* CONFIG_PM */ MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table); /* TODO: implement enable_wake 
*/ static struct pci_driver adm8211_driver = { .name = "adm8211", .id_table = adm8211_pci_id_table, .probe = adm8211_probe, .remove = adm8211_remove, #ifdef CONFIG_PM .suspend = adm8211_suspend, .resume = adm8211_resume, #endif /* CONFIG_PM */ }; module_pci_driver(adm8211_driver);
gpl-2.0
BenHuiHui/linux
arch/arm/mach-s3c64xx/s3c6400.c
368
2111
/* linux/arch/arm/mach-s3c64xx/cpu.c * * Copyright 2009 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* * NOTE: Code in this file is not used when booting with Device Tree support. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/device.h> #include <linux/serial_core.h> #include <linux/serial_s3c.h> #include <linux/platform_device.h> #include <linux/of.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <asm/irq.h> #include <plat/cpu-freq.h> #include <mach/regs-clock.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/sdhci.h> #include <plat/iic-core.h> #include "common.h" #include "onenand-core.h" void __init s3c6400_map_io(void) { /* setup SDHCI */ s3c6400_default_sdhci0(); s3c6400_default_sdhci1(); s3c6400_default_sdhci2(); /* the i2c devices are directly compatible with s3c2440 */ s3c_i2c0_setname("s3c2440-i2c"); s3c_device_nand.name = "s3c6400-nand"; s3c_onenand_setname("s3c6400-onenand"); s3c64xx_onenand1_setname("s3c6400-onenand"); } void __init s3c6400_init_irq(void) { /* VIC0 does not have IRQS 5..7, * VIC1 is fully populated. */ s3c64xx_init_irq(~0 & ~(0xf << 5), ~0); } static struct bus_type s3c6400_subsys = { .name = "s3c6400-core", .dev_name = "s3c6400-core", }; static struct device s3c6400_dev = { .bus = &s3c6400_subsys, }; static int __init s3c6400_core_init(void) { /* Not applicable when using DT. 
*/ if (of_have_populated_dt()) return 0; return subsys_system_register(&s3c6400_subsys, NULL); } core_initcall(s3c6400_core_init); int __init s3c6400_init(void) { printk("S3C6400: Initialising architecture\n"); return device_register(&s3c6400_dev); }
gpl-2.0
saydulk/linux
drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
624
13001
/* Intel Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Contact Information: * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 */ #include <linux/ptp_classify.h> #include <linux/ptp_clock_kernel.h> #include "fm10k.h" #define FM10K_TS_TX_TIMEOUT (HZ * 15) void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface, struct skb_shared_hwtstamps *hwtstamp, u64 systime) { unsigned long flags; read_lock_irqsave(&interface->systime_lock, flags); systime += interface->ptp_adjust; read_unlock_irqrestore(&interface->systime_lock, flags); hwtstamp->hwtstamp = ns_to_ktime(systime); } static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface, __le16 dglort) { struct sk_buff_head *list = &interface->ts_tx_skb_queue; struct sk_buff *skb; skb_queue_walk(list, skb) { if (FM10K_CB(skb)->fi.w.dglort == dglort) return skb; } return NULL; } void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb) { struct sk_buff_head *list = &interface->ts_tx_skb_queue; struct sk_buff *clone; unsigned long flags; /* create clone for us to return on the Tx path */ clone = skb_clone_sk(skb); if (!clone) return; FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT; spin_lock_irqsave(&list->lock, flags); /* attempt to locate any buffers with the same dglort, * if none are present then insert skb in 
tail of list */ skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort); if (!skb) { skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; __skb_queue_tail(list, clone); } spin_unlock_irqrestore(&list->lock, flags); /* if list is already has one then we just free the clone */ if (skb) dev_kfree_skb(clone); } void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, u64 systime) { struct skb_shared_hwtstamps shhwtstamps; struct sk_buff_head *list = &interface->ts_tx_skb_queue; struct sk_buff *skb; unsigned long flags; spin_lock_irqsave(&list->lock, flags); /* attempt to locate and pull the sk_buff out of the list */ skb = fm10k_ts_tx_skb(interface, dglort); if (skb) __skb_unlink(skb, list); spin_unlock_irqrestore(&list->lock, flags); /* if not found do nothing */ if (!skb) return; /* timestamp the sk_buff and free out copy */ fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime); skb_tstamp_tx(skb, &shhwtstamps); dev_kfree_skb_any(skb); } void fm10k_ts_tx_subtask(struct fm10k_intfc *interface) { struct sk_buff_head *list = &interface->ts_tx_skb_queue; struct sk_buff *skb, *tmp; unsigned long flags; /* If we're down or resetting, just bail */ if (test_bit(__FM10K_DOWN, &interface->state) || test_bit(__FM10K_RESETTING, &interface->state)) return; spin_lock_irqsave(&list->lock, flags); /* walk though the list and flush any expired timestamp packets */ skb_queue_walk_safe(list, skb, tmp) { if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout)) continue; __skb_unlink(skb, list); kfree_skb(skb); interface->tx_hwtstamp_timeouts++; } spin_unlock_irqrestore(&list->lock, flags); } static u64 fm10k_systime_read(struct fm10k_intfc *interface) { struct fm10k_hw *hw = &interface->hw; return hw->mac.ops.read_systime(hw); } void fm10k_ts_reset(struct fm10k_intfc *interface) { s64 ns = ktime_to_ns(ktime_get_real()); unsigned long flags; /* reinitialize the clock */ write_lock_irqsave(&interface->systime_lock, flags); interface->ptp_adjust = 
fm10k_systime_read(interface) - ns; write_unlock_irqrestore(&interface->systime_lock, flags); } void fm10k_ts_init(struct fm10k_intfc *interface) { /* Initialize lock protecting systime access */ rwlock_init(&interface->systime_lock); /* Initialize skb queue for pending timestamp requests */ skb_queue_head_init(&interface->ts_tx_skb_queue); /* reset the clock to current kernel time */ fm10k_ts_reset(interface); } /** * fm10k_get_ts_config - get current hardware timestamping configuration * @netdev: network interface device structure * @ifreq: ioctl data * * This function returns the current timestamping settings. Rather than * attempt to deconstruct registers to fill in the values, simply keep a copy * of the old settings around, and return a copy when requested. */ int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr) { struct fm10k_intfc *interface = netdev_priv(netdev); struct hwtstamp_config *config = &interface->ts_config; return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT : 0; } /** * fm10k_set_ts_config - control hardware time stamping * @netdev: network interface device structure * @ifreq: ioctl data * * Outgoing time stamping can be enabled and disabled. Play nice and * disable it when requested, although it shouldn't cause any overhead * when no packet needs it. At most one packet in the queue may be * marked for time stamping, otherwise it would be impossible to tell * for sure to which packet the hardware time stamp belongs. * * Incoming time stamping has to be configured via the hardware * filters. Not all combinations are supported, in particular event * type has to be specified. Matching the kind of event packet is * not supported, with the exception of "all V2 events regardless of * level 2 or 4". * * Since hardware always timestamps Path delay packets when timestamping V2 * packets, regardless of the type specified in the register, only use V2 * Event mode. 
This more accurately tells the user what the hardware is going * to do anyways. */ int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr) { struct fm10k_intfc *interface = netdev_priv(netdev); struct hwtstamp_config ts_config; if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config))) return -EFAULT; /* reserved for future extensions */ if (ts_config.flags) return -EINVAL; switch (ts_config.tx_type) { case HWTSTAMP_TX_OFF: break; case HWTSTAMP_TX_ON: /* we likely need some check here to see if this is supported */ break; default: return -ERANGE; } switch (ts_config.rx_filter) { case HWTSTAMP_FILTER_NONE: interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_ALL: interface->flags |= FM10K_FLAG_RX_TS_ENABLED; ts_config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } /* save these settings for future reference */ interface->ts_config = ts_config; return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ? -EFAULT : 0; } static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) { struct fm10k_intfc *interface; struct fm10k_hw *hw; int err; interface = container_of(ptp, struct fm10k_intfc, ptp_caps); hw = &interface->hw; err = hw->mac.ops.adjust_systime(hw, ppb); /* the only error we should see is if the value is out of range */ return (err == FM10K_ERR_PARAM) ? 
-ERANGE : err; } static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct fm10k_intfc *interface; unsigned long flags; interface = container_of(ptp, struct fm10k_intfc, ptp_caps); write_lock_irqsave(&interface->systime_lock, flags); interface->ptp_adjust += delta; write_unlock_irqrestore(&interface->systime_lock, flags); return 0; } static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct fm10k_intfc *interface; unsigned long flags; u64 now; interface = container_of(ptp, struct fm10k_intfc, ptp_caps); read_lock_irqsave(&interface->systime_lock, flags); now = fm10k_systime_read(interface) + interface->ptp_adjust; read_unlock_irqrestore(&interface->systime_lock, flags); *ts = ns_to_timespec64(now); return 0; } static int fm10k_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct fm10k_intfc *interface; unsigned long flags; u64 ns = timespec64_to_ns(ts); interface = container_of(ptp, struct fm10k_intfc, ptp_caps); write_lock_irqsave(&interface->systime_lock, flags); interface->ptp_adjust = fm10k_systime_read(interface) - ns; write_unlock_irqrestore(&interface->systime_lock, flags); return 0; } static int fm10k_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int __always_unused on) { struct ptp_clock_time *t = &rq->perout.period; struct fm10k_intfc *interface; struct fm10k_hw *hw; u64 period; u32 step; /* we can only support periodic output */ if (rq->type != PTP_CLK_REQ_PEROUT) return -EINVAL; /* verify the requested channel is there */ if (rq->perout.index >= ptp->n_per_out) return -EINVAL; /* we cannot enforce start time as there is no * mechanism for that in the hardware, we can only control * the period. 
*/ /* we cannot support periods greater than 4 seconds due to reg limit */ if (t->sec > 4 || t->sec < 0) return -ERANGE; interface = container_of(ptp, struct fm10k_intfc, ptp_caps); hw = &interface->hw; /* we simply cannot support the operation if we don't have BAR4 */ if (!hw->sw_addr) return -ENOTSUPP; /* convert to unsigned 64b ns, verify we can put it in a 32b register */ period = t->sec * 1000000000LL + t->nsec; /* determine the minimum size for period */ step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) & FM10K_SYSTIME_CFG_STEP_MASK); /* verify the value is in range supported by hardware */ if ((period && (period < step)) || (period > U32_MAX)) return -ERANGE; /* notify hardware of request to being sending pulses */ fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index), (u32)period); return 0; } static struct ptp_pin_desc fm10k_ptp_pd[2] = { { .name = "IEEE1588_PULSE0", .index = 0, .func = PTP_PF_PEROUT, .chan = 0 }, { .name = "IEEE1588_PULSE1", .index = 1, .func = PTP_PF_PEROUT, .chan = 1 } }; static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { /* verify the requested pin is there */ if (pin >= ptp->n_pins || !ptp->pin_config) return -EINVAL; /* enforce locked channels, no changing them */ if (chan != ptp->pin_config[pin].chan) return -EINVAL; /* we want to keep the functions locked as well */ if (func != ptp->pin_config[pin].func) return -EINVAL; return 0; } void fm10k_ptp_register(struct fm10k_intfc *interface) { struct ptp_clock_info *ptp_caps = &interface->ptp_caps; struct device *dev = &interface->pdev->dev; struct ptp_clock *ptp_clock; snprintf(ptp_caps->name, sizeof(ptp_caps->name), "%s", interface->netdev->name); ptp_caps->owner = THIS_MODULE; /* This math is simply the inverse of the math in * fm10k_adjust_systime_pf applied to an adjustment value * of 2^30 - 1 which is the maximum value of the register: * max_ppb == ((2^30 - 1) * 5^9) / 2^31 */ ptp_caps->max_adj = 
976562; ptp_caps->adjfreq = fm10k_ptp_adjfreq; ptp_caps->adjtime = fm10k_ptp_adjtime; ptp_caps->gettime64 = fm10k_ptp_gettime; ptp_caps->settime64 = fm10k_ptp_settime; /* provide pins if BAR4 is accessible */ if (interface->sw_addr) { /* enable periodic outputs */ ptp_caps->n_per_out = 2; ptp_caps->enable = fm10k_ptp_enable; /* enable clock pins */ ptp_caps->verify = fm10k_ptp_verify; ptp_caps->n_pins = 2; ptp_caps->pin_config = fm10k_ptp_pd; } ptp_clock = ptp_clock_register(ptp_caps, dev); if (IS_ERR(ptp_clock)) { ptp_clock = NULL; dev_err(dev, "ptp_clock_register failed\n"); } else { dev_info(dev, "registered PHC device %s\n", ptp_caps->name); } interface->ptp_clock = ptp_clock; } void fm10k_ptp_unregister(struct fm10k_intfc *interface) { struct ptp_clock *ptp_clock = interface->ptp_clock; struct device *dev = &interface->pdev->dev; if (!ptp_clock) return; interface->ptp_clock = NULL; ptp_clock_unregister(ptp_clock); dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name); }
gpl-2.0
kmmxracer/samsung_kernel_gt-p6210
drivers/usb/core/devices.c
624
19044
/* * devices.c * (C) Copyright 1999 Randy Dunlap. * (C) Copyright 1999,2000 Thomas Sailer <sailer@ife.ee.ethz.ch>. * (proc file per device) * (C) Copyright 1999 Deti Fliegl (new USB architecture) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * ************************************************************* * * <mountpoint>/devices contains USB topology, device, config, class, * interface, & endpoint data. * * I considered using /proc/bus/usb/devices/device# for each device * as it is attached or detached, but I didn't like this for some * reason -- maybe it's just too deep of a directory structure. * I also don't like looking in multiple places to gather and view * the data. Having only one file for ./devices also prevents race * conditions that could arise if a program was reading device info * for devices that are being removed (unplugged). (That is, the * program may find a directory for devnum_12 then try to open it, * but it was just unplugged, so the directory is now deleted. * But programs would just have to be prepared for situations like * this in any plug-and-play environment.) * * 1999-12-16: Thomas Sailer <sailer@ife.ee.ethz.ch> * Converted the whole proc stuff to real * read methods. Now not the whole device list needs to fit * into one page, only the device list for one bus. 
* Added a poll method to /proc/bus/usb/devices, to wake * up an eventual usbd * 2000-01-04: Thomas Sailer <sailer@ife.ee.ethz.ch> * Turned into its own filesystem * 2000-07-05: Ashley Montanaro <ashley@compsoc.man.ac.uk> * Converted file reading routine to dump to buffer once * per device, not per bus */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/gfp.h> #include <linux/poll.h> #include <linux/usb.h> #include <linux/smp_lock.h> #include <linux/usbdevice_fs.h> #include <linux/usb/hcd.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include "usb.h" /* Define ALLOW_SERIAL_NUMBER if you want to see the serial number of devices */ #define ALLOW_SERIAL_NUMBER static const char *format_topo = /* T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=ddd MxCh=dd */ "\nT: Bus=%2.2d Lev=%2.2d Prnt=%2.2d Port=%2.2d Cnt=%2.2d Dev#=%3d Spd=%3s MxCh=%2d\n"; static const char *format_string_manufacturer = /* S: Manufacturer=xxxx */ "S: Manufacturer=%.100s\n"; static const char *format_string_product = /* S: Product=xxxx */ "S: Product=%.100s\n"; #ifdef ALLOW_SERIAL_NUMBER static const char *format_string_serialnumber = /* S: SerialNumber=xxxx */ "S: SerialNumber=%.100s\n"; #endif static const char *format_bandwidth = /* B: Alloc=ddd/ddd us (xx%), #Int=ddd, #Iso=ddd */ "B: Alloc=%3d/%3d us (%2d%%), #Int=%3d, #Iso=%3d\n"; static const char *format_device1 = /* D: Ver=xx.xx Cls=xx(sssss) Sub=xx Prot=xx MxPS=dd #Cfgs=dd */ "D: Ver=%2x.%02x Cls=%02x(%-5s) Sub=%02x Prot=%02x MxPS=%2d #Cfgs=%3d\n"; static const char *format_device2 = /* P: Vendor=xxxx ProdID=xxxx Rev=xx.xx */ "P: Vendor=%04x ProdID=%04x Rev=%2x.%02x\n"; static const char *format_config = /* C: #Ifs=dd Cfg#=dd Atr=xx MPwr=dddmA */ "C:%c #Ifs=%2d Cfg#=%2d Atr=%02x MxPwr=%3dmA\n"; static const char *format_iad = /* A: FirstIf#=dd IfCount=dd Cls=xx(sssss) Sub=xx Prot=xx */ "A: FirstIf#=%2d IfCount=%2d Cls=%02x(%-5s) Sub=%02x Prot=%02x\n"; static const char *format_iface = /* I: If#=dd Alt=dd #EPs=dd 
Cls=xx(sssss) Sub=xx Prot=xx Driver=xxxx*/ "I:%c If#=%2d Alt=%2d #EPs=%2d Cls=%02x(%-5s) Sub=%02x Prot=%02x Driver=%s\n"; static const char *format_endpt = /* E: Ad=xx(s) Atr=xx(ssss) MxPS=dddd Ivl=D?s */ "E: Ad=%02x(%c) Atr=%02x(%-4s) MxPS=%4d Ivl=%d%cs\n"; /* * Need access to the driver and USB bus lists. * extern struct list_head usb_bus_list; * However, these will come from functions that return ptrs to each of them. */ /* * Wait for an connect/disconnect event to happen. We initialize * the event counter with an odd number, and each event will increment * the event counter by two, so it will always _stay_ odd. That means * that it will never be zero, so "event 0" will never match a current * event, and thus 'poll' will always trigger as readable for the first * time it gets called. */ static struct device_connect_event { atomic_t count; wait_queue_head_t wait; } device_event = { .count = ATOMIC_INIT(1), .wait = __WAIT_QUEUE_HEAD_INITIALIZER(device_event.wait) }; struct class_info { int class; char *class_name; }; static const struct class_info clas_info[] = { /* max. 5 chars. 
per name string */ {USB_CLASS_PER_INTERFACE, ">ifc"}, {USB_CLASS_AUDIO, "audio"}, {USB_CLASS_COMM, "comm."}, {USB_CLASS_HID, "HID"}, {USB_CLASS_PHYSICAL, "PID"}, {USB_CLASS_STILL_IMAGE, "still"}, {USB_CLASS_PRINTER, "print"}, {USB_CLASS_MASS_STORAGE, "stor."}, {USB_CLASS_HUB, "hub"}, {USB_CLASS_CDC_DATA, "data"}, {USB_CLASS_CSCID, "scard"}, {USB_CLASS_CONTENT_SEC, "c-sec"}, {USB_CLASS_VIDEO, "video"}, {USB_CLASS_WIRELESS_CONTROLLER, "wlcon"}, {USB_CLASS_MISC, "misc"}, {USB_CLASS_APP_SPEC, "app."}, {USB_CLASS_VENDOR_SPEC, "vend."}, {-1, "unk."} /* leave as last */ }; /*****************************************************************/ void usbfs_conn_disc_event(void) { atomic_add(2, &device_event.count); wake_up(&device_event.wait); } static const char *class_decode(const int class) { int ix; for (ix = 0; clas_info[ix].class != -1; ix++) if (clas_info[ix].class == class) break; return clas_info[ix].class_name; } static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, const struct usb_endpoint_descriptor *desc) { char dir, unit, *type; unsigned interval, bandwidth = 1; if (start > end) return start; dir = usb_endpoint_dir_in(desc) ? 
'I' : 'O'; if (speed == USB_SPEED_HIGH) { switch (le16_to_cpu(desc->wMaxPacketSize) & (0x03 << 11)) { case 1 << 11: bandwidth = 2; break; case 2 << 11: bandwidth = 3; break; } } /* this isn't checking for illegal values */ switch (usb_endpoint_type(desc)) { case USB_ENDPOINT_XFER_CONTROL: type = "Ctrl"; if (speed == USB_SPEED_HIGH) /* uframes per NAK */ interval = desc->bInterval; else interval = 0; dir = 'B'; /* ctrl is bidirectional */ break; case USB_ENDPOINT_XFER_ISOC: type = "Isoc"; interval = 1 << (desc->bInterval - 1); break; case USB_ENDPOINT_XFER_BULK: type = "Bulk"; if (speed == USB_SPEED_HIGH && dir == 'O') /* uframes per NAK */ interval = desc->bInterval; else interval = 0; break; case USB_ENDPOINT_XFER_INT: type = "Int."; if (speed == USB_SPEED_HIGH) interval = 1 << (desc->bInterval - 1); else interval = desc->bInterval; break; default: /* "can't happen" */ return start; } interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000; if (interval % 1000) unit = 'u'; else { unit = 'm'; interval /= 1000; } start += sprintf(start, format_endpt, desc->bEndpointAddress, dir, desc->bmAttributes, type, (le16_to_cpu(desc->wMaxPacketSize) & 0x07ff) * bandwidth, interval, unit); return start; } static char *usb_dump_interface_descriptor(char *start, char *end, const struct usb_interface_cache *intfc, const struct usb_interface *iface, int setno) { const struct usb_interface_descriptor *desc; const char *driver_name = ""; int active = 0; if (start > end) return start; desc = &intfc->altsetting[setno].desc; if (iface) { driver_name = (iface->dev.driver ? iface->dev.driver->name : "(none)"); active = (desc == &iface->cur_altsetting->desc); } start += sprintf(start, format_iface, active ? 
'*' : ' ', /* mark active altsetting */ desc->bInterfaceNumber, desc->bAlternateSetting, desc->bNumEndpoints, desc->bInterfaceClass, class_decode(desc->bInterfaceClass), desc->bInterfaceSubClass, desc->bInterfaceProtocol, driver_name); return start; } static char *usb_dump_interface(int speed, char *start, char *end, const struct usb_interface_cache *intfc, const struct usb_interface *iface, int setno) { const struct usb_host_interface *desc = &intfc->altsetting[setno]; int i; start = usb_dump_interface_descriptor(start, end, intfc, iface, setno); for (i = 0; i < desc->desc.bNumEndpoints; i++) { if (start > end) return start; start = usb_dump_endpoint_descriptor(speed, start, end, &desc->endpoint[i].desc); } return start; } static char *usb_dump_iad_descriptor(char *start, char *end, const struct usb_interface_assoc_descriptor *iad) { if (start > end) return start; start += sprintf(start, format_iad, iad->bFirstInterface, iad->bInterfaceCount, iad->bFunctionClass, class_decode(iad->bFunctionClass), iad->bFunctionSubClass, iad->bFunctionProtocol); return start; } /* TBD: * 0. TBDs * 1. marking active interface altsettings (code lists all, but should mark * which ones are active, if any) */ static char *usb_dump_config_descriptor(char *start, char *end, const struct usb_config_descriptor *desc, int active) { if (start > end) return start; start += sprintf(start, format_config, /* mark active/actual/current cfg. */ active ? '*' : ' ', desc->bNumInterfaces, desc->bConfigurationValue, desc->bmAttributes, desc->bMaxPower * 2); return start; } static char *usb_dump_config(int speed, char *start, char *end, const struct usb_host_config *config, int active) { int i, j; struct usb_interface_cache *intfc; struct usb_interface *interface; if (start > end) return start; if (!config) /* getting these some in 2.3.7; none in 2.3.6 */ return start + sprintf(start, "(null Cfg. 
desc.)\n"); start = usb_dump_config_descriptor(start, end, &config->desc, active); for (i = 0; i < USB_MAXIADS; i++) { if (config->intf_assoc[i] == NULL) break; start = usb_dump_iad_descriptor(start, end, config->intf_assoc[i]); } for (i = 0; i < config->desc.bNumInterfaces; i++) { intfc = config->intf_cache[i]; interface = config->interface[i]; for (j = 0; j < intfc->num_altsetting; j++) { if (start > end) return start; start = usb_dump_interface(speed, start, end, intfc, interface, j); } } return start; } /* * Dump the different USB descriptors. */ static char *usb_dump_device_descriptor(char *start, char *end, const struct usb_device_descriptor *desc) { u16 bcdUSB = le16_to_cpu(desc->bcdUSB); u16 bcdDevice = le16_to_cpu(desc->bcdDevice); if (start > end) return start; start += sprintf(start, format_device1, bcdUSB >> 8, bcdUSB & 0xff, desc->bDeviceClass, class_decode(desc->bDeviceClass), desc->bDeviceSubClass, desc->bDeviceProtocol, desc->bMaxPacketSize0, desc->bNumConfigurations); if (start > end) return start; start += sprintf(start, format_device2, le16_to_cpu(desc->idVendor), le16_to_cpu(desc->idProduct), bcdDevice >> 8, bcdDevice & 0xff); return start; } /* * Dump the different strings that this device holds. 
*/ static char *usb_dump_device_strings(char *start, char *end, struct usb_device *dev) { if (start > end) return start; if (dev->manufacturer) start += sprintf(start, format_string_manufacturer, dev->manufacturer); if (start > end) goto out; if (dev->product) start += sprintf(start, format_string_product, dev->product); if (start > end) goto out; #ifdef ALLOW_SERIAL_NUMBER if (dev->serial) start += sprintf(start, format_string_serialnumber, dev->serial); #endif out: return start; } static char *usb_dump_desc(char *start, char *end, struct usb_device *dev) { int i; if (start > end) return start; start = usb_dump_device_descriptor(start, end, &dev->descriptor); if (start > end) return start; start = usb_dump_device_strings(start, end, dev); for (i = 0; i < dev->descriptor.bNumConfigurations; i++) { if (start > end) return start; start = usb_dump_config(dev->speed, start, end, dev->config + i, /* active ? */ (dev->config + i) == dev->actconfig); } return start; } #ifdef PROC_EXTRA /* TBD: may want to add this code later */ static char *usb_dump_hub_descriptor(char *start, char *end, const struct usb_hub_descriptor *desc) { int leng = USB_DT_HUB_NONVAR_SIZE; unsigned char *ptr = (unsigned char *)desc; if (start > end) return start; start += sprintf(start, "Interface:"); while (leng && start <= end) { start += sprintf(start, " %02x", *ptr); ptr++; leng--; } *start++ = '\n'; return start; } static char *usb_dump_string(char *start, char *end, const struct usb_device *dev, char *id, int index) { if (start > end) return start; start += sprintf(start, "Interface:"); if (index <= dev->maxstring && dev->stringindex && dev->stringindex[index]) start += sprintf(start, "%s: %.100s ", id, dev->stringindex[index]); return start; } #endif /* PROC_EXTRA */ /*****************************************************************/ /* This is a recursive function. 
Parameters: * buffer - the user-space buffer to write data into * nbytes - the maximum number of bytes to write * skip_bytes - the number of bytes to skip before writing anything * file_offset - the offset into the devices file on completion * The caller must own the device lock. */ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, loff_t *skip_bytes, loff_t *file_offset, struct usb_device *usbdev, struct usb_bus *bus, int level, int index, int count) { int chix; int ret, cnt = 0; int parent_devnum = 0; char *pages_start, *data_end, *speed; unsigned int length; ssize_t total_written = 0; /* don't bother with anything else if we're not writing any data */ if (*nbytes <= 0) return 0; if (level > MAX_TOPO_LEVEL) return 0; /* allocate 2^1 pages = 8K (on i386); * should be more than enough for one device */ pages_start = (char *)__get_free_pages(GFP_NOIO, 1); if (!pages_start) return -ENOMEM; if (usbdev->parent && usbdev->parent->devnum != -1) parent_devnum = usbdev->parent->devnum; /* * So the root hub's parent is 0 and any device that is * plugged into the root hub has a parent of 0. */ switch (usbdev->speed) { case USB_SPEED_LOW: speed = "1.5"; break; case USB_SPEED_UNKNOWN: /* usb 1.1 root hub code */ case USB_SPEED_FULL: speed = "12 "; break; case USB_SPEED_HIGH: speed = "480"; break; default: speed = "?? "; } data_end = pages_start + sprintf(pages_start, format_topo, bus->busnum, level, parent_devnum, index, count, usbdev->devnum, speed, usbdev->maxchild); /* * level = topology-tier level; * parent_devnum = parent device number; * index = parent's connector number; * count = device count at this level */ /* If this is the root hub, display the bandwidth information */ if (level == 0) { int max; /* high speed reserves 80%, full/low reserves 90% */ if (usbdev->speed == USB_SPEED_HIGH) max = 800; else max = FRAME_TIME_MAX_USECS_ALLOC; /* report "average" periodic allocation over a microsecond. 
* the schedules are actually bursty, HCDs need to deal with * that and just compute/report this average. */ data_end += sprintf(data_end, format_bandwidth, bus->bandwidth_allocated, max, (100 * bus->bandwidth_allocated + max / 2) / max, bus->bandwidth_int_reqs, bus->bandwidth_isoc_reqs); } data_end = usb_dump_desc(data_end, pages_start + (2 * PAGE_SIZE) - 256, usbdev); if (data_end > (pages_start + (2 * PAGE_SIZE) - 256)) data_end += sprintf(data_end, "(truncated)\n"); length = data_end - pages_start; /* if we can start copying some data to the user */ if (length > *skip_bytes) { length -= *skip_bytes; if (length > *nbytes) length = *nbytes; if (copy_to_user(*buffer, pages_start + *skip_bytes, length)) { free_pages((unsigned long)pages_start, 1); return -EFAULT; } *nbytes -= length; *file_offset += length; total_written += length; *buffer += length; *skip_bytes = 0; } else *skip_bytes -= length; free_pages((unsigned long)pages_start, 1); /* Now look at all of this device's children. */ for (chix = 0; chix < usbdev->maxchild; chix++) { struct usb_device *childdev = usbdev->children[chix]; if (childdev) { usb_lock_device(childdev); ret = usb_device_dump(buffer, nbytes, skip_bytes, file_offset, childdev, bus, level + 1, chix, ++cnt); usb_unlock_device(childdev); if (ret == -EFAULT) return total_written; total_written += ret; } } return total_written; } static ssize_t usb_device_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct usb_bus *bus; ssize_t ret, total_written = 0; loff_t skip_bytes = *ppos; if (*ppos < 0) return -EINVAL; if (nbytes <= 0) return 0; if (!access_ok(VERIFY_WRITE, buf, nbytes)) return -EFAULT; mutex_lock(&usb_bus_list_lock); /* print devices for all busses */ list_for_each_entry(bus, &usb_bus_list, bus_list) { /* recurse through all children of the root hub */ if (!bus->root_hub) continue; usb_lock_device(bus->root_hub); ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos, bus->root_hub, bus, 0, 0, 0); 
usb_unlock_device(bus->root_hub); if (ret < 0) { mutex_unlock(&usb_bus_list_lock); return ret; } total_written += ret; } mutex_unlock(&usb_bus_list_lock); return total_written; } /* Kernel lock for "lastev" protection */ static unsigned int usb_device_poll(struct file *file, struct poll_table_struct *wait) { unsigned int event_count; poll_wait(file, &device_event.wait, wait); event_count = atomic_read(&device_event.count); if (file->f_version != event_count) { file->f_version = event_count; return POLLIN | POLLRDNORM; } return 0; } static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig) { loff_t ret; mutex_lock(&file->f_dentry->d_inode->i_mutex); switch (orig) { case 0: file->f_pos = offset; ret = file->f_pos; break; case 1: file->f_pos += offset; ret = file->f_pos; break; case 2: default: ret = -EINVAL; } mutex_unlock(&file->f_dentry->d_inode->i_mutex); return ret; } const struct file_operations usbfs_devices_fops = { .llseek = usb_device_lseek, .read = usb_device_read, .poll = usb_device_poll, };
gpl-2.0
Hima-Dev/android_kernel_htc_msm8994
arch/metag/mm/hugetlbpage.c
1136
5884
/*
 * arch/metag/mm/hugetlbpage.c
 *
 * METAG HugeTLB page support.
 *
 * Cloned from SuperH
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 *
 * Validates that [addr, addr+len) is usable for a huge mapping:
 * both addr and len must be aligned to the huge page size, the
 * range must fit below TASK_SIZE, and the range must not collide
 * with existing non-huge VMAs sharing the same 2nd-level page
 * table block (hence the ALIGN_HUGEPT() checks on both ends).
 * Returns 0 on success, -EINVAL on any violation.
 *
 * NOTE(review): the flag tested here is MAP_HUGETLB (an mmap()
 * flag), not VM_HUGETLB; this matches the original source but
 * looks suspicious — confirm against the metag tree before
 * changing anything.
 */
int prepare_hugepage_range(struct file *file, unsigned long addr,
						unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct hstate *h = hstate_file(file);
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	if (TASK_SIZE - len < addr)
		return -EINVAL;

	/* reject a non-huge mapping inside the huge-PT block we'd start in */
	vma = find_vma(mm, ALIGN_HUGEPT(addr));
	if (vma && !(vma->vm_flags & MAP_HUGETLB))
		return -EINVAL;

	vma = find_vma(mm, addr);
	if (vma) {
		/* next mapping must not overlap the requested range ... */
		if (addr + len > vma->vm_start)
			return -EINVAL;
		/* ... nor share our trailing huge-PT block unless it's huge */
		if (!(vma->vm_flags & MAP_HUGETLB) &&
		    (ALIGN_HUGEPT(addr + len) > vma->vm_start))
			return -EINVAL;
	}
	return 0;
}

/*
 * Allocate (or find) the pte for a huge page at @addr and mark the
 * top-level entry with the huge page-size bits (_PAGE_SZHUGE) so the
 * hardware/TLB refill treats the whole 2nd-level table as huge.
 * Returns the mapped pte, or NULL if pte_alloc_map() fails.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_alloc_map(mm, NULL, pmd, addr);

	/* replace whatever size bits were set with the huge-page size */
	pgd->pgd &= ~_PAGE_SZ_MASK;
	pgd->pgd |= _PAGE_SZHUGE;

	return pte;
}

/*
 * Look up the pte for @addr without allocating.  The walk assumes
 * the intermediate levels already exist (folded pud/pmd on metag).
 */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_offset_kernel(pmd, addr);

	return pte;
}

/* metag does not share huge pmds between processes. */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

/* Huge pages are not addressable via follow_huge_addr() on this arch. */
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

/* A pmd is "huge" when its page size exceeds the base page size. */
int pmd_huge(pmd_t pmd)
{
	return pmd_page_shift(pmd) > PAGE_SHIFT;
}

/* No huge pud support on metag. */
int pud_huge(pud_t pud)
{
	return 0;
}

int pmd_huge_support(void)
{
	return 1;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA

/*
 * Look for an unmapped area starting after another hugetlb vma.
 * There are guaranteed to be no huge pte's spare if all the huge pages are
 * full size (4MB), so in that case compile out this search.
 */
#if HPAGE_SHIFT == HUGEPT_SHIFT
static inline unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	return 0;
}
#else
/*
 * Scan the VMA list for a gap of @len bytes that sits inside (or
 * immediately after) a partially-filled huge-page page-table block,
 * so new huge mappings pack into already-huge 2nd-level tables.
 * mm->context.part_huge caches the end of the last partial block to
 * resume the search there.  Returns the found address or 0 if no
 * suitable existing-block gap exists.
 */
static unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;
	int after_huge;

	if (mm->context.part_huge) {
		start_addr = mm->context.part_huge;
		after_huge = 1;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		after_huge = 0;
	}
new_search:
	addr = start_addr;
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto new_search;
			}
			return 0;
		}
		/* skip ahead if we've aligned right over some vmas */
		if (vma && vma->vm_end <= addr)
			continue;
		/* space before the next vma? */
		if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
			   <= vma->vm_start)) {
			unsigned long end = addr + len;
			if (end & HUGEPT_MASK)
				mm->context.part_huge = end;
			else if (addr == mm->context.part_huge)
				mm->context.part_huge = 0;
			return addr;
		}
		if (vma && (vma->vm_flags & MAP_HUGETLB)) {
			/* space after a huge vma in 2nd level page table? */
			if (vma->vm_end & HUGEPT_MASK) {
				after_huge = 1;
				/* no need to align to the next PT block */
				addr = vma->vm_end;
				continue;
			}
		}
		after_huge = 0;
		addr = ALIGN_HUGEPT(vma->vm_end);
	}
}
#endif

/* Do a full search to find an area without any nearby normal pages. */
static unsigned long
hugetlb_get_unmapped_area_new_pmd(unsigned long len)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	/* force natural alignment to a whole huge-PT block */
	info.align_mask = PAGE_MASK & HUGEPT_MASK;
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

/*
 * arch hook for mmap() of hugetlbfs files: validate @len, honour
 * MAP_FIXED and caller hints via prepare_hugepage_range(), then try
 * to reuse space near existing huge mappings before falling back to
 * a fresh naturally-aligned block.
 */
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		if (!prepare_hugepage_range(file, addr, len))
			return addr;
	}

	/*
	 * Look for an existing hugetlb vma with space after it (this is
	 * to minimise fragmentation caused by huge pages).
	 */
	addr = hugetlb_get_unmapped_area_existing(len);
	if (addr)
		return addr;

	/*
	 * Find an unmapped naturally aligned set of 4MB blocks that we can use
	 * for huge pages.
	 */
	return hugetlb_get_unmapped_area_new_pmd(len);
}
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/* necessary for boot time 4MB huge page allocation */
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == (1 << HPAGE_SHIFT)) {
		hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
	} else {
		pr_err("hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
gpl-2.0
CandyDevices/kernel_mediatek_sprout
drivers/scsi/qla2xxx/qla_isr.c
1648
82905
/* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include "qla_target.h" #include <linux/delay.h> #include <linux/slab.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_bsg_fc.h> #include <scsi/scsi_eh.h> static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. * @irq: * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. */ irqreturn_t qla2100_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct device_reg_2xxx __iomem *reg; int status; unsigned long iter; uint16_t hccr; uint16_t mb[4]; struct rsp_que *rsp; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x505d, "%s: NULL response queue pointer.\n", __func__); return (IRQ_NONE); } ha = rsp->hw; reg = &ha->iobase->isp; status = 0; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { hccr = RD_REG_WORD(&reg->hccr); if (hccr & HCCR_RISC_PAUSE) { if (pci_channel_offline(ha->pdev)) break; /* * Issue a "HARD" reset in order for the RISC interrupt * bit to be cleared. Schedule a big hammer to get * out of the RISC PAUSED state. 
*/ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); RD_REG_WORD(&reg->hccr); ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0) break; if (RD_REG_WORD(&reg->semaphore) & BIT_0) { WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); /* Get mailbox data. */ mb[0] = RD_MAILBOX_REG(ha, reg, 0); if (mb[0] > 0x3fff && mb[0] < 0x8000) { qla2x00_mbx_completion(vha, mb[0]); status |= MBX_INTERRUPT; } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); mb[3] = RD_MAILBOX_REG(ha, reg, 3); qla2x00_async_event(vha, rsp, mb); } else { /*EMPTY*/ ql_dbg(ql_dbg_async, vha, 0x5025, "Unrecognized interrupt type (%d).\n", mb[0]); } /* Release mailbox registers. */ WRT_REG_WORD(&reg->semaphore, 0); RD_REG_WORD(&reg->semaphore); } else { qla2x00_process_response_queue(rsp); WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); } } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return (IRQ_HANDLED); } /** * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. * @irq: * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. 
*/ irqreturn_t qla2300_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct device_reg_2xxx __iomem *reg; int status; unsigned long iter; uint32_t stat; uint16_t hccr; uint16_t mb[4]; struct rsp_que *rsp; struct qla_hw_data *ha; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x5058, "%s: NULL response queue pointer.\n", __func__); return (IRQ_NONE); } ha = rsp->hw; reg = &ha->iobase->isp; status = 0; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->u.isp2300.host_status); if (stat & HSR_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_WORD(&reg->hccr); if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) ql_log(ql_log_warn, vha, 0x5026, "Parity error -- HCCR=%x, Dumping " "firmware.\n", hccr); else ql_log(ql_log_warn, vha, 0x5027, "RISC paused -- HCCR=%x, Dumping " "firmware.\n", hccr); /* * Issue a "HARD" reset in order for the RISC * interrupt bit to be cleared. Schedule a big * hammer to get out of the RISC PAUSED state. */ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); RD_REG_WORD(&reg->hccr); ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSR_RISC_INT) == 0) break; switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla2x00_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; /* Release mailbox registers. 
*/ WRT_REG_WORD(&reg->semaphore, 0); break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); mb[3] = RD_MAILBOX_REG(ha, reg, 3); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla2x00_process_response_queue(rsp); break; case 0x15: mb[0] = MBA_CMPLT_1_16BIT; mb[1] = MSW(stat); qla2x00_async_event(vha, rsp, mb); break; case 0x16: mb[0] = MBA_SCSI_COMPLETION; mb[1] = MSW(stat); mb[2] = RD_MAILBOX_REG(ha, reg, 2); qla2x00_async_event(vha, rsp, mb); break; default: ql_dbg(ql_dbg_async, vha, 0x5028, "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD_RELAXED(&reg->hccr); } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return (IRQ_HANDLED); } /** * qla2x00_mbx_completion() - Process mailbox command completions. * @ha: SCSI driver HA context * @mb0: Mailbox0 register */ static void qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; uint32_t mboxes; uint16_t __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; /* Read all mbox registers? */ mboxes = (1 << ha->mbx_count) - 1; if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); else mboxes = ha->mcp->in_mb; /* Load return mailbox registers. 
*/ ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; mboxes >>= 1; wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); for (cnt = 1; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); else if (mboxes & BIT_0) ha->mailbox_out[cnt] = RD_REG_WORD(wptr); wptr++; mboxes >>= 1; } } static void qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) { static char *event[] = { "Complete", "Request Notification", "Time Extension" }; int rval; struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; uint16_t __iomem *wptr; uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; /* Seed data -- mailbox1 -> mailbox7. */ wptr = (uint16_t __iomem *)&reg24->mailbox1; for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) mb[cnt] = RD_REG_WORD(wptr); ql_dbg(ql_dbg_async, vha, 0x5021, "Inter-Driver Communication %s -- " "%04x %04x %04x %04x %04x %04x %04x.\n", event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]); if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) { vha->hw->flags.idc_compl_status = 1; if (vha->hw->notify_dcbx_comp) complete(&vha->hw->dcbx_comp); } /* Acknowledgement needed? [Notify && non-zero timeout]. 
*/ timeout = (descr >> 8) & 0xf; if (aen != MBA_IDC_NOTIFY || !timeout) return; ql_dbg(ql_dbg_async, vha, 0x5022, "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout); rval = qla2x00_post_idc_ack_work(vha, mb); if (rval != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0x5023, "IDC failed to post ACK.\n"); } #define LS_UNKNOWN 2 const char * qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) { static const char * const link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" }; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return link_speeds[0]; else if (speed == 0x13) return link_speeds[6]; else if (speed < 6) return link_speeds[speed]; else return link_speeds[LS_UNKNOWN]; } static void qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) { struct qla_hw_data *ha = vha->hw; /* * 8200 AEN Interpretation: * mb[0] = AEN code * mb[1] = AEN Reason code * mb[2] = LSW of Peg-Halt Status-1 Register * mb[6] = MSW of Peg-Halt Status-1 Register * mb[3] = LSW of Peg-Halt Status-2 register * mb[7] = MSW of Peg-Halt Status-2 register * mb[4] = IDC Device-State Register value * mb[5] = IDC Driver-Presence Register value */ ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", mb[0], mb[1], mb[2], mb[6]); ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | IDC_HEARTBEAT_FAILURE)) { ha->flags.nic_core_hung = 1; ql_log(ql_log_warn, vha, 0x5060, "83XX: F/W Error Reported: Check if reset required.\n"); if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { uint32_t protocol_engine_id, fw_err_code, err_level; /* * IDC_PEG_HALT_STATUS_CHANGE interpretation: * - PEG-Halt Status-1 Register: * (LSW = mb[2], MSW = mb[6]) * Bits 0-7 = protocol-engine ID * Bits 8-28 = f/w 
error code * Bits 29-31 = Error-level * Error-level 0x1 = Non-Fatal error * Error-level 0x2 = Recoverable Fatal error * Error-level 0x4 = UnRecoverable Fatal error * - PEG-Halt Status-2 Register: * (LSW = mb[3], MSW = mb[7]) */ protocol_engine_id = (mb[2] & 0xff); fw_err_code = (((mb[2] & 0xff00) >> 8) | ((mb[6] & 0x1fff) << 8)); err_level = ((mb[6] & 0xe000) >> 13); ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " "Register: protocol_engine_id=0x%x " "fw_err_code=0x%x err_level=0x%x.\n", protocol_engine_id, fw_err_code, err_level); ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " "Register: 0x%x%x.\n", mb[7], mb[3]); if (err_level == ERR_LEVEL_NON_FATAL) { ql_log(ql_log_warn, vha, 0x5063, "Not a fatal error, f/w has recovered " "iteself.\n"); } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { ql_log(ql_log_fatal, vha, 0x5064, "Recoverable Fatal error: Chip reset " "required.\n"); qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { ql_log(ql_log_fatal, vha, 0x5065, "Unrecoverable Fatal error: Set FAILED " "state, reboot required.\n"); qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_UNRECOVERABLE); } } if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { uint16_t peg_fw_state, nw_interface_link_up; uint16_t nw_interface_signal_detect, sfp_status; uint16_t htbt_counter, htbt_monitor_enable; uint16_t sfp_additonal_info, sfp_multirate; uint16_t sfp_tx_fault, link_speed, dcbx_status; /* * IDC_NIC_FW_REPORTED_FAILURE interpretation: * - PEG-to-FC Status Register: * (LSW = mb[2], MSW = mb[6]) * Bits 0-7 = Peg-Firmware state * Bit 8 = N/W Interface Link-up * Bit 9 = N/W Interface signal detected * Bits 10-11 = SFP Status * SFP Status 0x0 = SFP+ transceiver not expected * SFP Status 0x1 = SFP+ transceiver not present * SFP Status 0x2 = SFP+ transceiver invalid * SFP Status 0x3 = SFP+ transceiver present and * valid * Bits 12-14 = Heartbeat Counter * Bit 15 = Heartbeat Monitor Enable * Bits 16-17 = SFP Additional Info * 
SFP info 0x0 = Unregocnized transceiver for * Ethernet * SFP info 0x1 = SFP+ brand validation failed * SFP info 0x2 = SFP+ speed validation failed * SFP info 0x3 = SFP+ access error * Bit 18 = SFP Multirate * Bit 19 = SFP Tx Fault * Bits 20-22 = Link Speed * Bits 23-27 = Reserved * Bits 28-30 = DCBX Status * DCBX Status 0x0 = DCBX Disabled * DCBX Status 0x1 = DCBX Enabled * DCBX Status 0x2 = DCBX Exchange error * Bit 31 = Reserved */ peg_fw_state = (mb[2] & 0x00ff); nw_interface_link_up = ((mb[2] & 0x0100) >> 8); nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); sfp_status = ((mb[2] & 0x0c00) >> 10); htbt_counter = ((mb[2] & 0x7000) >> 12); htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); sfp_additonal_info = (mb[6] & 0x0003); sfp_multirate = ((mb[6] & 0x0004) >> 2); sfp_tx_fault = ((mb[6] & 0x0008) >> 3); link_speed = ((mb[6] & 0x0070) >> 4); dcbx_status = ((mb[6] & 0x7000) >> 12); ql_log(ql_log_warn, vha, 0x5066, "Peg-to-Fc Status Register:\n" "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " "nw_interface_signal_detect=0x%x" "\nsfp_statis=0x%x.\n ", peg_fw_state, nw_interface_link_up, nw_interface_signal_detect, sfp_status); ql_log(ql_log_warn, vha, 0x5067, "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ", htbt_counter, htbt_monitor_enable, sfp_additonal_info, sfp_multirate); ql_log(ql_log_warn, vha, 0x5068, "sfp_tx_fault=0x%x, link_state=0x%x, " "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, dcbx_status); qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); } if (mb[1] & IDC_HEARTBEAT_FAILURE) { ql_log(ql_log_warn, vha, 0x5069, "Heartbeat Failure encountered, chip reset " "required.\n"); qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); } } if (mb[1] & IDC_DEVICE_STATE_CHANGE) { ql_log(ql_log_info, vha, 0x506a, "IDC Device-State changed = 0x%x.\n", mb[4]); if (ha->flags.nic_core_reset_owner) return; qla83xx_schedule_work(vha, MBA_IDC_AEN); } } int qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t 
rscn_entry) { struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; uint32_t vp_did; unsigned long flags; int ret = 0; if (!ha->num_vhosts) return ret; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { vp_did = vp->d_id.b24; if (vp_did == rscn_entry) { ret = 1; break; } } spin_unlock_irqrestore(&ha->vport_slock, flags); return ret; } /** * qla2x00_async_event() - Process aynchronous events. * @ha: SCSI driver HA context * @mb: Mailbox registers (0 - 3) */ void qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) { uint16_t handle_cnt; uint16_t cnt, mbx; uint32_t handles[5]; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; uint32_t rscn_entry, host_pid; unsigned long flags; /* Setup to process RIO completion. */ handle_cnt = 0; if (IS_CNA_CAPABLE(ha)) goto skip_rio; switch (mb[0]) { case MBA_SCSI_COMPLETION: handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); handle_cnt = 1; break; case MBA_CMPLT_1_16BIT: handles[0] = mb[1]; handle_cnt = 1; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_3_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handle_cnt = 3; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_4_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); handle_cnt = 4; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_5_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); handle_cnt = 5; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_32BIT: handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); handles[1] = 
le32_to_cpu( ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | RD_MAILBOX_REG(ha, reg, 6)); handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; default: break; } skip_rio: switch (mb[0]) { case MBA_SCSI_COMPLETION: /* Fast Post */ if (!vha->flags.online) break; for (cnt = 0; cnt < handle_cnt; cnt++) qla2x00_process_completed_request(vha, rsp->req, handles[cnt]); break; case MBA_RESET: /* Reset */ ql_dbg(ql_dbg_async, vha, 0x5002, "Asynchronous RESET.\n"); set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); break; case MBA_SYSTEM_ERR: /* System Error */ mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ? RD_REG_WORD(&reg24->mailbox7) : 0; ql_log(ql_log_warn, vha, 0x5003, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); ha->isp_ops->fw_dump(vha, 1); if (IS_FWI2_CAPABLE(ha)) { if (mb[1] == 0 && mb[2] == 0) { ql_log(ql_log_fatal, vha, 0x5004, "Unrecoverable Hardware Error: adapter " "marked OFFLINE!\n"); vha->flags.online = 0; vha->device_flags |= DFLG_DEV_FAILED; } else { /* Check to see if MPI timeout occurred */ if ((mbx & MBX_3) && (ha->flags.port0)) set_bit(MPI_RESET_NEEDED, &vha->dpc_flags); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } } else if (mb[1] == 0) { ql_log(ql_log_fatal, vha, 0x5005, "Unrecoverable Hardware Error: adapter marked " "OFFLINE!\n"); vha->flags.online = 0; vha->device_flags |= DFLG_DEV_FAILED; } else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ ql_log(ql_log_warn, vha, 0x5006, "ISP Request Transfer Error (%x).\n", mb[1]); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ ql_log(ql_log_warn, vha, 0x5007, "ISP Response Transfer Error.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ ql_dbg(ql_dbg_async, vha, 0x5008, "Asynchronous WAKEUP_THRES.\n"); break; case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ ql_dbg(ql_dbg_async, vha, 
0x5009, "LIP occurred (%x).\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); break; case MBA_LOOP_UP: /* Loop Up Event */ if (IS_QLA2100(ha) || IS_QLA2200(ha)) ha->link_data_rate = PORT_SPEED_1GB; else ha->link_data_rate = mb[1]; ql_dbg(ql_dbg_async, vha, 0x500a, "LOOP UP detected (%s Gbps).\n", qla2x00_get_link_speed_str(ha, ha->link_data_rate)); vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); break; case MBA_LOOP_DOWN: /* Loop Down Event */ mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) ? RD_REG_WORD(&reg24->mailbox4) : 0; mbx = IS_QLA82XX(ha) ? 
RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; ql_dbg(ql_dbg_async, vha, 0x500b, "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3], mbx); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); vha->device_flags |= DFLG_NO_CABLE; qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } vha->flags.management_server_logged_in = 0; ha->link_data_rate = PORT_SPEED_UNKNOWN; qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); break; case MBA_LIP_RESET: /* LIP reset occurred */ ql_dbg(ql_dbg_async, vha, 0x500c, "LIP reset occurred (%x).\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); ha->operating_mode = LOOP; vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); break; /* case MBA_DCBX_COMPLETE: */ case MBA_POINT_TO_POINT: /* Point-to-Point */ if (IS_QLA2100(ha)) break; if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) { ql_dbg(ql_dbg_async, vha, 0x500d, "DCBX Completed -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); if (ha->notify_dcbx_comp) complete(&ha->dcbx_comp); } else ql_dbg(ql_dbg_async, vha, 0x500e, "Asynchronous P2P MODE received.\n"); /* * Until there's a transition from loop down to loop up, treat * this as loop down only. 
*/ if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); ha->flags.gpsc_supported = 1; vha->flags.management_server_logged_in = 0; break; case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ if (IS_QLA2100(ha)) break; ql_dbg(ql_dbg_async, vha, 0x500f, "Configuration change detected: value=%x.\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); break; case MBA_PORT_UPDATE: /* Port database update */ /* * Handle only global and vn-port update events * * Relevant inputs: * mb[1] = N_Port handle of changed port * OR 0xffff for global event * mb[2] = New login state * 7 = Port logged out * mb[3] = LSB is vp_idx, 0xff = all vps * * Skip processing if: * Event is global, vp_idx is NOT all vps, * vp_idx does not match * Event is not global, vp_idx does not match */ if (IS_QLA2XXX_MIDTYPE(ha) && ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) break; /* Global event -- port logout or port unavailable. 
*/ if (mb[1] == 0xffff && mb[2] == 0x7) { ql_dbg(ql_dbg_async, vha, 0x5010, "Port unavailable %04x %04x %04x.\n", mb[1], mb[2], mb[3]); ql_log(ql_log_warn, vha, 0x505e, "Link is offline.\n"); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); vha->device_flags |= DFLG_NO_CABLE; qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); qla2x00_mark_all_devices_lost(vha, 1); } vha->flags.management_server_logged_in = 0; ha->link_data_rate = PORT_SPEED_UNKNOWN; break; } /* * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET * event etc. earlier indicating loop is down) then process * it. Otherwise ignore it and Wait for RSCN to come in. */ atomic_set(&vha->loop_down_timer, 0); if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) { ql_dbg(ql_dbg_async, vha, 0x5011, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", mb[1], mb[2], mb[3]); qlt_async_event(mb[0], vha, mb); break; } ql_dbg(ql_dbg_async, vha, 0x5012, "Port database changed %04x %04x %04x.\n", mb[1], mb[2], mb[3]); ql_log(ql_log_warn, vha, 0x505f, "Link is operational (%s Gbps).\n", qla2x00_get_link_speed_str(ha, ha->link_data_rate)); /* * Mark all devices as missing so we will login again. */ atomic_set(&vha->loop_state, LOOP_UP); qla2x00_mark_all_devices_lost(vha, 1); if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha)) set_bit(SCR_PENDING, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); qlt_async_event(mb[0], vha, mb); break; case MBA_RSCN_UPDATE: /* State Change Registration */ /* Check if the Vport has issued a SCR */ if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) break; /* Only handle SCNs for our Vport index. 
*/ if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) break; ql_dbg(ql_dbg_async, vha, 0x5013, "RSCN database changed -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) | vha->d_id.b.al_pa; if (rscn_entry == host_pid) { ql_dbg(ql_dbg_async, vha, 0x5014, "Ignoring RSCN update to local host " "port ID (%06x).\n", host_pid); break; } /* Ignore reserved bits from RSCN-payload. */ rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; /* Skip RSCNs for virtual ports on the same physical port */ if (qla2x00_is_a_vp_did(vha, rscn_entry)) break; atomic_set(&vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0; set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(RSCN_UPDATE, &vha->dpc_flags); qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); break; /* case MBA_RIO_RESPONSE: */ case MBA_ZIO_RESPONSE: ql_dbg(ql_dbg_async, vha, 0x5015, "[R|Z]IO update completion.\n"); if (IS_FWI2_CAPABLE(ha)) qla24xx_process_response_queue(vha, rsp); else qla2x00_process_response_queue(rsp); break; case MBA_DISCARD_RND_FRAME: ql_dbg(ql_dbg_async, vha, 0x5016, "Discard RND Frame -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); break; case MBA_TRACE_NOTIFICATION: ql_dbg(ql_dbg_async, vha, 0x5017, "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); break; case MBA_ISP84XX_ALERT: ql_dbg(ql_dbg_async, vha, 0x5018, "ISP84XX Alert Notification -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); spin_lock_irqsave(&ha->cs84xx->access_lock, flags); switch (mb[1]) { case A84_PANIC_RECOVERY: ql_log(ql_log_info, vha, 0x5019, "Alert 84XX: panic recovery %04x %04x.\n", mb[2], mb[3]); break; case A84_OP_LOGIN_COMPLETE: ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; ql_log(ql_log_info, vha, 0x501a, "Alert 84XX: firmware version %x.\n", ha->cs84xx->op_fw_version); break; case A84_DIAG_LOGIN_COMPLETE: ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; ql_log(ql_log_info, vha, 0x501b, "Alert 
84XX: diagnostic firmware version %x.\n", ha->cs84xx->diag_fw_version); break; case A84_GOLD_LOGIN_COMPLETE: ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; ha->cs84xx->fw_update = 1; ql_log(ql_log_info, vha, 0x501c, "Alert 84XX: gold firmware version %x.\n", ha->cs84xx->gold_fw_version); break; default: ql_log(ql_log_warn, vha, 0x501d, "Alert 84xx: Invalid Alert %04x %04x %04x.\n", mb[1], mb[2], mb[3]); } spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); break; case MBA_DCBX_START: ql_dbg(ql_dbg_async, vha, 0x501e, "DCBX Started -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); break; case MBA_DCBX_PARAM_UPDATE: ql_dbg(ql_dbg_async, vha, 0x501f, "DCBX Parameters Updated -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); break; case MBA_FCF_CONF_ERR: ql_dbg(ql_dbg_async, vha, 0x5020, "FCF Configuration Error -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); break; case MBA_IDC_NOTIFY: if (IS_QLA8031(vha->hw)) { mb[4] = RD_REG_WORD(&reg24->mailbox4); if (((mb[2] & 0x7fff) == MBC_PORT_RESET || (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); /* * Extend loop down timer since port is active. 
*/ if (atomic_read(&vha->loop_state) == LOOP_DOWN) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2xxx_wake_dpc(vha); } } case MBA_IDC_COMPLETE: if (ha->notify_lb_portup_comp) complete(&ha->lb_portup_comp); /* Fallthru */ case MBA_IDC_TIME_EXT: if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) qla81xx_idc_event(vha, mb[0], mb[1]); break; case MBA_IDC_AEN: mb[4] = RD_REG_WORD(&reg24->mailbox4); mb[5] = RD_REG_WORD(&reg24->mailbox5); mb[6] = RD_REG_WORD(&reg24->mailbox6); mb[7] = RD_REG_WORD(&reg24->mailbox7); qla83xx_handle_8200_aen(vha, mb); break; default: ql_dbg(ql_dbg_async, vha, 0x5057, "Unknown AEN:%04x %04x %04x %04x\n", mb[0], mb[1], mb[2], mb[3]); } qlt_async_event(mb[0], vha, mb); if (!vha->vp_idx && ha->num_vhosts) qla2x00_alert_all_vps(rsp, mb); } /** * qla2x00_process_completed_request() - Process a Fast Post response. * @ha: SCSI driver HA context * @index: SRB index */ void qla2x00_process_completed_request(struct scsi_qla_host *vha, struct req_que *req, uint32_t index) { srb_t *sp; struct qla_hw_data *ha = vha->hw; /* Validate handle. */ if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x3014, "Invalid SCSI command index (%x).\n", index); if (IS_QLA82XX(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); return; } sp = req->outstanding_cmds[index]; if (sp) { /* Free outstanding command slot. 
*/
	req->outstanding_cmds[index] = NULL;

	/* Save ISP completion status */
	sp->done(ha, sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		/* Stale/bogus handle from firmware: force a reset path. */
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

/*
 * Translate an IOCB completion handle into its outstanding srb_t.
 *
 * Validates the handle against the request queue's outstanding-command
 * table.  On success the outstanding_cmds[] slot is cleared and the srb
 * is returned; on any validation failure NULL is returned (an ISP abort
 * is scheduled only for the out-of-range case, matching the code below).
 *
 * NOTE(review): 'func' is unused in this body — presumably kept for
 * logging symmetry with callers; confirm against other driver versions.
 * NOTE(review): in the !sp and handle-mismatch branches the slot is NOT
 * cleared before returning — intentional per the visible control flow.
 */
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		/* Slot already completed/freed — likely a timed-out command. */
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		/* srb in the slot does not own this handle. */
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	/* Claim the slot for this completion. */
	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

/*
 * Completion handler for a mailbox-style IOCB (login/logout path).
 * Resolves the srb, then decodes the mailbox status into the logio
 * data[] result words consumed by the async-login state machine.
 */
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	/* Default to error; overwritten below on success. */
	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0; if (mbx->entry_status) { ql_dbg(ql_dbg_async, vha, 0x5043, "Async-%s error entry - hdl=%x portid=%02x%02x%02x " "entry-status=%x status=%x state-flag=%x " "status-flags=%x.\n", type, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, mbx->entry_status, le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), le16_to_cpu(mbx->status_flags)); ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, (uint8_t *)mbx, sizeof(*mbx)); goto logio_done; } status = le16_to_cpu(mbx->status); if (status == 0x30 && sp->type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) status = 0; if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_async, vha, 0x5045, "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", type, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)); data[0] = MBS_COMMAND_COMPLETE; if (sp->type == SRB_LOGIN_CMD) { fcport->port_type = FCT_TARGET; if (le16_to_cpu(mbx->mb1) & BIT_0) fcport->port_type = FCT_INITIATOR; else if (le16_to_cpu(mbx->mb1) & BIT_1) fcport->flags |= FCF_FCP2_DEVICE; } goto logio_done; } data[0] = le16_to_cpu(mbx->mb0); switch (data[0]) { case MBS_PORT_ID_USED: data[1] = le16_to_cpu(mbx->mb1); break; case MBS_LOOP_ID_USED: break; default: data[0] = MBS_COMMAND_ERROR; break; } ql_log(ql_log_warn, vha, 0x5046, "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), le16_to_cpu(mbx->mb7)); logio_done: sp->done(vha, sp, 0); } static void qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, sts_entry_t *pkt, int iocb_type) { const char func[] = "CT_IOCB"; const char *type; srb_t *sp; struct fc_bsg_job *bsg_job; uint16_t comp_status; int res; sp = 
qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; bsg_job = sp->u.bsg_job; type = "ct pass-through"; comp_status = le16_to_cpu(pkt->comp_status); /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT * fc payload to the caller */ bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply); if (comp_status != CS_COMPLETE) { if (comp_status == CS_DATA_UNDERRUN) { res = DID_OK << 16; bsg_job->reply->reply_payload_rcv_len = le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); ql_log(ql_log_warn, vha, 0x5048, "CT pass-through-%s error " "comp_status-status=0x%x total_byte = 0x%x.\n", type, comp_status, bsg_job->reply->reply_payload_rcv_len); } else { ql_log(ql_log_warn, vha, 0x5049, "CT pass-through-%s error " "comp_status-status=0x%x.\n", type, comp_status); res = DID_ERROR << 16; bsg_job->reply->reply_payload_rcv_len = 0; } ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, (uint8_t *)pkt, sizeof(*pkt)); } else { res = DID_OK << 16; bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0; } sp->done(vha, sp, res); } static void qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, struct sts_entry_24xx *pkt, int iocb_type) { const char func[] = "ELS_CT_IOCB"; const char *type; srb_t *sp; struct fc_bsg_job *bsg_job; uint16_t comp_status; uint32_t fw_status[3]; uint8_t* fw_sts_ptr; int res; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; bsg_job = sp->u.bsg_job; type = NULL; switch (sp->type) { case SRB_ELS_CMD_RPT: case SRB_ELS_CMD_HST: type = "els"; break; case SRB_CT_CMD: type = "ct pass-through"; break; default: ql_dbg(ql_dbg_user, vha, 0x503e, "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); return; } comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); fw_status[2] = le16_to_cpu(((struct 
els_sts_entry_24xx*)pkt)->error_subcode_2); /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT * fc payload to the caller */ bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); if (comp_status != CS_COMPLETE) { if (comp_status == CS_DATA_UNDERRUN) { res = DID_OK << 16; bsg_job->reply->reply_payload_rcv_len = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); ql_dbg(ql_dbg_user, vha, 0x503f, "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", type, sp->handle, comp_status, fw_status[1], fw_status[2], le16_to_cpu(((struct els_sts_entry_24xx *) pkt)->total_byte_count)); fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); } else { ql_dbg(ql_dbg_user, vha, 0x5040, "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " "error subcode 1=0x%x error subcode 2=0x%x.\n", type, sp->handle, comp_status, le16_to_cpu(((struct els_sts_entry_24xx *) pkt)->error_subcode_1), le16_to_cpu(((struct els_sts_entry_24xx *) pkt)->error_subcode_2)); res = DID_ERROR << 16; bsg_job->reply->reply_payload_rcv_len = 0; fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); } ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, (uint8_t *)pkt, sizeof(*pkt)); } else { res = DID_OK << 16; bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0; } sp->done(vha, sp, res); } static void qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, struct logio_entry_24xx *logio) { const char func[] = "LOGIO-IOCB"; const char *type; fc_port_t *fcport; srb_t *sp; struct srb_iocb *lio; uint16_t *data; uint32_t iop[2]; sp = qla2x00_get_sp_from_handle(vha, func, req, logio); if (!sp) return; lio = &sp->u.iocb_cmd; 
type = sp->name; fcport = sp->fcport; data = lio->u.logio.data; data[0] = MBS_COMMAND_ERROR; data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? QLA_LOGIO_LOGIN_RETRIED : 0; if (logio->entry_status) { ql_log(ql_log_warn, fcport->vha, 0x5034, "Async-%s error entry - hdl=%x" "portid=%02x%02x%02x entry-status=%x.\n", type, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, logio->entry_status); ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, (uint8_t *)logio, sizeof(*logio)); goto logio_done; } if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { ql_dbg(ql_dbg_async, fcport->vha, 0x5036, "Async-%s complete - hdl=%x portid=%02x%02x%02x " "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le32_to_cpu(logio->io_parameter[0])); data[0] = MBS_COMMAND_COMPLETE; if (sp->type != SRB_LOGIN_CMD) goto logio_done; iop[0] = le32_to_cpu(logio->io_parameter[0]); if (iop[0] & BIT_4) { fcport->port_type = FCT_TARGET; if (iop[0] & BIT_8) fcport->flags |= FCF_FCP2_DEVICE; } else if (iop[0] & BIT_5) fcport->port_type = FCT_INITIATOR; if (iop[0] & BIT_7) fcport->flags |= FCF_CONF_COMP_SUPPORTED; if (logio->io_parameter[7] || logio->io_parameter[8]) fcport->supported_classes |= FC_COS_CLASS2; if (logio->io_parameter[9] || logio->io_parameter[10]) fcport->supported_classes |= FC_COS_CLASS3; goto logio_done; } iop[0] = le32_to_cpu(logio->io_parameter[0]); iop[1] = le32_to_cpu(logio->io_parameter[1]); switch (iop[0]) { case LSC_SCODE_PORTID_USED: data[0] = MBS_PORT_ID_USED; data[1] = LSW(iop[1]); break; case LSC_SCODE_NPORT_USED: data[0] = MBS_LOOP_ID_USED; break; default: data[0] = MBS_COMMAND_ERROR; break; } ql_dbg(ql_dbg_async, fcport->vha, 0x5037, "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, le16_to_cpu(logio->comp_status), le32_to_cpu(logio->io_parameter[0]), 
le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}

/*
 * Completion handler for a Task Management IOCB.
 *
 * Walks a cascade of validity checks over the returned status entry;
 * the first failing check logs a warning and leaves 'error' set, in
 * which case the raw entry is dumped and the error is reported through
 * iocb->u.tmf.data before completing the srb.
 *
 * NOTE(review): comp_status is compared byte-swapped via
 * __constant_cpu_to_le16() but the log messages print the raw
 * (little-endian) register values — confirm intended on big-endian.
 */
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status !=
	    __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, fcport->vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		/* Non-zero TM response code from the target. */
		ql_log(ql_log_warn, fcport->vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	sp->done(vha, sp, 0);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	/* Walk the ring until we hit an entry we already consumed. */
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		/* Advance ring index, wrapping at the end of the ring. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			/* Mark consumed before moving on. */
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		/* Dispatch on IOCB entry type. */
		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

/*
 * Copy sense data from a status IOCB into the SCSI command's sense
 * buffer.  Only 'par_sense_len' bytes are available in this entry; any
 * remainder is tracked via SET_CMD_SENSE_LEN/PTR and completed later
 * through rsp->status_srb by status-continuation entries.
 */
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	/* Clamp to the midlayer's sense buffer size. */
	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	/* Only par_sense_len bytes are present in this IOCB. */
	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		/* More sense data to come in continuation entries. */
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

/* T10 DIF tuple layout as carried in the status IOCB payload. */
struct scsi_dif_tuple {
	__be16 guard;       /* Checksum */
	__be16 app_tag;         /* APPL identifier */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
*/ static inline int qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) { struct scsi_qla_host *vha = sp->fcport->vha; struct scsi_cmnd *cmd = GET_CMD_SP(sp); uint8_t *ap = &sts24->data[12]; uint8_t *ep = &sts24->data[20]; uint32_t e_ref_tag, a_ref_tag; uint16_t e_app_tag, a_app_tag; uint16_t e_guard, a_guard; /* * swab32 of the "data" field in the beginning of qla2x00_status_entry() * would make guard field appear at offset 2 */ a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); ql_dbg(ql_dbg_io, vha, 0x3023, "iocb(s) %p Returned STATUS.\n", sts24); ql_dbg(ql_dbg_io, vha, 0x3024, "DIF ERROR in cmd 0x%x lba 0x%llx act ref" " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); /* * Ignore sector if: * For type 3: ref & app tag is all 'f's * For type 0,1,2: app tag is all 'f's */ if ((a_app_tag == 0xffff) && ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || (a_ref_tag == 0xffffffff))) { uint32_t blocks_done, resid; sector_t lba_s = scsi_get_lba(cmd); /* 2TB boundary case covered automatically with this */ blocks_done = e_ref_tag - (uint32_t)lba_s + 1; resid = scsi_bufflen(cmd) - (blocks_done * cmd->device->sector_size); scsi_set_resid(cmd, resid); cmd->result = DID_OK << 16; /* Update protection tag */ if (scsi_prot_sg_count(cmd)) { uint32_t i, j = 0, k = 0, num_ent; struct scatterlist *sg; struct sd_dif_tuple *spt; /* Patch the corresponding protection tags */ scsi_for_each_prot_sg(cmd, sg, scsi_prot_sg_count(cmd), i) { num_ent = sg_dma_len(sg) / 8; if (k + num_ent < blocks_done) { k += num_ent; continue; } j = blocks_done - k - 1; k = blocks_done; break; } if (k != 
blocks_done) { ql_log(ql_log_warn, vha, 0x302f, "unexpected tag values tag:lba=%x:%llx)\n", e_ref_tag, (unsigned long long)lba_s); return 1; } spt = page_address(sg_page(sg)) + sg->offset; spt += j; spt->app_tag = 0xffff; if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) spt->ref_tag = 0xffffffff; } return 0; } /* check guard */ if (e_guard != a_guard) { scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 0x10, 0x1); set_driver_byte(cmd, DRIVER_SENSE); set_host_byte(cmd, DID_ABORT); cmd->result |= SAM_STAT_CHECK_CONDITION << 1; return 1; } /* check ref tag */ if (e_ref_tag != a_ref_tag) { scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 0x10, 0x3); set_driver_byte(cmd, DRIVER_SENSE); set_host_byte(cmd, DID_ABORT); cmd->result |= SAM_STAT_CHECK_CONDITION << 1; return 1; } /* check appl tag */ if (e_app_tag != a_app_tag) { scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 0x10, 0x2); set_driver_byte(cmd, DRIVER_SENSE); set_host_byte(cmd, DID_ABORT); cmd->result |= SAM_STAT_CHECK_CONDITION << 1; return 1; } return 1; } static void qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, struct req_que *req, uint32_t index) { struct qla_hw_data *ha = vha->hw; srb_t *sp; uint16_t comp_status; uint16_t scsi_status; uint16_t thread_id; uint32_t rval = EXT_STATUS_OK; struct fc_bsg_job *bsg_job = NULL; sts_entry_t *sts; struct sts_entry_24xx *sts24; sts = (sts_entry_t *) pkt; sts24 = (struct sts_entry_24xx *) pkt; /* Validate handle. */ if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x70af, "Invalid SCSI completion handle 0x%x.\n", index); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); return; } sp = req->outstanding_cmds[index]; if (sp) { /* Free outstanding command slot. 
*/ req->outstanding_cmds[index] = NULL; bsg_job = sp->u.bsg_job; } else { ql_log(ql_log_warn, vha, 0x70b0, "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", req->id, index); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); return; } if (IS_FWI2_CAPABLE(ha)) { comp_status = le16_to_cpu(sts24->comp_status); scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; } else { comp_status = le16_to_cpu(sts->comp_status); scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; } thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; switch (comp_status) { case CS_COMPLETE: if (scsi_status == 0) { bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; rval = EXT_STATUS_OK; } goto done; case CS_DATA_OVERRUN: ql_dbg(ql_dbg_user, vha, 0x70b1, "Command completed with date overrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_OVERRUN; break; case CS_DATA_UNDERRUN: ql_dbg(ql_dbg_user, vha, 0x70b2, "Command completed with date underrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_UNDERRUN; break; case CS_BIDIR_RD_OVERRUN: ql_dbg(ql_dbg_user, vha, 0x70b3, "Command completed with read data overrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_OVERRUN; break; case CS_BIDIR_RD_WR_OVERRUN: ql_dbg(ql_dbg_user, vha, 0x70b4, "Command completed with read and write data overrun " "thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_OVERRUN; break; case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: ql_dbg(ql_dbg_user, vha, 0x70b5, "Command completed with read data over and write data " "underrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_OVERRUN; break; case CS_BIDIR_RD_UNDERRUN: ql_dbg(ql_dbg_user, vha, 0x70b6, "Command completed with read data data underrun " "thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_UNDERRUN; break; case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: ql_dbg(ql_dbg_user, vha, 0x70b7, "Command completed with read data under and write data " "overrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_UNDERRUN; break; case 
CS_BIDIR_RD_WR_UNDERRUN: ql_dbg(ql_dbg_user, vha, 0x70b8, "Command completed with read and write data underrun " "thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_UNDERRUN; break; case CS_BIDIR_DMA: ql_dbg(ql_dbg_user, vha, 0x70b9, "Command completed with data DMA error thread_id=%d\n", thread_id); rval = EXT_STATUS_DMA_ERR; break; case CS_TIMEOUT: ql_dbg(ql_dbg_user, vha, 0x70ba, "Command completed with timeout thread_id=%d\n", thread_id); rval = EXT_STATUS_TIMEOUT; break; default: ql_dbg(ql_dbg_user, vha, 0x70bb, "Command completed with completion status=0x%x " "thread_id=%d\n", comp_status, thread_id); rval = EXT_STATUS_ERR; break; } bsg_job->reply->reply_payload_rcv_len = 0; done: /* Return the vendor specific reply to API */ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; bsg_job->reply_len = sizeof(struct fc_bsg_reply); /* Always return DID_OK, bsg will send the vendor specific response * in this case only */ sp->done(vha, sp, (DID_OK << 6)); } /** * qla2x00_status_entry() - Process a Status IOCB entry. 
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t ox_id;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;

	/* The same IOCB is viewed through the legacy or FWI2 layout. */
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	/* Handle encodes queue number (MSW) and command slot (LSW). */
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds)
		sp = req->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp == NULL) {
		/* Stale/bogus handle: firmware and driver are out of sync;
		 * schedule an adapter reset. */
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	/* Bidirectional BSG commands take a dedicated completion path. */
	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_do_host_ramp_up(vha);
		qla2x00_process_completed_request(vha, req, handle);
		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);
		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		/* Must happen before any use of the data payload below. */
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			/* FW and target residuals disagreeing means frames
			 * were dropped on the wire. */
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set and busy are considered to be
			 * task not completed.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		/* handle_dif_error sets cmd->result itself; logit=0 when the
		 * error was absorbed. */
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (!res)
		qla2x00_do_host_ramp_up(vha);

	/* A non-NULL status_srb means sense data continues in subsequent
	 * Status Continuation IOCBs; completion is deferred until then. */
	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
*/ static void qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) { uint8_t sense_sz = 0; struct qla_hw_data *ha = rsp->hw; struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); srb_t *sp = rsp->status_srb; struct scsi_cmnd *cp; uint32_t sense_len; uint8_t *sense_ptr; if (!sp || !GET_CMD_SENSE_LEN(sp)) return; sense_len = GET_CMD_SENSE_LEN(sp); sense_ptr = GET_CMD_SENSE_PTR(sp); cp = GET_CMD_SP(sp); if (cp == NULL) { ql_log(ql_log_warn, vha, 0x3025, "cmd is NULL: already returned to OS (sp=%p).\n", sp); rsp->status_srb = NULL; return; } if (sense_len > sizeof(pkt->data)) sense_sz = sizeof(pkt->data); else sense_sz = sense_len; /* Move sense data. */ if (IS_FWI2_CAPABLE(ha)) host_to_fcp_swap(pkt->data, sizeof(pkt->data)); memcpy(sense_ptr, pkt->data, sense_sz); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, sense_ptr, sense_sz); sense_len -= sense_sz; sense_ptr += sense_sz; SET_CMD_SENSE_PTR(sp, sense_ptr); SET_CMD_SENSE_LEN(sp, sense_len); /* Place command on done queue. */ if (sense_len == 0) { rsp->status_srb = NULL; sp->done(ha, sp, cp->result); } } /** * qla2x00_error_entry() - Process an error entry. 
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	/* Queue index from the handle must map to a live request queue. */
	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	/* RF_BUSY asks the mid-layer to retry rather than fail. */
	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	/* No srb for this handle: adapter state is suspect, force a reset. */
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue.\n");

	if (IS_QLA82XX(ha))
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
	else
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	/* NOTE(review): 1 << mbx_count is UB if mbx_count can reach 32 --
	 * confirm the hardware limit elsewhere in the driver. */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	/* mb0 already consumed above; shift to the bit for mailbox 1. */
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		/* Only read registers the command declared as outputs. */
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	/* Consume IOCBs until we hit one we have already processed. */
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		/* Advance (and wrap) the ring cursor before dispatching. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);

			(void)qlt_24xx_process_response_error(vha, pkt);

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case TSK_MGMT_IOCB_TYPE:
			qla24xx_tm_iocb_entry(vha, rsp->req,
			    (struct tsk_mgmt_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			/* ensure that the ATIO queue is empty */
			qlt_24xx_process_atio_queue(vha);
			/* fall through: ABTS is then handed to target code */
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case
			 */
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_QLA82XX(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}

/*
 * qla2xxx_check_risc_status() - Probe RISC diagnostic state after a pause.
 *
 * Only meaningful on ISP25xx/81xx/83xx.  Pokes the iobase window registers
 * and logs an "Additional code -- 0x55AA" marker when BIT_3 of iobase_c8
 * is set; purely informational, always restores the window to 0.
 */
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	/* Poll (re-arming each iteration) until BIT_0 sets or we time out. */
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	/* First window failed; retry with window select 0x0003. */
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
*/ irqreturn_t qla24xx_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct device_reg_24xx __iomem *reg; int status; unsigned long iter; uint32_t stat; uint32_t hccr; uint16_t mb[8]; struct rsp_que *rsp; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x5059, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp24; status = 0; if (unlikely(pci_channel_offline(ha->pdev))) return IRQ_HANDLED; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_DWORD(&reg->hccr); ql_log(ql_log_warn, vha, 0x504b, "RISC paused -- HCCR=%x, Dumping firmware.\n", hccr); qla2xxx_check_risc_status(vha); ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSRX_RISC_INT) == 0) break; switch (stat & 0xff) { case INTR_ROM_MB_SUCCESS: case INTR_ROM_MB_FAILED: case INTR_MB_SUCCESS: case INTR_MB_FAILED: qla24xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case INTR_ASYNC_EVENT: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox1); mb[2] = RD_REG_WORD(&reg->mailbox2); mb[3] = RD_REG_WORD(&reg->mailbox3); qla2x00_async_event(vha, rsp, mb); break; case INTR_RSP_QUE_UPDATE: case INTR_RSP_QUE_UPDATE_83XX: qla24xx_process_response_queue(vha, rsp); break; case INTR_ATIO_QUE_UPDATE: qlt_24xx_process_atio_queue(vha); break; case INTR_ATIO_RSP_QUE_UPDATE: qlt_24xx_process_atio_queue(vha); qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_async, vha, 0x504f, "Unrecognized interrupt type (%d).\n", stat * 0xff); break; } WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(&reg->hccr); if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) ndelay(3500); } 
qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } static irqreturn_t qla24xx_msix_rsp_q(int irq, void *dev_id) { struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; struct scsi_qla_host *vha; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x505a, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp24; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); if (!ha->flags.disable_msix_handshake) { WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(&reg->hccr); } spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } static irqreturn_t qla25xx_msix_rsp_q(int irq, void *dev_id) { struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x505b, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; /* Clear the interrupt, if enabled, for this response queue */ if (!ha->flags.disable_msix_handshake) { reg = &ha->iobase->isp24; spin_lock_irqsave(&ha->hardware_lock, flags); WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(&reg->hccr); spin_unlock_irqrestore(&ha->hardware_lock, flags); } queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); return IRQ_HANDLED; } static irqreturn_t qla24xx_msix_default(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; int status; uint32_t stat; uint32_t hccr; uint16_t mb[8]; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0x505c, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp24; status = 0; 
spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); do { stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_DWORD(&reg->hccr); ql_log(ql_log_info, vha, 0x5050, "RISC paused -- HCCR=%x, Dumping firmware.\n", hccr); qla2xxx_check_risc_status(vha); ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSRX_RISC_INT) == 0) break; switch (stat & 0xff) { case INTR_ROM_MB_SUCCESS: case INTR_ROM_MB_FAILED: case INTR_MB_SUCCESS: case INTR_MB_FAILED: qla24xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case INTR_ASYNC_EVENT: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox1); mb[2] = RD_REG_WORD(&reg->mailbox2); mb[3] = RD_REG_WORD(&reg->mailbox3); qla2x00_async_event(vha, rsp, mb); break; case INTR_RSP_QUE_UPDATE: case INTR_RSP_QUE_UPDATE_83XX: qla24xx_process_response_queue(vha, rsp); break; case INTR_ATIO_QUE_UPDATE: qlt_24xx_process_atio_queue(vha); break; case INTR_ATIO_RSP_QUE_UPDATE: qlt_24xx_process_atio_queue(vha); qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_async, vha, 0x5051, "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); } while (0); qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } /* Interrupt handling helpers. 
*/ struct qla_init_msix_entry { const char *name; irq_handler_t handler; }; static struct qla_init_msix_entry msix_entries[3] = { { "qla2xxx (default)", qla24xx_msix_default }, { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, }; static struct qla_init_msix_entry qla82xx_msix_entries[2] = { { "qla2xxx (default)", qla82xx_msix_default }, { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, }; static struct qla_init_msix_entry qla83xx_msix_entries[3] = { { "qla2xxx (default)", qla24xx_msix_default }, { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, }; static void qla24xx_disable_msix(struct qla_hw_data *ha) { int i; struct qla_msix_entry *qentry; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; if (qentry->have_irq) free_irq(qentry->vector, qentry->rsp); } pci_disable_msix(ha->pdev); kfree(ha->msix_entries); ha->msix_entries = NULL; ha->flags.msix_enabled = 0; ql_dbg(ql_dbg_init, vha, 0x0042, "Disabled the MSI.\n"); } static int qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) { #define MIN_MSIX_COUNT 2 int i, ret; struct msix_entry *entries; struct qla_msix_entry *qentry; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, GFP_KERNEL); if (!entries) { ql_log(ql_log_warn, vha, 0x00bc, "Failed to allocate memory for msix_entry.\n"); return -ENOMEM; } for (i = 0; i < ha->msix_count; i++) entries[i].entry = i; ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); if (ret) { if (ret < MIN_MSIX_COUNT) goto msix_failed; ql_log(ql_log_warn, vha, 0x00c6, "MSI-X: Failed to enable support " "-- %d/%d\n Retry with %d vectors.\n", ha->msix_count, ret, ret); ha->msix_count = ret; ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); if (ret) { msix_failed: ql_log(ql_log_fatal, vha, 0x00c7, "MSI-X: Failed to enable support, " "giving up -- %d/%d.\n", 
ha->msix_count, ret); goto msix_out; } ha->max_rsp_queues = ha->msix_count - 1; } ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); if (!ha->msix_entries) { ql_log(ql_log_fatal, vha, 0x00c8, "Failed to allocate memory for ha->msix_entries.\n"); ret = -ENOMEM; goto msix_out; } ha->flags.msix_enabled = 1; for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; qentry->vector = entries[i].vector; qentry->entry = entries[i].entry; qentry->have_irq = 0; qentry->rsp = NULL; } /* Enable MSI-X vectors for the base queue */ for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { ret = request_irq(qentry->vector, qla83xx_msix_entries[i].handler, 0, qla83xx_msix_entries[i].name, rsp); } else if (IS_QLA82XX(ha)) { ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, 0, qla82xx_msix_entries[i].name, rsp); } else { ret = request_irq(qentry->vector, msix_entries[i].handler, 0, msix_entries[i].name, rsp); } if (ret) { ql_log(ql_log_fatal, vha, 0x00cb, "MSI-X: unable to register handler -- %x/%d.\n", qentry->vector, ret); qla24xx_disable_msix(ha); ha->mqenable = 0; goto msix_out; } qentry->have_irq = 1; qentry->rsp = rsp; rsp->msix = qentry; } /* Enable MSI-X vector for response queue update for queue 0 */ if (IS_QLA83XX(ha)) { if (ha->msixbase && ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) ha->mqenable = 1; } else if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) ha->mqenable = 1; ql_dbg(ql_dbg_multiq, vha, 0xc005, "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); ql_dbg(ql_dbg_init, vha, 0x0055, "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); msix_out: kfree(entries); return ret; } int qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) { int ret; device_reg_t 
__iomem *reg = ha->iobase; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); /* If possible, enable MSI-X. */ if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha)) goto skip_msi; if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && (ha->pdev->subsystem_device == 0x7040 || ha->pdev->subsystem_device == 0x7041 || ha->pdev->subsystem_device == 0x1705)) { ql_log(ql_log_warn, vha, 0x0034, "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", ha->pdev->subsystem_vendor, ha->pdev->subsystem_device); goto skip_msi; } if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { ql_log(ql_log_warn, vha, 0x0035, "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); goto skip_msix; } ret = qla24xx_enable_msix(ha, rsp); if (!ret) { ql_dbg(ql_dbg_init, vha, 0x0036, "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, ha->fw_attributes); goto clear_risc_ints; } ql_log(ql_log_info, vha, 0x0037, "MSI-X Falling back-to MSI mode -%d.\n", ret); skip_msix: if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha)) goto skip_msi; ret = pci_enable_msi(ha->pdev); if (!ret) { ql_dbg(ql_dbg_init, vha, 0x0038, "MSI: Enabled.\n"); ha->flags.msi_enabled = 1; } else ql_log(ql_log_warn, vha, 0x0039, "MSI-X; Falling back-to INTa mode -- %d.\n", ret); /* Skip INTx on ISP82xx. */ if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) return QLA_FUNCTION_FAILED; skip_msi: ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, ha->flags.msi_enabled ? 
0 : IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp); if (ret) { ql_log(ql_log_warn, vha, 0x003a, "Failed to reserve interrupt %d already in use.\n", ha->pdev->irq); goto fail; } else if (!ha->flags.msi_enabled) { ql_dbg(ql_dbg_init, vha, 0x0125, "INTa mode: Enabled.\n"); ha->flags.mr_intr_valid = 1; } clear_risc_ints: spin_lock_irq(&ha->hardware_lock); if (!IS_FWI2_CAPABLE(ha)) WRT_REG_WORD(&reg->isp.semaphore, 0); spin_unlock_irq(&ha->hardware_lock); fail: return ret; } void qla2x00_free_irqs(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct rsp_que *rsp; /* * We need to check that ha->rsp_q_map is valid in case we are called * from a probe failure context. */ if (!ha->rsp_q_map || !ha->rsp_q_map[0]) return; rsp = ha->rsp_q_map[0]; if (ha->flags.msix_enabled) qla24xx_disable_msix(ha); else if (ha->flags.msi_enabled) { free_irq(ha->pdev->irq, rsp); pci_disable_msi(ha->pdev); } else free_irq(ha->pdev->irq, rsp); } int qla25xx_request_irq(struct rsp_que *rsp) { struct qla_hw_data *ha = rsp->hw; struct qla_init_msix_entry *intr = &msix_entries[2]; struct qla_msix_entry *msix = rsp->msix; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); int ret; ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); if (ret) { ql_log(ql_log_fatal, vha, 0x00e6, "MSI-X: Unable to register handler -- %x/%d.\n", msix->vector, ret); return ret; } msix->have_irq = 1; msix->rsp = rsp; return ret; }
gpl-2.0