repo_name
string
path
string
copies
string
size
string
content
string
license
string
touchpro/android_kernel_lge_ls770
drivers/usb/gadget/mv_udc_core.c
2158
58068
/* * Copyright (C) 2011 Marvell International Ltd. All rights reserved. * Author: Chao Xie <chao.xie@marvell.com> * Neil Zhang <zhangwm@marvell.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/otg.h> #include <linux/pm.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/platform_data/mv_usb.h> #include <asm/unaligned.h> #include "mv_udc.h" #define DRIVER_DESC "Marvell PXA USB Device Controller driver" #define DRIVER_VERSION "8 Nov 2010" #define ep_dir(ep) (((ep)->ep_num == 0) ? 
\ ((ep)->udc->ep0_dir) : ((ep)->direction)) /* timeout value -- usec */ #define RESET_TIMEOUT 10000 #define FLUSH_TIMEOUT 10000 #define EPSTATUS_TIMEOUT 10000 #define PRIME_TIMEOUT 10000 #define READSAFE_TIMEOUT 1000 #define LOOPS_USEC_SHIFT 1 #define LOOPS_USEC (1 << LOOPS_USEC_SHIFT) #define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT) static DECLARE_COMPLETION(release_done); static const char driver_name[] = "mv_udc"; static const char driver_desc[] = DRIVER_DESC; static void nuke(struct mv_ep *ep, int status); static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver); /* for endpoint 0 operations */ static const struct usb_endpoint_descriptor mv_ep0_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 0, .bmAttributes = USB_ENDPOINT_XFER_CONTROL, .wMaxPacketSize = EP0_MAX_PKT_SIZE, }; static void ep0_reset(struct mv_udc *udc) { struct mv_ep *ep; u32 epctrlx; int i = 0; /* ep0 in and out */ for (i = 0; i < 2; i++) { ep = &udc->eps[i]; ep->udc = udc; /* ep0 dQH */ ep->dqh = &udc->ep_dqh[i]; /* configure ep0 endpoint capabilities in dQH */ ep->dqh->max_packet_length = (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) | EP_QUEUE_HEAD_IOS; ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE; epctrlx = readl(&udc->op_regs->epctrlx[0]); if (i) { /* TX */ epctrlx |= EPCTRL_TX_ENABLE | (USB_ENDPOINT_XFER_CONTROL << EPCTRL_TX_EP_TYPE_SHIFT); } else { /* RX */ epctrlx |= EPCTRL_RX_ENABLE | (USB_ENDPOINT_XFER_CONTROL << EPCTRL_RX_EP_TYPE_SHIFT); } writel(epctrlx, &udc->op_regs->epctrlx[0]); } } /* protocol ep0 stall, will automatically be cleared on new transaction */ static void ep0_stall(struct mv_udc *udc) { u32 epctrlx; /* set TX and RX to stall */ epctrlx = readl(&udc->op_regs->epctrlx[0]); epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL; writel(epctrlx, &udc->op_regs->epctrlx[0]); /* update ep0 state */ udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = EP_DIR_OUT; } static int 
process_ep_req(struct mv_udc *udc, int index, struct mv_req *curr_req)
{
	/*
	 * Walk the dTD chain of @curr_req and decide whether the request
	 * has finished on the hardware side.
	 *
	 * Returns 1 if a dTD is still owned by the controller (request not
	 * yet complete), a negative errno on a transfer error, or 0 on
	 * success with curr_req->req.actual updated to the byte count
	 * actually transferred.
	 */
	struct mv_dtd *curr_dtd;
	struct mv_dqh *curr_dqh;
	int td_complete, actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	/* dQH entries alternate OUT/IN: even index = RX, odd index = TX */
	direction = index % 2;

	curr_dtd = curr_req->head;
	/* NOTE(review): td_complete is assigned but never read afterwards */
	td_complete = 0;
	/* start from the requested length and subtract what remains */
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		/* controller still owns this dTD: request not finished */
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			/* bytes the controller left untransferred in this dTD */
			remaining_length =
				(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					/* leftover data on TX is a protocol error */
					dev_dbg(&udc->dev->dev,
						"TX dTD remains data\n");
					retval = -EPROTO;
					break;
				} else
					/* short RX packet: stop early, not an error */
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		/* advance to the next dTD except after the last one */
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	/*
	 * Busy-wait until the queue head has moved past the dTD we just
	 * retired; on the terminating dTD also wait for ENDPTSTATUS to
	 * clear for this endpoint.
	 */
	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}

/*
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 * request is still in progress.
 */
/*
 * Remove @req from @ep's queue, free its dTDs back to the DMA pool,
 * unmap its buffer and invoke the gadget-layer completion callback.
 * Must be called with ep->udc->lock held; the lock is dropped around
 * the completion callback (see __releases/__acquires below).
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Removed the req from fsl_ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		/* fetch the link before the current dTD is freed */
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	/* log abnormal completions; -ESHUTDOWN is routine teardown */
	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	ep->stopped = 1;

	/* drop the lock: the callback may re-enter the driver (e.g. queue) */
	spin_unlock(&ep->udc->lock);
	/*
	 * complete() is from gadget layer,
	 * eg fsg->bulk_in_complete()
	 */
	if (req->req.complete)
		req->req.complete(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}

/*
 * Link @req's dTD chain into @ep's queue head and prime the endpoint.
 * Called with the udc lock held.
 */
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	/* dQH table alternates OUT/IN per endpoint number */
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);

	/* OUT endpoints use bits 0..15, IN endpoints bits 16..31 */
	bit_pos = 1 << (((direction == EP_DIR_OUT) ?
0 : 16) + ep->ep_num); /* check if the pipe is empty */ if (!(list_empty(&ep->queue))) { struct mv_req *lastreq; lastreq = list_entry(ep->queue.prev, struct mv_req, queue); lastreq->tail->dtd_next = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; wmb(); if (readl(&udc->op_regs->epprime) & bit_pos) goto done; loops = LOOPS(READSAFE_TIMEOUT); while (1) { /* start with setting the semaphores */ usbcmd = readl(&udc->op_regs->usbcmd); usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET; writel(usbcmd, &udc->op_regs->usbcmd); /* read the endpoint status */ epstatus = readl(&udc->op_regs->epstatus) & bit_pos; /* * Reread the ATDTW semaphore bit to check if it is * cleared. When hardware see a hazard, it will clear * the bit or else we remain set to 1 and we can * proceed with priming of endpoint if not already * primed. */ if (readl(&udc->op_regs->usbcmd) & USBCMD_ATDTW_TRIPWIRE_SET) break; loops--; if (loops == 0) { dev_err(&udc->dev->dev, "Timeout for ATDTW_TRIPWIRE...\n"); retval = -ETIME; goto done; } udelay(LOOPS_USEC); } /* Clear the semaphore */ usbcmd = readl(&udc->op_regs->usbcmd); usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR; writel(usbcmd, &udc->op_regs->usbcmd); if (epstatus) goto done; } /* Write dQH next pointer and terminate bit to 0 */ dqh->next_dtd_ptr = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; /* clear active and halt bit, in case set from a previous error */ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED); /* Ensure that updates to the QH will occure before priming. */ wmb(); /* Prime the Endpoint */ writel(bit_pos, &udc->op_regs->epprime); done: return retval; } static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length, dma_addr_t *dma, int *is_last) { struct mv_dtd *dtd; struct mv_udc *udc; struct mv_dqh *dqh; u32 temp, mult = 0; /* how big will this transfer be? 
*/ if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) { dqh = req->ep->dqh; mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS) & 0x3; *length = min(req->req.length - req->req.actual, (unsigned)(mult * req->ep->ep.maxpacket)); } else *length = min(req->req.length - req->req.actual, (unsigned)EP_MAX_LENGTH_TRANSFER); udc = req->ep->udc; /* * Be careful that no _GFP_HIGHMEM is set, * or we can not use dma_to_virt */ dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma); if (dtd == NULL) return dtd; dtd->td_dma = *dma; /* initialize buffer page pointers */ temp = (u32)(req->req.dma + req->req.actual); dtd->buff_ptr0 = cpu_to_le32(temp); temp &= ~0xFFF; dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000); dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000); dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000); dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000); req->req.actual += *length; /* zlp is needed if req->req.zero is set */ if (req->req.zero) { if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) *is_last = 1; else *is_last = 0; } else if (req->req.length == req->req.actual) *is_last = 1; else *is_last = 0; /* Fill in the transfer size; set active bit */ temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE); /* Enable interrupt for the last dtd of a request */ if (*is_last && !req->req.no_interrupt) temp |= DTD_IOC; temp |= mult << 10; dtd->size_ioc_sts = temp; mb(); return dtd; } /* generate dTD linked list for a request */ static int req_to_dtd(struct mv_req *req) { unsigned count; int is_last, is_first = 1; struct mv_dtd *dtd, *last_dtd = NULL; struct mv_udc *udc; dma_addr_t dma; udc = req->ep->udc; do { dtd = build_dtd(req, &count, &dma, &is_last); if (dtd == NULL) return -ENOMEM; if (is_first) { is_first = 0; req->head = dtd; } else { last_dtd->dtd_next = dma; last_dtd->next_dtd_virt = dtd; } last_dtd = dtd; req->dtd_count++; } while (!is_last); /* set terminate bit to 1 for the last dTD */ dtd->dtd_next = DTD_NEXT_TERMINATE; req->tail = dtd; return 0; } static int 
mv_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct mv_udc *udc; struct mv_ep *ep; struct mv_dqh *dqh; u16 max = 0; u32 bit_pos, epctrlx, direction; unsigned char zlt = 0, ios = 0, mult = 0; unsigned long flags; ep = container_of(_ep, struct mv_ep, ep); udc = ep->udc; if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) return -EINVAL; if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; direction = ep_dir(ep); max = usb_endpoint_maxp(desc); /* * disable HW zero length termination select * driver handles zero length packet through req->req.zero */ zlt = 1; bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num); /* Check if the Endpoint is Primed */ if ((readl(&udc->op_regs->epprime) & bit_pos) || (readl(&udc->op_regs->epstatus) & bit_pos)) { dev_info(&udc->dev->dev, "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x," " ENDPTSTATUS=0x%x, bit_pos=0x%x\n", (unsigned)ep->ep_num, direction ? "SEND" : "RECV", (unsigned)readl(&udc->op_regs->epprime), (unsigned)readl(&udc->op_regs->epstatus), (unsigned)bit_pos); goto en_done; } /* Set the max packet length, interrupt on Setup and Mult fields */ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { case USB_ENDPOINT_XFER_BULK: zlt = 1; mult = 0; break; case USB_ENDPOINT_XFER_CONTROL: ios = 1; case USB_ENDPOINT_XFER_INT: mult = 0; break; case USB_ENDPOINT_XFER_ISOC: /* Calculate transactions needed for high bandwidth iso */ mult = (unsigned char)(1 + ((max >> 11) & 0x03)); max = max & 0x7ff; /* bit 0~10 */ /* 3 transactions at most */ if (mult > 3) goto en_done; break; default: goto en_done; } spin_lock_irqsave(&udc->lock, flags); /* Get the endpoint queue head address */ dqh = ep->dqh; dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) | (mult << EP_QUEUE_HEAD_MULT_POS) | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0) | (ios ? 
EP_QUEUE_HEAD_IOS : 0); dqh->next_dtd_ptr = 1; dqh->size_ioc_int_sts = 0; ep->ep.maxpacket = max; ep->ep.desc = desc; ep->stopped = 0; /* Enable the endpoint for Rx or Tx and set the endpoint type */ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); if (direction == EP_DIR_IN) { epctrlx &= ~EPCTRL_TX_ALL_MASK; epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) << EPCTRL_TX_EP_TYPE_SHIFT); } else { epctrlx &= ~EPCTRL_RX_ALL_MASK; epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) << EPCTRL_RX_EP_TYPE_SHIFT); } writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); /* * Implement Guideline (GL# USB-7) The unused endpoint type must * be programmed to bulk. */ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); if ((epctrlx & EPCTRL_RX_ENABLE) == 0) { epctrlx |= (USB_ENDPOINT_XFER_BULK << EPCTRL_RX_EP_TYPE_SHIFT); writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); } epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); if ((epctrlx & EPCTRL_TX_ENABLE) == 0) { epctrlx |= (USB_ENDPOINT_XFER_BULK << EPCTRL_TX_EP_TYPE_SHIFT); writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); } spin_unlock_irqrestore(&udc->lock, flags); return 0; en_done: return -EINVAL; } static int mv_ep_disable(struct usb_ep *_ep) { struct mv_udc *udc; struct mv_ep *ep; struct mv_dqh *dqh; u32 bit_pos, epctrlx, direction; unsigned long flags; ep = container_of(_ep, struct mv_ep, ep); if ((_ep == NULL) || !ep->ep.desc) return -EINVAL; udc = ep->udc; /* Get the endpoint queue head address */ dqh = ep->dqh; spin_lock_irqsave(&udc->lock, flags); direction = ep_dir(ep); bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num); /* Reset the max packet length and the interrupt on Setup */ dqh->max_packet_length = 0; /* Disable the endpoint for Rx or Tx and reset the endpoint type */ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); epctrlx &= ~((direction == EP_DIR_IN) ? 
(EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE) : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE)); writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); /* nuke all pending requests (does flush) */ nuke(ep, -ESHUTDOWN); ep->ep.desc = NULL; ep->stopped = 1; spin_unlock_irqrestore(&udc->lock, flags); return 0; } static struct usb_request * mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { struct mv_req *req = NULL; req = kzalloc(sizeof *req, gfp_flags); if (!req) return NULL; req->req.dma = DMA_ADDR_INVALID; INIT_LIST_HEAD(&req->queue); return &req->req; } static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req) { struct mv_req *req = NULL; req = container_of(_req, struct mv_req, req); if (_req) kfree(req); } static void mv_ep_fifo_flush(struct usb_ep *_ep) { struct mv_udc *udc; u32 bit_pos, direction; struct mv_ep *ep; unsigned int loops; if (!_ep) return; ep = container_of(_ep, struct mv_ep, ep); if (!ep->ep.desc) return; udc = ep->udc; direction = ep_dir(ep); if (ep->ep_num == 0) bit_pos = (1 << 16) | 1; else if (direction == EP_DIR_OUT) bit_pos = 1 << ep->ep_num; else bit_pos = 1 << (16 + ep->ep_num); loops = LOOPS(EPSTATUS_TIMEOUT); do { unsigned int inter_loops; if (loops == 0) { dev_err(&udc->dev->dev, "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n", (unsigned)readl(&udc->op_regs->epstatus), (unsigned)bit_pos); return; } /* Write 1 to the Flush register */ writel(bit_pos, &udc->op_regs->epflush); /* Wait until flushing completed */ inter_loops = LOOPS(FLUSH_TIMEOUT); while (readl(&udc->op_regs->epflush)) { /* * ENDPTFLUSH bit should be cleared to indicate this * operation is complete */ if (inter_loops == 0) { dev_err(&udc->dev->dev, "TIMEOUT for ENDPTFLUSH=0x%x," "bit_pos=0x%x\n", (unsigned)readl(&udc->op_regs->epflush), (unsigned)bit_pos); return; } inter_loops--; udelay(LOOPS_USEC); } loops--; } while (readl(&udc->op_regs->epstatus) & bit_pos); } /* queues (submits) an I/O request to an endpoint */ static int mv_ep_queue(struct usb_ep *_ep, struct usb_request 
*_req, gfp_t gfp_flags) { struct mv_ep *ep = container_of(_ep, struct mv_ep, ep); struct mv_req *req = container_of(_req, struct mv_req, req); struct mv_udc *udc = ep->udc; unsigned long flags; int retval; /* catch various bogus parameters */ if (!_req || !req->req.complete || !req->req.buf || !list_empty(&req->queue)) { dev_err(&udc->dev->dev, "%s, bad params", __func__); return -EINVAL; } if (unlikely(!_ep || !ep->ep.desc)) { dev_err(&udc->dev->dev, "%s, bad ep", __func__); return -EINVAL; } udc = ep->udc; if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; req->ep = ep; /* map virtual address to hardware */ retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep)); if (retval) return retval; req->req.status = -EINPROGRESS; req->req.actual = 0; req->dtd_count = 0; spin_lock_irqsave(&udc->lock, flags); /* build dtds and push them to device queue */ if (!req_to_dtd(req)) { retval = queue_dtd(ep, req); if (retval) { spin_unlock_irqrestore(&udc->lock, flags); dev_err(&udc->dev->dev, "Failed to queue dtd\n"); goto err_unmap_dma; } } else { spin_unlock_irqrestore(&udc->lock, flags); dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n"); retval = -ENOMEM; goto err_unmap_dma; } /* Update ep0 state */ if (ep->ep_num == 0) udc->ep0_state = DATA_STATE_XMIT; /* irq handler advances the queue */ list_add_tail(&req->queue, &ep->queue); spin_unlock_irqrestore(&udc->lock, flags); return 0; err_unmap_dma: usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep)); return retval; } static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req) { struct mv_dqh *dqh = ep->dqh; u32 bit_pos; /* Write dQH next pointer and terminate bit to 0 */ dqh->next_dtd_ptr = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; /* clear active and halt bit, in case set from a previous error */ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED); /* Ensure that updates to the QH will occure before priming. 
*/ wmb(); bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num); /* Prime the Endpoint */ writel(bit_pos, &ep->udc->op_regs->epprime); } /* dequeues (cancels, unlinks) an I/O request from an endpoint */ static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct mv_ep *ep = container_of(_ep, struct mv_ep, ep); struct mv_req *req; struct mv_udc *udc = ep->udc; unsigned long flags; int stopped, ret = 0; u32 epctrlx; if (!_ep || !_req) return -EINVAL; spin_lock_irqsave(&ep->udc->lock, flags); stopped = ep->stopped; /* Stop the ep before we deal with the queue */ ep->stopped = 1; epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); if (ep_dir(ep) == EP_DIR_IN) epctrlx &= ~EPCTRL_TX_ENABLE; else epctrlx &= ~EPCTRL_RX_ENABLE; writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); /* make sure it's actually queued on this endpoint */ list_for_each_entry(req, &ep->queue, queue) { if (&req->req == _req) break; } if (&req->req != _req) { ret = -EINVAL; goto out; } /* The request is in progress, or completed but not dequeued */ if (ep->queue.next == &req->queue) { _req->status = -ECONNRESET; mv_ep_fifo_flush(_ep); /* flush current transfer */ /* The request isn't the last request in this ep queue */ if (req->queue.next != &ep->queue) { struct mv_req *next_req; next_req = list_entry(req->queue.next, struct mv_req, queue); /* Point the QH to the first TD of next request */ mv_prime_ep(ep, next_req); } else { struct mv_dqh *qh; qh = ep->dqh; qh->next_dtd_ptr = 1; qh->size_ioc_int_sts = 0; } /* The request hasn't been processed, patch up the TD chain */ } else { struct mv_req *prev_req; prev_req = list_entry(req->queue.prev, struct mv_req, queue); writel(readl(&req->tail->dtd_next), &prev_req->tail->dtd_next); } done(ep, req, -ECONNRESET); /* Enable EP */ out: epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); if (ep_dir(ep) == EP_DIR_IN) epctrlx |= EPCTRL_TX_ENABLE; else epctrlx |= EPCTRL_RX_ENABLE; writel(epctrlx, 
&udc->op_regs->epctrlx[ep->ep_num]); ep->stopped = stopped; spin_unlock_irqrestore(&ep->udc->lock, flags); return ret; } static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall) { u32 epctrlx; epctrlx = readl(&udc->op_regs->epctrlx[ep_num]); if (stall) { if (direction == EP_DIR_IN) epctrlx |= EPCTRL_TX_EP_STALL; else epctrlx |= EPCTRL_RX_EP_STALL; } else { if (direction == EP_DIR_IN) { epctrlx &= ~EPCTRL_TX_EP_STALL; epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST; } else { epctrlx &= ~EPCTRL_RX_EP_STALL; epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST; } } writel(epctrlx, &udc->op_regs->epctrlx[ep_num]); } static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction) { u32 epctrlx; epctrlx = readl(&udc->op_regs->epctrlx[ep_num]); if (direction == EP_DIR_OUT) return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0; else return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0; } static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge) { struct mv_ep *ep; unsigned long flags = 0; int status = 0; struct mv_udc *udc; ep = container_of(_ep, struct mv_ep, ep); udc = ep->udc; if (!_ep || !ep->ep.desc) { status = -EINVAL; goto out; } if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { status = -EOPNOTSUPP; goto out; } /* * Attempt to halt IN ep will fail if any transfer requests * are still queue */ if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) { status = -EAGAIN; goto out; } spin_lock_irqsave(&ep->udc->lock, flags); ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt); if (halt && wedge) ep->wedge = 1; else if (!halt) ep->wedge = 0; spin_unlock_irqrestore(&ep->udc->lock, flags); if (ep->ep_num == 0) { udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = EP_DIR_OUT; } out: return status; } static int mv_ep_set_halt(struct usb_ep *_ep, int halt) { return mv_ep_set_halt_wedge(_ep, halt, 0); } static int mv_ep_set_wedge(struct usb_ep *_ep) { return mv_ep_set_halt_wedge(_ep, 1, 1); } static struct usb_ep_ops mv_ep_ops = { .enable = mv_ep_enable, .disable = 
mv_ep_disable, .alloc_request = mv_alloc_request, .free_request = mv_free_request, .queue = mv_ep_queue, .dequeue = mv_ep_dequeue, .set_wedge = mv_ep_set_wedge, .set_halt = mv_ep_set_halt, .fifo_flush = mv_ep_fifo_flush, /* flush fifo */ }; static void udc_clock_enable(struct mv_udc *udc) { clk_prepare_enable(udc->clk); } static void udc_clock_disable(struct mv_udc *udc) { clk_disable_unprepare(udc->clk); } static void udc_stop(struct mv_udc *udc) { u32 tmp; /* Disable interrupts */ tmp = readl(&udc->op_regs->usbintr); tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN | USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN); writel(tmp, &udc->op_regs->usbintr); udc->stopped = 1; /* Reset the Run the bit in the command register to stop VUSB */ tmp = readl(&udc->op_regs->usbcmd); tmp &= ~USBCMD_RUN_STOP; writel(tmp, &udc->op_regs->usbcmd); } static void udc_start(struct mv_udc *udc) { u32 usbintr; usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN | USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND; /* Enable interrupts */ writel(usbintr, &udc->op_regs->usbintr); udc->stopped = 0; /* Set the Run bit in the command register */ writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd); } static int udc_reset(struct mv_udc *udc) { unsigned int loops; u32 tmp, portsc; /* Stop the controller */ tmp = readl(&udc->op_regs->usbcmd); tmp &= ~USBCMD_RUN_STOP; writel(tmp, &udc->op_regs->usbcmd); /* Reset the controller to get default values */ writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd); /* wait for reset to complete */ loops = LOOPS(RESET_TIMEOUT); while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) { if (loops == 0) { dev_err(&udc->dev->dev, "Wait for RESET completed TIMEOUT\n"); return -ETIMEDOUT; } loops--; udelay(LOOPS_USEC); } /* set controller to device mode */ tmp = readl(&udc->op_regs->usbmode); tmp |= USBMODE_CTRL_MODE_DEVICE; /* turn setup lockout off, require setup tripwire in usbcmd */ tmp |= USBMODE_SETUP_LOCK_OFF; writel(tmp, &udc->op_regs->usbmode); 
writel(0x0, &udc->op_regs->epsetupstat); /* Configure the Endpoint List Address */ writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK, &udc->op_regs->eplistaddr); portsc = readl(&udc->op_regs->portsc[0]); if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC) portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER); if (udc->force_fs) portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT; else portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT); writel(portsc, &udc->op_regs->portsc[0]); tmp = readl(&udc->op_regs->epctrlx[0]); tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL); writel(tmp, &udc->op_regs->epctrlx[0]); return 0; } static int mv_udc_enable_internal(struct mv_udc *udc) { int retval; if (udc->active) return 0; dev_dbg(&udc->dev->dev, "enable udc\n"); udc_clock_enable(udc); if (udc->pdata->phy_init) { retval = udc->pdata->phy_init(udc->phy_regs); if (retval) { dev_err(&udc->dev->dev, "init phy error %d\n", retval); udc_clock_disable(udc); return retval; } } udc->active = 1; return 0; } static int mv_udc_enable(struct mv_udc *udc) { if (udc->clock_gating) return mv_udc_enable_internal(udc); return 0; } static void mv_udc_disable_internal(struct mv_udc *udc) { if (udc->active) { dev_dbg(&udc->dev->dev, "disable udc\n"); if (udc->pdata->phy_deinit) udc->pdata->phy_deinit(udc->phy_regs); udc_clock_disable(udc); udc->active = 0; } } static void mv_udc_disable(struct mv_udc *udc) { if (udc->clock_gating) mv_udc_disable_internal(udc); } static int mv_udc_get_frame(struct usb_gadget *gadget) { struct mv_udc *udc; u16 retval; if (!gadget) return -ENODEV; udc = container_of(gadget, struct mv_udc, gadget); retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS; return retval; } /* Tries to wake up the host connected to this gadget */ static int mv_udc_wakeup(struct usb_gadget *gadget) { struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget); u32 portsc; /* Remote wakeup feature not enabled by host */ if (!udc->remote_wakeup) return -ENOTSUPP; portsc = 
readl(&udc->op_regs->portsc); /* not suspended? */ if (!(portsc & PORTSCX_PORT_SUSPEND)) return 0; /* trigger force resume */ portsc |= PORTSCX_PORT_FORCE_RESUME; writel(portsc, &udc->op_regs->portsc[0]); return 0; } static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active) { struct mv_udc *udc; unsigned long flags; int retval = 0; udc = container_of(gadget, struct mv_udc, gadget); spin_lock_irqsave(&udc->lock, flags); udc->vbus_active = (is_active != 0); dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n", __func__, udc->softconnect, udc->vbus_active); if (udc->driver && udc->softconnect && udc->vbus_active) { retval = mv_udc_enable(udc); if (retval == 0) { /* Clock is disabled, need re-init registers */ udc_reset(udc); ep0_reset(udc); udc_start(udc); } } else if (udc->driver && udc->softconnect) { if (!udc->active) goto out; /* stop all the transfer in queue*/ stop_activity(udc, udc->driver); udc_stop(udc); mv_udc_disable(udc); } out: spin_unlock_irqrestore(&udc->lock, flags); return retval; } static int mv_udc_pullup(struct usb_gadget *gadget, int is_on) { struct mv_udc *udc; unsigned long flags; int retval = 0; udc = container_of(gadget, struct mv_udc, gadget); spin_lock_irqsave(&udc->lock, flags); udc->softconnect = (is_on != 0); dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n", __func__, udc->softconnect, udc->vbus_active); if (udc->driver && udc->softconnect && udc->vbus_active) { retval = mv_udc_enable(udc); if (retval == 0) { /* Clock is disabled, need re-init registers */ udc_reset(udc); ep0_reset(udc); udc_start(udc); } } else if (udc->driver && udc->vbus_active) { /* stop all the transfer in queue*/ stop_activity(udc, udc->driver); udc_stop(udc); mv_udc_disable(udc); } spin_unlock_irqrestore(&udc->lock, flags); return retval; } static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *); static int mv_udc_stop(struct usb_gadget *, struct usb_gadget_driver *); /* device controller usb_gadget_ops 
structure */ static const struct usb_gadget_ops mv_ops = { /* returns the current frame number */ .get_frame = mv_udc_get_frame, /* tries to wake up the host connected to this gadget */ .wakeup = mv_udc_wakeup, /* notify controller that VBUS is powered or not */ .vbus_session = mv_udc_vbus_session, /* D+ pullup, software-controlled connect/disconnect to USB host */ .pullup = mv_udc_pullup, .udc_start = mv_udc_start, .udc_stop = mv_udc_stop, }; static int eps_init(struct mv_udc *udc) { struct mv_ep *ep; char name[14]; int i; /* initialize ep0 */ ep = &udc->eps[0]; ep->udc = udc; strncpy(ep->name, "ep0", sizeof(ep->name)); ep->ep.name = ep->name; ep->ep.ops = &mv_ep_ops; ep->wedge = 0; ep->stopped = 0; ep->ep.maxpacket = EP0_MAX_PKT_SIZE; ep->ep_num = 0; ep->ep.desc = &mv_ep0_desc; INIT_LIST_HEAD(&ep->queue); ep->ep_type = USB_ENDPOINT_XFER_CONTROL; /* initialize other endpoints */ for (i = 2; i < udc->max_eps * 2; i++) { ep = &udc->eps[i]; if (i % 2) { snprintf(name, sizeof(name), "ep%din", i / 2); ep->direction = EP_DIR_IN; } else { snprintf(name, sizeof(name), "ep%dout", i / 2); ep->direction = EP_DIR_OUT; } ep->udc = udc; strncpy(ep->name, name, sizeof(ep->name)); ep->ep.name = ep->name; ep->ep.ops = &mv_ep_ops; ep->stopped = 0; ep->ep.maxpacket = (unsigned short) ~0; ep->ep_num = i / 2; INIT_LIST_HEAD(&ep->queue); list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); ep->dqh = &udc->ep_dqh[i]; } return 0; } /* delete all endpoint requests, called with spinlock held */ static void nuke(struct mv_ep *ep, int status) { /* called with spinlock held */ ep->stopped = 1; /* endpoint fifo flush */ mv_ep_fifo_flush(&ep->ep); while (!list_empty(&ep->queue)) { struct mv_req *req = NULL; req = list_entry(ep->queue.next, struct mv_req, queue); done(ep, req, status); } } /* stop all USB activities */ static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver) { struct mv_ep *ep; nuke(&udc->eps[0], -ESHUTDOWN); list_for_each_entry(ep, &udc->gadget.ep_list, 
ep.ep_list) { nuke(ep, -ESHUTDOWN); } /* report disconnect; the driver is already quiesced */ if (driver) { spin_unlock(&udc->lock); driver->disconnect(&udc->gadget); spin_lock(&udc->lock); } } static int mv_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct mv_udc *udc; int retval = 0; unsigned long flags; udc = container_of(gadget, struct mv_udc, gadget); if (udc->driver) return -EBUSY; spin_lock_irqsave(&udc->lock, flags); /* hook up the driver ... */ driver->driver.bus = NULL; udc->driver = driver; udc->usb_state = USB_STATE_ATTACHED; udc->ep0_state = WAIT_FOR_SETUP; udc->ep0_dir = EP_DIR_OUT; spin_unlock_irqrestore(&udc->lock, flags); if (udc->transceiver) { retval = otg_set_peripheral(udc->transceiver->otg, &udc->gadget); if (retval) { dev_err(&udc->dev->dev, "unable to register peripheral to otg\n"); udc->driver = NULL; return retval; } } /* pullup is always on */ mv_udc_pullup(&udc->gadget, 1); /* When boot with cable attached, there will be no vbus irq occurred */ if (udc->qwork) queue_work(udc->qwork, &udc->vbus_work); return 0; } static int mv_udc_stop(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct mv_udc *udc; unsigned long flags; udc = container_of(gadget, struct mv_udc, gadget); spin_lock_irqsave(&udc->lock, flags); mv_udc_enable(udc); udc_stop(udc); /* stop all usb activities */ udc->gadget.speed = USB_SPEED_UNKNOWN; stop_activity(udc, driver); mv_udc_disable(udc); spin_unlock_irqrestore(&udc->lock, flags); /* unbind gadget driver */ udc->driver = NULL; return 0; } static void mv_set_ptc(struct mv_udc *udc, u32 mode) { u32 portsc; portsc = readl(&udc->op_regs->portsc[0]); portsc |= mode << 16; writel(portsc, &udc->op_regs->portsc[0]); } static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req) { struct mv_ep *mvep = container_of(ep, struct mv_ep, ep); struct mv_req *req = container_of(_req, struct mv_req, req); struct mv_udc *udc; unsigned long flags; udc = mvep->udc; 
dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode); spin_lock_irqsave(&udc->lock, flags); if (req->test_mode) { mv_set_ptc(udc, req->test_mode); req->test_mode = 0; } spin_unlock_irqrestore(&udc->lock, flags); } static int udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty) { int retval = 0; struct mv_req *req; struct mv_ep *ep; ep = &udc->eps[0]; udc->ep0_dir = direction; udc->ep0_state = WAIT_FOR_OUT_STATUS; req = udc->status_req; /* fill in the reqest structure */ if (empty == false) { *((u16 *) req->req.buf) = cpu_to_le16(status); req->req.length = 2; } else req->req.length = 0; req->ep = ep; req->req.status = -EINPROGRESS; req->req.actual = 0; if (udc->test_mode) { req->req.complete = prime_status_complete; req->test_mode = udc->test_mode; udc->test_mode = 0; } else req->req.complete = NULL; req->dtd_count = 0; if (req->req.dma == DMA_ADDR_INVALID) { req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, req->req.buf, req->req.length, ep_dir(ep) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); req->mapped = 1; } /* prime the data phase */ if (!req_to_dtd(req)) { retval = queue_dtd(ep, req); if (retval) { dev_err(&udc->dev->dev, "Failed to queue dtd when prime status\n"); goto out; } } else{ /* no mem */ retval = -ENOMEM; dev_err(&udc->dev->dev, "Failed to dma_pool_alloc when prime status\n"); goto out; } list_add_tail(&req->queue, &ep->queue); return 0; out: usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep)); return retval; } static void mv_udc_testmode(struct mv_udc *udc, u16 index) { if (index <= TEST_FORCE_EN) { udc->test_mode = index; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); } else dev_err(&udc->dev->dev, "This test mode(%d) is not supported\n", index); } static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup) { udc->dev_addr = (u8)setup->wValue; /* update usb state */ udc->usb_state = USB_STATE_ADDRESS; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); } static void ch9getstatus(struct mv_udc *udc, u8 ep_num, struct usb_ctrlrequest *setup) { u16 status = 0; int retval; if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) != (USB_DIR_IN | USB_TYPE_STANDARD)) return; if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) { status = 1 << USB_DEVICE_SELF_POWERED; status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP; } else if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_INTERFACE) { /* get interface status */ status = 0; } else if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) { u8 ep_num, direction; ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) ? 
EP_DIR_IN : EP_DIR_OUT; status = ep_is_stall(udc, ep_num, direction) << USB_ENDPOINT_HALT; } retval = udc_prime_status(udc, EP_DIR_IN, status, false); if (retval) ep0_stall(udc); else udc->ep0_state = DATA_STATE_XMIT; } static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup) { u8 ep_num; u8 direction; struct mv_ep *ep; if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) { switch (setup->wValue) { case USB_DEVICE_REMOTE_WAKEUP: udc->remote_wakeup = 0; break; default: goto out; } } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) { switch (setup->wValue) { case USB_ENDPOINT_HALT: ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) ? EP_DIR_IN : EP_DIR_OUT; if (setup->wValue != 0 || setup->wLength != 0 || ep_num > udc->max_eps) goto out; ep = &udc->eps[ep_num * 2 + direction]; if (ep->wedge == 1) break; spin_unlock(&udc->lock); ep_set_stall(udc, ep_num, direction, 0); spin_lock(&udc->lock); break; default: goto out; } } else goto out; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); out: return; } static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup) { u8 ep_num; u8 direction; if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) { switch (setup->wValue) { case USB_DEVICE_REMOTE_WAKEUP: udc->remote_wakeup = 1; break; case USB_DEVICE_TEST_MODE: if (setup->wIndex & 0xFF || udc->gadget.speed != USB_SPEED_HIGH) ep0_stall(udc); if (udc->usb_state != USB_STATE_CONFIGURED && udc->usb_state != USB_STATE_ADDRESS && udc->usb_state != USB_STATE_DEFAULT) ep0_stall(udc); mv_udc_testmode(udc, (setup->wIndex >> 8)); goto out; default: goto out; } } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) { switch (setup->wValue) { case 
USB_ENDPOINT_HALT: ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) ? EP_DIR_IN : EP_DIR_OUT; if (setup->wValue != 0 || setup->wLength != 0 || ep_num > udc->max_eps) goto out; spin_unlock(&udc->lock); ep_set_stall(udc, ep_num, direction, 1); spin_lock(&udc->lock); break; default: goto out; } } else goto out; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); out: return; } static void handle_setup_packet(struct mv_udc *udc, u8 ep_num, struct usb_ctrlrequest *setup) __releases(&ep->udc->lock) __acquires(&ep->udc->lock) { bool delegate = false; nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN); dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n", setup->bRequestType, setup->bRequest, setup->wValue, setup->wIndex, setup->wLength); /* We process some stardard setup requests here */ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (setup->bRequest) { case USB_REQ_GET_STATUS: ch9getstatus(udc, ep_num, setup); break; case USB_REQ_SET_ADDRESS: ch9setaddress(udc, setup); break; case USB_REQ_CLEAR_FEATURE: ch9clearfeature(udc, setup); break; case USB_REQ_SET_FEATURE: ch9setfeature(udc, setup); break; default: delegate = true; } } else delegate = true; /* delegate USB standard requests to the gadget driver */ if (delegate == true) { /* USB requests handled by gadget */ if (setup->wLength) { /* DATA phase from gadget, STATUS phase from udc */ udc->ep0_dir = (setup->bRequestType & USB_DIR_IN) ? EP_DIR_IN : EP_DIR_OUT; spin_unlock(&udc->lock); if (udc->driver->setup(&udc->gadget, &udc->local_setup_buff) < 0) ep0_stall(udc); spin_lock(&udc->lock); udc->ep0_state = (setup->bRequestType & USB_DIR_IN) ? 
DATA_STATE_XMIT : DATA_STATE_RECV; } else { /* no DATA phase, IN STATUS phase from gadget */ udc->ep0_dir = EP_DIR_IN; spin_unlock(&udc->lock); if (udc->driver->setup(&udc->gadget, &udc->local_setup_buff) < 0) ep0_stall(udc); spin_lock(&udc->lock); udc->ep0_state = WAIT_FOR_OUT_STATUS; } } } /* complete DATA or STATUS phase of ep0 prime status phase if needed */ static void ep0_req_complete(struct mv_udc *udc, struct mv_ep *ep0, struct mv_req *req) { u32 new_addr; if (udc->usb_state == USB_STATE_ADDRESS) { /* set the new address */ new_addr = (u32)udc->dev_addr; writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT, &udc->op_regs->deviceaddr); } done(ep0, req, 0); switch (udc->ep0_state) { case DATA_STATE_XMIT: /* receive status phase */ if (udc_prime_status(udc, EP_DIR_OUT, 0, true)) ep0_stall(udc); break; case DATA_STATE_RECV: /* send status phase */ if (udc_prime_status(udc, EP_DIR_IN, 0 , true)) ep0_stall(udc); break; case WAIT_FOR_OUT_STATUS: udc->ep0_state = WAIT_FOR_SETUP; break; case WAIT_FOR_SETUP: dev_err(&udc->dev->dev, "unexpect ep0 packets\n"); break; default: ep0_stall(udc); break; } } static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr) { u32 temp; struct mv_dqh *dqh; dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT]; /* Clear bit in ENDPTSETUPSTAT */ writel((1 << ep_num), &udc->op_regs->epsetupstat); /* while a hazard exists when setup package arrives */ do { /* Set Setup Tripwire */ temp = readl(&udc->op_regs->usbcmd); writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd); /* Copy the setup packet to local buffer */ memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8); } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET)); /* Clear Setup Tripwire */ temp = readl(&udc->op_regs->usbcmd); writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd); } static void irq_process_tr_complete(struct mv_udc *udc) { u32 tmp, bit_pos; int i, ep_num = 0, direction = 0; struct mv_ep *curr_ep; struct mv_req *curr_req, *temp_req; 
int status; /* * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE * because the setup packets are to be read ASAP */ /* Process all Setup packet received interrupts */ tmp = readl(&udc->op_regs->epsetupstat); if (tmp) { for (i = 0; i < udc->max_eps; i++) { if (tmp & (1 << i)) { get_setup_data(udc, i, (u8 *)(&udc->local_setup_buff)); handle_setup_packet(udc, i, &udc->local_setup_buff); } } } /* Don't clear the endpoint setup status register here. * It is cleared as a setup packet is read out of the buffer */ /* Process non-setup transaction complete interrupts */ tmp = readl(&udc->op_regs->epcomplete); if (!tmp) return; writel(tmp, &udc->op_regs->epcomplete); for (i = 0; i < udc->max_eps * 2; i++) { ep_num = i >> 1; direction = i % 2; bit_pos = 1 << (ep_num + 16 * direction); if (!(bit_pos & tmp)) continue; if (i == 1) curr_ep = &udc->eps[0]; else curr_ep = &udc->eps[i]; /* process the req queue until an uncomplete request */ list_for_each_entry_safe(curr_req, temp_req, &curr_ep->queue, queue) { status = process_ep_req(udc, i, curr_req); if (status) break; /* write back status to req */ curr_req->req.status = status; /* ep0 request completion */ if (ep_num == 0) { ep0_req_complete(udc, curr_ep, curr_req); break; } else { done(curr_ep, curr_req, status); } } } } static void irq_process_reset(struct mv_udc *udc) { u32 tmp; unsigned int loops; udc->ep0_dir = EP_DIR_OUT; udc->ep0_state = WAIT_FOR_SETUP; udc->remote_wakeup = 0; /* default to 0 on reset */ /* The address bits are past bit 25-31. 
Set the address */ tmp = readl(&udc->op_regs->deviceaddr); tmp &= ~(USB_DEVICE_ADDRESS_MASK); writel(tmp, &udc->op_regs->deviceaddr); /* Clear all the setup token semaphores */ tmp = readl(&udc->op_regs->epsetupstat); writel(tmp, &udc->op_regs->epsetupstat); /* Clear all the endpoint complete status bits */ tmp = readl(&udc->op_regs->epcomplete); writel(tmp, &udc->op_regs->epcomplete); /* wait until all endptprime bits cleared */ loops = LOOPS(PRIME_TIMEOUT); while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) { if (loops == 0) { dev_err(&udc->dev->dev, "Timeout for ENDPTPRIME = 0x%x\n", readl(&udc->op_regs->epprime)); break; } loops--; udelay(LOOPS_USEC); } /* Write 1s to the Flush register */ writel((u32)~0, &udc->op_regs->epflush); if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) { dev_info(&udc->dev->dev, "usb bus reset\n"); udc->usb_state = USB_STATE_DEFAULT; /* reset all the queues, stop all USB activities */ stop_activity(udc, udc->driver); } else { dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n", readl(&udc->op_regs->portsc)); /* * re-initialize * controller reset */ udc_reset(udc); /* reset all the queues, stop all USB activities */ stop_activity(udc, udc->driver); /* reset ep0 dQH and endptctrl */ ep0_reset(udc); /* enable interrupt and set controller to run state */ udc_start(udc); udc->usb_state = USB_STATE_ATTACHED; } } static void handle_bus_resume(struct mv_udc *udc) { udc->usb_state = udc->resume_state; udc->resume_state = 0; /* report resume to the driver */ if (udc->driver) { if (udc->driver->resume) { spin_unlock(&udc->lock); udc->driver->resume(&udc->gadget); spin_lock(&udc->lock); } } } static void irq_process_suspend(struct mv_udc *udc) { udc->resume_state = udc->usb_state; udc->usb_state = USB_STATE_SUSPENDED; if (udc->driver->suspend) { spin_unlock(&udc->lock); udc->driver->suspend(&udc->gadget); spin_lock(&udc->lock); } } static void irq_process_port_change(struct mv_udc *udc) { u32 portsc; portsc = 
readl(&udc->op_regs->portsc[0]); if (!(portsc & PORTSCX_PORT_RESET)) { /* Get the speed */ u32 speed = portsc & PORTSCX_PORT_SPEED_MASK; switch (speed) { case PORTSCX_PORT_SPEED_HIGH: udc->gadget.speed = USB_SPEED_HIGH; break; case PORTSCX_PORT_SPEED_FULL: udc->gadget.speed = USB_SPEED_FULL; break; case PORTSCX_PORT_SPEED_LOW: udc->gadget.speed = USB_SPEED_LOW; break; default: udc->gadget.speed = USB_SPEED_UNKNOWN; break; } } if (portsc & PORTSCX_PORT_SUSPEND) { udc->resume_state = udc->usb_state; udc->usb_state = USB_STATE_SUSPENDED; if (udc->driver->suspend) { spin_unlock(&udc->lock); udc->driver->suspend(&udc->gadget); spin_lock(&udc->lock); } } if (!(portsc & PORTSCX_PORT_SUSPEND) && udc->usb_state == USB_STATE_SUSPENDED) { handle_bus_resume(udc); } if (!udc->resume_state) udc->usb_state = USB_STATE_DEFAULT; } static void irq_process_error(struct mv_udc *udc) { /* Increment the error count */ udc->errors++; } static irqreturn_t mv_udc_irq(int irq, void *dev) { struct mv_udc *udc = (struct mv_udc *)dev; u32 status, intr; /* Disable ISR when stopped bit is set */ if (udc->stopped) return IRQ_NONE; spin_lock(&udc->lock); status = readl(&udc->op_regs->usbsts); intr = readl(&udc->op_regs->usbintr); status &= intr; if (status == 0) { spin_unlock(&udc->lock); return IRQ_NONE; } /* Clear all the interrupts occurred */ writel(status, &udc->op_regs->usbsts); if (status & USBSTS_ERR) irq_process_error(udc); if (status & USBSTS_RESET) irq_process_reset(udc); if (status & USBSTS_PORT_CHANGE) irq_process_port_change(udc); if (status & USBSTS_INT) irq_process_tr_complete(udc); if (status & USBSTS_SUSPEND) irq_process_suspend(udc); spin_unlock(&udc->lock); return IRQ_HANDLED; } static irqreturn_t mv_udc_vbus_irq(int irq, void *dev) { struct mv_udc *udc = (struct mv_udc *)dev; /* polling VBUS and init phy may cause too much time*/ if (udc->qwork) queue_work(udc->qwork, &udc->vbus_work); return IRQ_HANDLED; } static void mv_udc_vbus_work(struct work_struct *work) { struct mv_udc 
*udc; unsigned int vbus; udc = container_of(work, struct mv_udc, vbus_work); if (!udc->pdata->vbus) return; vbus = udc->pdata->vbus->poll(); dev_info(&udc->dev->dev, "vbus is %d\n", vbus); if (vbus == VBUS_HIGH) mv_udc_vbus_session(&udc->gadget, 1); else if (vbus == VBUS_LOW) mv_udc_vbus_session(&udc->gadget, 0); } /* release device structure */ static void gadget_release(struct device *_dev) { struct mv_udc *udc; udc = dev_get_drvdata(_dev); complete(udc->done); } static int mv_udc_remove(struct platform_device *pdev) { struct mv_udc *udc; udc = platform_get_drvdata(pdev); usb_del_gadget_udc(&udc->gadget); if (udc->qwork) { flush_workqueue(udc->qwork); destroy_workqueue(udc->qwork); } /* free memory allocated in probe */ if (udc->dtd_pool) dma_pool_destroy(udc->dtd_pool); if (udc->ep_dqh) dma_free_coherent(&pdev->dev, udc->ep_dqh_size, udc->ep_dqh, udc->ep_dqh_dma); mv_udc_disable(udc); /* free dev, wait for the release() finished */ wait_for_completion(udc->done); return 0; } static int mv_udc_probe(struct platform_device *pdev) { struct mv_usb_platform_data *pdata = pdev->dev.platform_data; struct mv_udc *udc; int retval = 0; struct resource *r; size_t size; if (pdata == NULL) { dev_err(&pdev->dev, "missing platform_data\n"); return -ENODEV; } udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL); if (udc == NULL) { dev_err(&pdev->dev, "failed to allocate memory for udc\n"); return -ENOMEM; } udc->done = &release_done; udc->pdata = pdev->dev.platform_data; spin_lock_init(&udc->lock); udc->dev = pdev; if (pdata->mode == MV_USB_MODE_OTG) { udc->transceiver = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2); if (IS_ERR(udc->transceiver)) { retval = PTR_ERR(udc->transceiver); if (retval == -ENXIO) return retval; udc->transceiver = NULL; return -EPROBE_DEFER; } } /* udc only have one sysclk. 
*/ udc->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(udc->clk)) return PTR_ERR(udc->clk); r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs"); if (r == NULL) { dev_err(&pdev->dev, "no I/O memory resource defined\n"); return -ENODEV; } udc->cap_regs = (struct mv_cap_regs __iomem *) devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (udc->cap_regs == NULL) { dev_err(&pdev->dev, "failed to map I/O memory\n"); return -EBUSY; } r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs"); if (r == NULL) { dev_err(&pdev->dev, "no phy I/O memory resource defined\n"); return -ENODEV; } udc->phy_regs = ioremap(r->start, resource_size(r)); if (udc->phy_regs == NULL) { dev_err(&pdev->dev, "failed to map phy I/O memory\n"); return -EBUSY; } /* we will acces controller register, so enable the clk */ retval = mv_udc_enable_internal(udc); if (retval) return retval; udc->op_regs = (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs + (readl(&udc->cap_regs->caplength_hciversion) & CAPLENGTH_MASK)); udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK; /* * some platform will use usb to download image, it may not disconnect * usb gadget before loading kernel. So first stop udc here. 
*/ udc_stop(udc); writel(0xFFFFFFFF, &udc->op_regs->usbsts); size = udc->max_eps * sizeof(struct mv_dqh) *2; size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1); udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size, &udc->ep_dqh_dma, GFP_KERNEL); if (udc->ep_dqh == NULL) { dev_err(&pdev->dev, "allocate dQH memory failed\n"); retval = -ENOMEM; goto err_disable_clock; } udc->ep_dqh_size = size; /* create dTD dma_pool resource */ udc->dtd_pool = dma_pool_create("mv_dtd", &pdev->dev, sizeof(struct mv_dtd), DTD_ALIGNMENT, DMA_BOUNDARY); if (!udc->dtd_pool) { retval = -ENOMEM; goto err_free_dma; } size = udc->max_eps * sizeof(struct mv_ep) *2; udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); if (udc->eps == NULL) { dev_err(&pdev->dev, "allocate ep memory failed\n"); retval = -ENOMEM; goto err_destroy_dma; } /* initialize ep0 status request structure */ udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req), GFP_KERNEL); if (!udc->status_req) { dev_err(&pdev->dev, "allocate status_req memory failed\n"); retval = -ENOMEM; goto err_destroy_dma; } INIT_LIST_HEAD(&udc->status_req->queue); /* allocate a small amount of memory to get valid address */ udc->status_req->req.buf = kzalloc(8, GFP_KERNEL); udc->status_req->req.dma = DMA_ADDR_INVALID; udc->resume_state = USB_STATE_NOTATTACHED; udc->usb_state = USB_STATE_POWERED; udc->ep0_dir = EP_DIR_OUT; udc->remote_wakeup = 0; r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0); if (r == NULL) { dev_err(&pdev->dev, "no IRQ resource defined\n"); retval = -ENODEV; goto err_destroy_dma; } udc->irq = r->start; if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq, IRQF_SHARED, driver_name, udc)) { dev_err(&pdev->dev, "Request irq %d for UDC failed\n", udc->irq); retval = -ENODEV; goto err_destroy_dma; } /* initialize gadget structure */ udc->gadget.ops = &mv_ops; /* usb_gadget_ops */ udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */ INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */ udc->gadget.speed = 
USB_SPEED_UNKNOWN; /* speed */ udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */ /* the "gadget" abstracts/virtualizes the controller */ udc->gadget.name = driver_name; /* gadget name */ eps_init(udc); /* VBUS detect: we can disable/enable clock on demand.*/ if (udc->transceiver) udc->clock_gating = 1; else if (pdata->vbus) { udc->clock_gating = 1; retval = devm_request_threaded_irq(&pdev->dev, pdata->vbus->irq, NULL, mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc); if (retval) { dev_info(&pdev->dev, "Can not request irq for VBUS, " "disable clock gating\n"); udc->clock_gating = 0; } udc->qwork = create_singlethread_workqueue("mv_udc_queue"); if (!udc->qwork) { dev_err(&pdev->dev, "cannot create workqueue\n"); retval = -ENOMEM; goto err_destroy_dma; } INIT_WORK(&udc->vbus_work, mv_udc_vbus_work); } /* * When clock gating is supported, we can disable clk and phy. * If not, it means that VBUS detection is not supported, we * have to enable vbus active all the time to let controller work. */ if (udc->clock_gating) mv_udc_disable_internal(udc); else udc->vbus_active = 1; retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget, gadget_release); if (retval) goto err_create_workqueue; platform_set_drvdata(pdev, udc); dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n", udc->clock_gating ? 
"with" : "without"); return 0; err_create_workqueue: destroy_workqueue(udc->qwork); err_destroy_dma: dma_pool_destroy(udc->dtd_pool); err_free_dma: dma_free_coherent(&pdev->dev, udc->ep_dqh_size, udc->ep_dqh, udc->ep_dqh_dma); err_disable_clock: mv_udc_disable_internal(udc); return retval; } #ifdef CONFIG_PM static int mv_udc_suspend(struct device *dev) { struct mv_udc *udc; udc = dev_get_drvdata(dev); /* if OTG is enabled, the following will be done in OTG driver*/ if (udc->transceiver) return 0; if (udc->pdata->vbus && udc->pdata->vbus->poll) if (udc->pdata->vbus->poll() == VBUS_HIGH) { dev_info(&udc->dev->dev, "USB cable is connected!\n"); return -EAGAIN; } /* * only cable is unplugged, udc can suspend. * So do not care about clock_gating == 1. */ if (!udc->clock_gating) { udc_stop(udc); spin_lock_irq(&udc->lock); /* stop all usb activities */ stop_activity(udc, udc->driver); spin_unlock_irq(&udc->lock); mv_udc_disable_internal(udc); } return 0; } static int mv_udc_resume(struct device *dev) { struct mv_udc *udc; int retval; udc = dev_get_drvdata(dev); /* if OTG is enabled, the following will be done in OTG driver*/ if (udc->transceiver) return 0; if (!udc->clock_gating) { retval = mv_udc_enable_internal(udc); if (retval) return retval; if (udc->driver && udc->softconnect) { udc_reset(udc); ep0_reset(udc); udc_start(udc); } } return 0; } static const struct dev_pm_ops mv_udc_pm_ops = { .suspend = mv_udc_suspend, .resume = mv_udc_resume, }; #endif static void mv_udc_shutdown(struct platform_device *pdev) { struct mv_udc *udc; u32 mode; udc = platform_get_drvdata(pdev); /* reset controller mode to IDLE */ mv_udc_enable(udc); mode = readl(&udc->op_regs->usbmode); mode &= ~3; writel(mode, &udc->op_regs->usbmode); mv_udc_disable(udc); } static struct platform_driver udc_driver = { .probe = mv_udc_probe, .remove = mv_udc_remove, .shutdown = mv_udc_shutdown, .driver = { .owner = THIS_MODULE, .name = "mv-udc", #ifdef CONFIG_PM .pm = &mv_udc_pm_ops, #endif }, }; 
module_platform_driver(udc_driver); MODULE_ALIAS("platform:mv-udc"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>"); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
BenefitA3/android_kernel_ark_msm8916
net/netfilter/xt_bpf.c
2670
1854
/* Xtables module to match packets using a BPF filter. * Copyright 2013 Google Inc. * Written by Willem de Bruijn <willemb@google.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/filter.h> #include <linux/netfilter/xt_bpf.h> #include <linux/netfilter/x_tables.h> MODULE_AUTHOR("Willem de Bruijn <willemb@google.com>"); MODULE_DESCRIPTION("Xtables: BPF filter match"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_bpf"); MODULE_ALIAS("ip6t_bpf"); static int bpf_mt_check(const struct xt_mtchk_param *par) { struct xt_bpf_info *info = par->matchinfo; struct sock_fprog program; program.len = info->bpf_program_num_elem; program.filter = (struct sock_filter __user *) info->bpf_program; if (sk_unattached_filter_create(&info->filter, &program)) { pr_info("bpf: check failed: parse error\n"); return -EINVAL; } return 0; } static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_bpf_info *info = par->matchinfo; return SK_RUN_FILTER(info->filter, skb); } static void bpf_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_bpf_info *info = par->matchinfo; sk_unattached_filter_destroy(info->filter); } static struct xt_match bpf_mt_reg __read_mostly = { .name = "bpf", .revision = 0, .family = NFPROTO_UNSPEC, .checkentry = bpf_mt_check, .match = bpf_mt, .destroy = bpf_mt_destroy, .matchsize = sizeof(struct xt_bpf_info), .me = THIS_MODULE, }; static int __init bpf_mt_init(void) { return xt_register_match(&bpf_mt_reg); } static void __exit bpf_mt_exit(void) { xt_unregister_match(&bpf_mt_reg); } module_init(bpf_mt_init); module_exit(bpf_mt_exit);
gpl-2.0
Vajnar/linux-stable-hx4700
fs/sysfs/group.c
4718
5576
/* * fs/sysfs/group.c - Operations for adding/removing multiple files at once. * * Copyright (c) 2003 Patrick Mochel * Copyright (c) 2003 Open Source Development Lab * * This file is released undert the GPL v2. * */ #include <linux/kobject.h> #include <linux/module.h> #include <linux/dcache.h> #include <linux/namei.h> #include <linux/err.h> #include "sysfs.h" static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj, const struct attribute_group *grp) { struct attribute *const* attr; int i; for (i = 0, attr = grp->attrs; *attr; i++, attr++) sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name); } static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj, const struct attribute_group *grp, int update) { struct attribute *const* attr; int error = 0, i; for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) { umode_t mode = 0; /* in update mode, we're changing the permissions or * visibility. Do this by first removing then * re-adding (if required) the file */ if (update) sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name); if (grp->is_visible) { mode = grp->is_visible(kobj, *attr, i); if (!mode) continue; } error = sysfs_add_file_mode(dir_sd, *attr, SYSFS_KOBJ_ATTR, (*attr)->mode | mode); if (unlikely(error)) break; } if (error) remove_files(dir_sd, kobj, grp); return error; } static int internal_create_group(struct kobject *kobj, int update, const struct attribute_group *grp) { struct sysfs_dirent *sd; int error; BUG_ON(!kobj || (!update && !kobj->sd)); /* Updates may happen before the object has been instantiated */ if (unlikely(update && !kobj->sd)) return -EINVAL; if (!grp->attrs) { WARN(1, "sysfs: attrs not set by subsystem for group: %s/%s\n", kobj->name, grp->name ? 
"" : grp->name); return -EINVAL; } if (grp->name) { error = sysfs_create_subdir(kobj, grp->name, &sd); if (error) return error; } else sd = kobj->sd; sysfs_get(sd); error = create_files(sd, kobj, grp, update); if (error) { if (grp->name) sysfs_remove_subdir(sd); } sysfs_put(sd); return error; } /** * sysfs_create_group - given a directory kobject, create an attribute group * @kobj: The kobject to create the group on * @grp: The attribute group to create * * This function creates a group for the first time. It will explicitly * warn and error if any of the attribute files being created already exist. * * Returns 0 on success or error. */ int sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp) { return internal_create_group(kobj, 0, grp); } /** * sysfs_update_group - given a directory kobject, update an attribute group * @kobj: The kobject to update the group on * @grp: The attribute group to update * * This function updates an attribute group. Unlike * sysfs_create_group(), it will explicitly not warn or error if any * of the attribute files being created already exist. Furthermore, * if the visibility of the files has changed through the is_visible() * callback, it will update the permissions and add or remove the * relevant files. * * The primary use for this function is to call it after making a change * that affects group visibility. * * Returns 0 on success or error. 
*/ int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp) { return internal_create_group(kobj, 1, grp); } void sysfs_remove_group(struct kobject * kobj, const struct attribute_group * grp) { struct sysfs_dirent *dir_sd = kobj->sd; struct sysfs_dirent *sd; if (grp->name) { sd = sysfs_get_dirent(dir_sd, NULL, grp->name); if (!sd) { WARN(!sd, KERN_WARNING "sysfs group %p not found for " "kobject '%s'\n", grp, kobject_name(kobj)); return; } } else sd = sysfs_get(dir_sd); remove_files(sd, kobj, grp); if (grp->name) sysfs_remove_subdir(sd); sysfs_put(sd); } /** * sysfs_merge_group - merge files into a pre-existing attribute group. * @kobj: The kobject containing the group. * @grp: The files to create and the attribute group they belong to. * * This function returns an error if the group doesn't exist or any of the * files already exist in that group, in which case none of the new files * are created. */ int sysfs_merge_group(struct kobject *kobj, const struct attribute_group *grp) { struct sysfs_dirent *dir_sd; int error = 0; struct attribute *const *attr; int i; dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name); if (!dir_sd) return -ENOENT; for ((i = 0, attr = grp->attrs); *attr && !error; (++i, ++attr)) error = sysfs_add_file(dir_sd, *attr, SYSFS_KOBJ_ATTR); if (error) { while (--i >= 0) sysfs_hash_and_remove(dir_sd, NULL, (*--attr)->name); } sysfs_put(dir_sd); return error; } EXPORT_SYMBOL_GPL(sysfs_merge_group); /** * sysfs_unmerge_group - remove files from a pre-existing attribute group. * @kobj: The kobject containing the group. * @grp: The files to remove and the attribute group they belong to. 
*/ void sysfs_unmerge_group(struct kobject *kobj, const struct attribute_group *grp) { struct sysfs_dirent *dir_sd; struct attribute *const *attr; dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name); if (dir_sd) { for (attr = grp->attrs; *attr; ++attr) sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name); sysfs_put(dir_sd); } } EXPORT_SYMBOL_GPL(sysfs_unmerge_group); EXPORT_SYMBOL_GPL(sysfs_create_group); EXPORT_SYMBOL_GPL(sysfs_update_group); EXPORT_SYMBOL_GPL(sysfs_remove_group);
gpl-2.0
GameTheory-/android_kernel_lge_fx1s
drivers/net/ethernet/i825xx/ether1.c
4974
27611
/*
 * linux/drivers/acorn/net/ether1.c
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Acorn ether1 driver (82586 chip) for Acorn machines
 *
 * We basically keep two queues in the cards memory - one for transmit
 * and one for receive.  Each has a head and a tail.  The head is where
 * we/the chip adds packets to be transmitted/received, and the tail
 * is where the transmitter has got to/where the receiver will stop.
 * Both of these queues are circular, and since the chip is running
 * all the time, we have to be careful when we modify the pointers etc
 * so that the buffer memory contents is valid all the time.
 *
 * Change log:
 * 1.00	RMK			Released
 * 1.01	RMK	19/03/1996	Transfers the last odd byte onto/off of the card now.
 * 1.02	RMK	25/05/1997	Added code to restart RU if it goes not ready
 * 1.03	RMK	14/09/1997	Cleaned up the handling of a reset during the TX interrupt.
 *				Should prevent lockup.
 * 1.04	RMK	17/09/1997	Added more info when initialsation of chip goes wrong.
 *				TDR now only reports failure when chip reports non-zero
 *				TDR time-distance.
 * 1.05	RMK	31/12/1997	Removed calls to dev_tint for 2.1
 * 1.06	RMK	10/02/2000	Updated for 2.3.43
 * 1.07	RMK	13/05/2000	Updated for 2.3.99-pre8
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/ecard.h>

#define __ETHER1_C
#include "ether1.h"

static unsigned int net_debug = NET_DEBUG;

/* Layout of the card's 64K paged RAM: TX ring, then RX ring of
 * { rfd, rbd, buffer } triples; the top is used for SCP/ISCP. */
#define BUFFER_SIZE	0x10000
#define TX_AREA_START	0x00100
#define TX_AREA_END	0x05000
#define RX_AREA_START	0x05000
#define RX_AREA_END	0x0fc00

static int ether1_open(struct net_device *dev);
static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev);
static void ether1_timeout(struct net_device *dev);

/* ------------------------------------------------------------------------- */

static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";

#define BUS_16 16
#define BUS_8  8

/* ------------------------------------------------------------------------- */

#define DISABLEIRQS 1
#define NORMALIRQS  0

/* Read/write one 16-bit field of a structure held in card RAM.
 * The (int)(&((type *)0)->offset) expression is a hand-rolled offsetof(). */
#define ether1_readw(dev, addr, type, offset, svflgs) ether1_inw_p (dev, addr + (int)(&((type *)0)->offset), svflgs)
#define ether1_writew(dev, val, addr, type, offset, svflgs) ether1_outw_p (dev, val, addr + (int)(&((type *)0)->offset), svflgs)

/*
 * Read one word from card RAM.  The card RAM is paged (4K pages selected
 * via REG_PAGE), so optionally disable IRQs (svflgs) around the
 * page-select + access pair to keep them atomic against the ISR.
 */
static inline unsigned short
ether1_inw_p (struct net_device *dev, int addr, int svflgs)
{
	unsigned long flags;
	unsigned short ret;

	if (svflgs)
		local_irq_save (flags);

	writeb(addr >> 12, REG_PAGE);
	ret = readw(ETHER1_RAM + ((addr & 4095) << 1));
	if (svflgs)
		local_irq_restore (flags);
	return ret;
}

/* Write one word to card RAM; same page-select/IRQ rules as ether1_inw_p. */
static inline void
ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs)
{
	unsigned long flags;

	if (svflgs)
		local_irq_save (flags);

	writeb(addr >> 12, REG_PAGE);
	writew(val, ETHER1_RAM + ((addr & 4095) << 1));
	if (svflgs)
		local_irq_restore (flags);
}

/*
 * Some inline assembler to allow fast transfers on to/off of the card.
 * Since this driver depends on some features presented by the ARM
 * specific architecture, and that you can't configure this driver
 * without specifiing ARM mode, this is not a problem.
 *
 * This routine is essentially an optimised memcpy from the card's
 * onboard RAM to kernel memory.
 */
static void
ether1_writebuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length)
{
	unsigned int page, thislen, offset;
	void __iomem *addr;

	offset = start & 4095;
	page = start >> 12;
	addr = ETHER1_RAM + (offset << 1);

	/* First chunk is limited to the remainder of the current 4K page. */
	if (offset + length > 4096)
		thislen = 4096 - offset;
	else
		thislen = length;

	do {
		int used;

		writeb(page, REG_PAGE);
		length -= thislen;

		/* Unrolled copy, two bytes per iteration step; the tail
		 * handles a final odd byte (see changelog 1.01). */
		__asm__ __volatile__(
	"subs	%3, %3, #2\n\
	bmi	2f\n\
1:	ldr	%0, [%1], #2\n\
	mov	%0, %0, lsl #16\n\
	orr	%0, %0, %0, lsr #16\n\
	str	%0, [%2], #4\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%1], #2\n\
	mov	%0, %0, lsl #16\n\
	orr	%0, %0, %0, lsr #16\n\
	str	%0, [%2], #4\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%1], #2\n\
	mov	%0, %0, lsl #16\n\
	orr	%0, %0, %0, lsr #16\n\
	str	%0, [%2], #4\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%1], #2\n\
	mov	%0, %0, lsl #16\n\
	orr	%0, %0, %0, lsr #16\n\
	str	%0, [%2], #4\n\
	subs	%3, %3, #2\n\
	bpl	1b\n\
2:	adds	%3, %3, #1\n\
	ldreqb	%0, [%1]\n\
	streqb	%0, [%2]"
		: "=&r" (used), "=&r" (data)
		: "r" (addr), "r" (thislen), "1" (data));

		/* Subsequent chunks start at the beginning of the next page. */
		addr = ETHER1_RAM;
		thislen = length;
		if (thislen > 4096)
			thislen = 4096;
		page++;
	} while (thislen);
}

/* Optimised memcpy from card RAM to kernel memory (mirror of the above). */
static void
ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length)
{
	unsigned int page, thislen, offset;
	void __iomem *addr;

	offset = start & 4095;
	page = start >> 12;
	addr = ETHER1_RAM + (offset << 1);

	if (offset + length > 4096)
		thislen = 4096 - offset;
	else
		thislen = length;

	do {
		int used;

		writeb(page, REG_PAGE);
		length -= thislen;

		__asm__ __volatile__(
	"subs	%3, %3, #2\n\
	bmi	2f\n\
1:	ldr	%0, [%2], #4\n\
	strb	%0, [%1], #1\n\
	mov	%0, %0, lsr #8\n\
	strb	%0, [%1], #1\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%2], #4\n\
	strb	%0, [%1], #1\n\
	mov	%0, %0, lsr #8\n\
	strb	%0, [%1], #1\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%2], #4\n\
	strb	%0, [%1], #1\n\
	mov	%0, %0, lsr #8\n\
	strb	%0, [%1], #1\n\
	subs	%3, %3, #2\n\
	bmi	2f\n\
	ldr	%0, [%2], #4\n\
	strb	%0, [%1], #1\n\
	mov	%0, %0, lsr #8\n\
	strb	%0, [%1], #1\n\
	subs	%3, %3, #2\n\
	bpl	1b\n\
2:	adds	%3, %3, #1\n\
	ldreqb	%0, [%2]\n\
	streqb	%0, [%1]"
		: "=&r" (used), "=&r" (data)
		: "r" (addr), "r" (thislen), "1" (data));

		addr = ETHER1_RAM;
		thislen = length;
		if (thislen > 4096)
			thislen = 4096;
		page++;
	} while (thislen);
}

/*
 * Probe-time RAM test: fill the whole card RAM with 'byte', read it back
 * and report mismatches (up to max_errors distinct bad values, with
 * address ranges).  Returns BUFFER_SIZE on success, -ENODEV on failure,
 * 1 if the bounce buffer could not be allocated.
 */
static int __devinit
ether1_ramtest(struct net_device *dev, unsigned char byte)
{
	unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL);
	int i, ret = BUFFER_SIZE;
	int max_errors = 15;
	int bad = -1;
	int bad_start = 0;

	if (!buffer)
		return 1;

	memset (buffer, byte, BUFFER_SIZE);
	ether1_writebuffer (dev, buffer, 0, BUFFER_SIZE);
	/* Poison the bounce buffer so a failed read-back can't pass. */
	memset (buffer, byte ^ 0xff, BUFFER_SIZE);
	ether1_readbuffer (dev, buffer, 0, BUFFER_SIZE);

	for (i = 0; i < BUFFER_SIZE; i++) {
		if (buffer[i] != byte) {
			if (max_errors >= 0 && bad != buffer[i]) {
				if (bad != -1)
					printk ("\n");
				printk (KERN_CRIT "%s: RAM failed with (%02X instead of %02X) at 0x%04X",
					dev->name, buffer[i], byte, i);
				ret = -ENODEV;
				max_errors --;
				bad = buffer[i];
				bad_start = i;
			}
		} else {
			if (bad != -1) {
				if (bad_start == i - 1)
					printk ("\n");
				else
					printk (" - 0x%04X\n", i - 1);
				bad = -1;
			}
		}
	}

	if (bad != -1)
		printk (" - 0x%04X\n", BUFFER_SIZE);
	kfree (buffer);

	return ret;
}

/* Hold the 82586 in reset.  Returns the bus width (always BUS_16 here). */
static int
ether1_reset (struct net_device *dev)
{
	writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);
	return BUS_16;
}

/* Second-stage probe: run the RAM test with two patterns and record the
 * usable RAM size in dev->mem_end. */
static int __devinit
ether1_init_2(struct net_device *dev)
{
	int i;

	dev->mem_start = 0;

	i = ether1_ramtest (dev, 0x5a);
	if (i > 0)
		i = ether1_ramtest (dev, 0x1e);

	if (i <= 0)
		return -ENODEV;

	dev->mem_end = i;
	return 0;
}

/*
 * These are the structures that are loaded into the ether RAM card to
 * initialise the 82586
 */

/* at 0x0100 */
#define NOP_ADDR	(TX_AREA_START)
#define NOP_SIZE	(0x06)
static nop_t init_nop = {
	0,
	CMD_NOP,
	NOP_ADDR
};

/* at 0x003a */
#define TDR_ADDR	(0x003a)
#define TDR_SIZE	(0x08)
static tdr_t init_tdr = {
	0,
	CMD_TDR | CMD_INTR,
	NOP_ADDR,
	0
};

/* at 0x002e */
#define MC_ADDR		(0x002e)
#define MC_SIZE		(0x0c)
static mc_t init_mc = {
	0,
	CMD_SETMULTICAST,
	TDR_ADDR,
	0,
	{ { 0, } }
};

/* at 0x0022 */
#define SA_ADDR		(0x0022)
#define SA_SIZE		(0x0c)
static sa_t init_sa = {
	0,
	CMD_SETADDRESS,
	MC_ADDR,
	{ 0, }
};

/* at 0x0010 */
#define CFG_ADDR	(0x0010)
#define CFG_SIZE	(0x12)
static cfg_t init_cfg = {
	0,
	CMD_CONFIG,
	SA_ADDR,
	8,
	8,
	CFG8_SRDY,
	CFG9_PREAMB8 | CFG9_ADDRLENBUF | CFG9_ADDRLEN(6),
	0,
	0x60,
	0,
	CFG13_RETRY(15) | CFG13_SLOTH(2),
	0,
};

/* at 0x0000 */
#define SCB_ADDR	(0x0000)
#define SCB_SIZE	(0x10)
static scb_t init_scb = {
	0,
	SCB_CMDACKRNR | SCB_CMDACKCNA | SCB_CMDACKFR | SCB_CMDACKCX,
	CFG_ADDR,
	RX_AREA_START,
	0,
	0,
	0,
	0
};

/* at 0xffee */
#define ISCP_ADDR	(0xffee)
#define ISCP_SIZE	(0x08)
static iscp_t init_iscp = {
	1,
	SCB_ADDR,
	0x0000,
	0x0000
};

/* at 0xfff6 */
#define SCP_ADDR	(0xfff6)
#define SCP_SIZE	(0x0a)
static scp_t init_scp = {
	SCP_SY_16BBUS,
	{ 0, 0 },
	ISCP_ADDR,
	0
};

#define RFD_SIZE	(0x16)
static rfd_t init_rfd = {
	0,
	0,
	0,
	0,
	{ 0, },
	{ 0, },
	0
};

#define RBD_SIZE	(0x0a)
static rbd_t init_rbd = {
	0,
	0,
	0,
	0,
	ETH_FRAME_LEN + 8
};

#define TX_SIZE		(0x08)
#define TBD_SIZE	(0x08)

/*
 * Full chip bring-up: load the SCP/ISCP/SCB and the initial command
 * chain (config, set-address, set-multicast, TDR, nop) into card RAM,
 * build the circular RX rfd/rbd ring, release the 82586 from reset and
 * poll each issued command's status word with a jiffies timeout.
 * Returns 0 on success, 1 on failure.
 */
static int
ether1_init_for_open (struct net_device *dev)
{
	int i, status, addr, next, next2;
	int failures = 0;
	unsigned long timeout;

	writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);

	for (i = 0; i < 6; i++)
		init_sa.sa_addr[i] = dev->dev_addr[i];

	/* load data structures into ether1 RAM */
	ether1_writebuffer (dev, &init_scp, SCP_ADDR, SCP_SIZE);
	ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE);
	ether1_writebuffer (dev, &init_scb, SCB_ADDR, SCB_SIZE);
	ether1_writebuffer (dev, &init_cfg, CFG_ADDR, CFG_SIZE);
	ether1_writebuffer (dev, &init_sa, SA_ADDR, SA_SIZE);
	ether1_writebuffer (dev, &init_mc, MC_ADDR, MC_SIZE);
	ether1_writebuffer (dev, &init_tdr, TDR_ADDR, TDR_SIZE);
	ether1_writebuffer (dev, &init_nop, NOP_ADDR, NOP_SIZE);

	/* Read back one field to verify the card RAM holds what we wrote. */
	if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) {
		printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n",
			dev->name);
		return 1;
	}

	/*
	 * setup circularly linked list of { rfd, rbd, buffer }, with
	 * all rfds circularly linked, rbds circularly linked.
	 * First rfd is linked to scp, first rbd is linked to first
	 * rfd.  Last rbd has a suspend command.
	 */
	addr = RX_AREA_START;
	do {
		next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;
		next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;

		if (next2 >= RX_AREA_END) {
			next = RX_AREA_START;
			init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND;
			priv(dev)->rx_tail = addr;
		} else
			init_rfd.rfd_command = 0;
		if (addr == RX_AREA_START)
			init_rfd.rfd_rbdoffset = addr + RFD_SIZE;
		else
			init_rfd.rfd_rbdoffset = 0;
		init_rfd.rfd_link = next;
		init_rbd.rbd_link = next + RFD_SIZE;
		init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE;

		ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE);
		ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE);
		addr = next;
	} while (next2 < RX_AREA_END);

	priv(dev)->tx_link = NOP_ADDR;
	priv(dev)->tx_head = NOP_ADDR + NOP_SIZE;
	priv(dev)->tx_tail = TDR_ADDR;
	priv(dev)->rx_head = RX_AREA_START;

	/* release reset & give 586 a prod */
	priv(dev)->resetting = 1;
	priv(dev)->initialising = 1;
	writeb(CTRL_RST, REG_CONTROL);
	writeb(0, REG_CONTROL);
	writeb(CTRL_CA, REG_CONTROL);

	/* 586 should now unset iscp.busy */
	timeout = jiffies + HZ/2;
	while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, DISABLEIRQS) == 1) {
		if (time_after(jiffies, timeout)) {
			printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n",
				dev->name);
			return 1;
		}
	}

	/* check status of commands that we issued */
	timeout += HZ/10;
	while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n",
			dev->name, status);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
		failures += 1;
	}

	timeout += HZ/10;
	while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n",
			dev->name, status);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
		failures += 1;
	}

	timeout += HZ/10;
	while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n",
			dev->name, status);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
		failures += 1;
	}

	/* TDR (cable test) gets a longer budget than the other commands. */
	timeout += HZ;
	while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS))
			& STAT_COMPLETE) == 0) {
		if (time_after(jiffies, timeout))
			break;
	}

	if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
		printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name);
		printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
			ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
			ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
	} else {
		/* TDR only signals a cable fault when a non-zero
		 * time-distance is reported (see changelog 1.04). */
		status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS);
		if (status & TDR_XCVRPROB)
			printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n",
				dev->name);
		else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) {
#ifdef FANCY
			printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n",
				dev->name, status & TDR_SHORT ? "short" : "open",
				(status & TDR_TIME) / 10, (status & TDR_TIME) % 10);
#else
			printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n",
				dev->name, status & TDR_SHORT ? "short" : "open",
				(status & TDR_TIME));
#endif
		}
	}

	if (failures)
		ether1_reset (dev);
	return failures ? 1 : 0;
}

/* ------------------------------------------------------------------------- */

/*
 * Allocate 'size' bytes (rounded up to even) from the circular TX area,
 * advancing tx_head and wrapping at TX_AREA_END.  Returns the start
 * offset, or -1 if the allocation would collide with tx_tail.
 */
static int
ether1_txalloc (struct net_device *dev, int size)
{
	int start, tail;

	size = (size + 1) & ~1;
	tail = priv(dev)->tx_tail;

	if (priv(dev)->tx_head + size > TX_AREA_END) {
		if (tail > priv(dev)->tx_head)
			return -1;
		start = TX_AREA_START;
		if (start + size > tail)
			return -1;
		priv(dev)->tx_head = start + size;
	} else {
		if (priv(dev)->tx_head < tail && (priv(dev)->tx_head + size) > tail)
			return -1;
		start = priv(dev)->tx_head;
		priv(dev)->tx_head += size;
	}

	return start;
}

/* ndo_open: claim the IRQ, initialise the chip and start the TX queue. */
static int
ether1_open (struct net_device *dev)
{
	if (!is_valid_ether_addr(dev->dev_addr)) {
		printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
			dev->name);
		return -EINVAL;
	}

	if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
		return -EAGAIN;

	if (ether1_init_for_open (dev)) {
		free_irq (dev->irq, dev);
		return -EAGAIN;
	}

	netif_start_queue(dev);

	return 0;
}

/* ndo_tx_timeout: reset and reinitialise the chip, then wake the queue. */
static void
ether1_timeout(struct net_device *dev)
{
	printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n",
		dev->name);
	printk(KERN_WARNING "%s: resetting device\n", dev->name);

	ether1_reset (dev);

	if (ether1_init_for_open (dev))
		printk (KERN_ERR "%s: unable to restart interface\n", dev->name);

	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

/*
 * ndo_start_xmit: copy the skb into card RAM as a
 * { tx command, tbd, data, nop } group, then splice it into the live
 * command chain by rewriting the previous nop's link word (IRQs off
 * around the splice so the ISR can't walk a half-built chain).
 */
static int
ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
{
	int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
	unsigned long flags;
	tx_t tx;
	tbd_t tbd;
	nop_t nop;

	if (priv(dev)->restart) {
		printk(KERN_WARNING "%s: resetting device\n", dev->name);

		ether1_reset(dev);

		if (ether1_init_for_open(dev))
			printk(KERN_ERR "%s: unable to restart interface\n",
				dev->name);
		else
			priv(dev)->restart = 0;
	}

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			goto out;
	}

	/*
	 * insert packet followed by a nop
	 */
	txaddr = ether1_txalloc (dev, TX_SIZE);
	tbdaddr = ether1_txalloc (dev, TBD_SIZE);
	dataddr = ether1_txalloc (dev, skb->len);
	nopaddr = ether1_txalloc (dev, NOP_SIZE);

	tx.tx_status = 0;
	tx.tx_command = CMD_TX | CMD_INTR;
	tx.tx_link = nopaddr;
	tx.tx_tbdoffset = tbdaddr;
	tbd.tbd_opts = TBD_EOL | skb->len;
	tbd.tbd_link = I82586_NULL;
	tbd.tbd_bufl = dataddr;
	tbd.tbd_bufh = 0;
	nop.nop_status = 0;
	nop.nop_command = CMD_NOP;
	/* Trailing nop links to itself; the chip spins here until the next
	 * packet is spliced in. */
	nop.nop_link = nopaddr;

	local_irq_save(flags);
	ether1_writebuffer (dev, &tx, txaddr, TX_SIZE);
	ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE);
	ether1_writebuffer (dev, skb->data, dataddr, skb->len);
	ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE);
	tmp = priv(dev)->tx_link;
	priv(dev)->tx_link = nopaddr;

	/* now reset the previous nop pointer */
	ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS);

	local_irq_restore(flags);

	/* handle transmit */

	/* check to see if we have room for a full sized ether frame */
	tmp = priv(dev)->tx_head;
	tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
	priv(dev)->tx_head = tmp;
	dev_kfree_skb (skb);

	if (tst == -1)
		netif_stop_queue(dev);

 out:
	return NETDEV_TX_OK;
}

/*
 * TX-complete handler: walk the command chain from tx_tail, accounting
 * completed TX commands (stats, collisions, carrier) and their
 * interleaved nops, then wake the queue if a full frame now fits.
 * Chain-corruption is reported and flagged via priv->restart.
 */
static void
ether1_xmit_done (struct net_device *dev)
{
	nop_t nop;
	int caddr, tst;

	caddr = priv(dev)->tx_tail;

again:
	ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);

	switch (nop.nop_command & CMD_MASK) {
	case CMD_TDR:
		/* special case */
		if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
				!= (unsigned short)I82586_NULL) {
			ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR, scb_t,
				    scb_command, NORMALIRQS);
			writeb(CTRL_CA, REG_CONTROL);
		}
		priv(dev)->tx_tail = NOP_ADDR;
		return;

	case CMD_NOP:
		if (nop.nop_link == caddr) {
			if (priv(dev)->initialising == 0)
				printk (KERN_WARNING "%s: strange command complete with no tx command!\n",
					dev->name);
			else
				priv(dev)->initialising = 0;
			return;
		}
		if (caddr == nop.nop_link)
			return;
		caddr = nop.nop_link;
		goto again;

	case CMD_TX:
		if (nop.nop_status & STAT_COMPLETE)
			break;
		printk (KERN_ERR "%s: strange command complete without completed command\n",
			dev->name);
		priv(dev)->restart = 1;
		return;

	default:
		printk (KERN_WARNING "%s: strange command %d complete! (offset %04X)",
			dev->name, nop.nop_command & CMD_MASK, caddr);
		priv(dev)->restart = 1;
		return;
	}

	while (nop.nop_status & STAT_COMPLETE) {
		if (nop.nop_status & STAT_OK) {
			dev->stats.tx_packets++;
			dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
		} else {
			dev->stats.tx_errors++;

			if (nop.nop_status & STAT_COLLAFTERTX)
				dev->stats.collisions++;
			if (nop.nop_status & STAT_NOCARRIER)
				dev->stats.tx_carrier_errors++;
			if (nop.nop_status & STAT_TXLOSTCTS)
				printk (KERN_WARNING "%s: cts lost\n", dev->name);
			if (nop.nop_status & STAT_TXSLOWDMA)
				dev->stats.tx_fifo_errors++;
			if (nop.nop_status & STAT_COLLEXCESSIVE)
				dev->stats.collisions += 16;
		}

		if (nop.nop_link == caddr) {
			printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n",
				dev->name);
			break;
		}

		caddr = nop.nop_link;
		ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
		if ((nop.nop_command & CMD_MASK) != CMD_NOP) {
			printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n",
				dev->name);
			break;
		}

		if (caddr == nop.nop_link)
			break;

		caddr = nop.nop_link;
		ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
		if ((nop.nop_command & CMD_MASK) != CMD_TX) {
			printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n",
				dev->name);
			break;
		}
	}
	priv(dev)->tx_tail = caddr;

	caddr = priv(dev)->tx_head;
	tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
	priv(dev)->tx_head = caddr;
	if (tst != -1)
		netif_wake_queue(dev);
}

/*
 * RX handler: drain completed rfds from rx_head, pass each frame up the
 * stack, then recycle the rfd onto the ring tail (moving the
 * end-of-list/suspend marker forward one slot per frame).
 */
static void
ether1_recv_done (struct net_device *dev)
{
	int status;
	int nexttail, rbdaddr;
	rbd_t rbd;

	do {
		status = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_status, NORMALIRQS);
		if ((status & RFD_COMPLETE) == 0)
			break;

		rbdaddr = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_rbdoffset, NORMALIRQS);
		ether1_readbuffer (dev, &rbd, rbdaddr, RBD_SIZE);

		if ((rbd.rbd_status & (RBD_EOF | RBD_ACNTVALID)) == (RBD_EOF | RBD_ACNTVALID)) {
			int length = rbd.rbd_status & RBD_ACNT;
			struct sk_buff *skb;

			length = (length + 1) & ~1;
			skb = netdev_alloc_skb(dev, length + 2);
			if (skb) {
				/* 2-byte reserve aligns the IP header. */
				skb_reserve (skb, 2);

				ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length);

				skb->protocol = eth_type_trans (skb, dev);
				netif_rx (skb);
				dev->stats.rx_packets++;
			} else
				dev->stats.rx_dropped++;
		} else {
			printk(KERN_WARNING "%s: %s\n", dev->name,
				(rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid");
			dev->stats.rx_dropped++;
		}

		nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS);
		/* nexttail should be rx_head */
		if (nexttail != priv(dev)->rx_head)
			printk(KERN_ERR "%s: receiver buffer chaining error (%04X != %04X)\n",
				dev->name, nexttail, priv(dev)->rx_head);
		ether1_writew(dev, RFD_CMDEL | RFD_CMDSUSPEND, nexttail, rfd_t, rfd_command, NORMALIRQS);
		ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_command, NORMALIRQS);
		ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_status, NORMALIRQS);
		ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_rbdoffset, NORMALIRQS);

		priv(dev)->rx_tail = nexttail;
		priv(dev)->rx_head = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_link, NORMALIRQS);
	} while (1);
}

/*
 * Interrupt handler: ack the SCB status bits and dispatch to the TX/RX
 * completion handlers; also restarts the CU/RU when the chip reports
 * "not ready" (see changelog 1.02/1.03).
 */
static irqreturn_t
ether1_interrupt (int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	int status;

	status = ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS);

	if (status) {
		ether1_writew(dev, status & (SCB_STRNR | SCB_STCNA | SCB_STFR | SCB_STCX),
			    SCB_ADDR, scb_t, scb_command, NORMALIRQS);
		writeb(CTRL_CA | CTRL_ACK, REG_CONTROL);
		if (status & SCB_STCX) {
			ether1_xmit_done (dev);
		}
		if (status & SCB_STCNA) {
			if (priv(dev)->resetting == 0)
				printk (KERN_WARNING "%s: CU went not ready ???\n", dev->name);
			else
				priv(dev)->resetting += 1;
			if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
					!= (unsigned short)I82586_NULL) {
				ether1_writew(dev, SCB_CMDCUCSTART, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
				writeb(CTRL_CA, REG_CONTROL);
			}
			if (priv(dev)->resetting == 2)
				priv(dev)->resetting = 0;
		}
		if (status & SCB_STFR) {
			ether1_recv_done (dev);
		}
		if (status & SCB_STRNR) {
			if (ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS) & SCB_STRXSUSP) {
				printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name);
				ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
				writeb(CTRL_CA, REG_CONTROL);
				dev->stats.rx_dropped++;	/* we suspended due to lack of buffer space */
			} else
				printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name,
					ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS));
			printk (KERN_WARNING "RU ptr = %04X\n",
				ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
		}
	} else
		writeb(CTRL_ACK, REG_CONTROL);

	return IRQ_HANDLED;
}

/* ndo_stop: hold the chip in reset and release the IRQ. */
static int
ether1_close (struct net_device *dev)
{
	ether1_reset (dev);

	free_irq(dev->irq, dev);

	return 0;
}

/*
 * Set or clear the multicast filter for this adaptor.
 * num_addrs == -1	Promiscuous mode, receive all packets.
 * num_addrs == 0	Normal mode, clear multicast list.
 * num_addrs > 0	Multicast mode, receive normal and MC packets, and do
 *			best-effort filtering.
 */
static void
ether1_setmulticastlist (struct net_device *dev)
{
	/* Intentionally empty: multicast filtering is not implemented. */
}

/* ------------------------------------------------------------------------- */

/* Print the driver banner once (gated on net_debug). */
static void __devinit ether1_banner(void)
{
	static unsigned int version_printed = 0;

	if (net_debug && version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

static const struct net_device_ops ether1_netdev_ops = {
	.ndo_open		= ether1_open,
	.ndo_stop		= ether1_close,
	.ndo_start_xmit		= ether1_sendpacket,
	.ndo_set_rx_mode	= ether1_setmulticastlist,
	.ndo_tx_timeout		= ether1_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};

/*
 * Expansion-card probe: map the card, reset it, read the MAC address
 * from the ID PROM (one byte per word), RAM-test, and register the
 * net_device.  goto-based cleanup unwinds on any failure.
 */
static int __devinit
ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct net_device *dev;
	int i, ret = 0;

	ether1_banner();

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	dev = alloc_etherdev(sizeof(struct ether1_priv));
	if (!dev) {
		ret = -ENOMEM;
		goto release;
	}

	SET_NETDEV_DEV(dev, &ec->dev);

	dev->irq = ec->irq;
	priv(dev)->base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (!priv(dev)->base) {
		ret = -ENOMEM;
		goto free;
	}

	if ((priv(dev)->bus_type = ether1_reset(dev)) == 0) {
		ret = -ENODEV;
		goto free;
	}

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));

	if (ether1_init_2(dev)) {
		ret = -ENODEV;
		goto free;
	}

	dev->netdev_ops		= &ether1_netdev_ops;
	dev->watchdog_timeo	= 5 * HZ / 100;

	ret = register_netdev(dev);
	if (ret)
		goto free;

	printk(KERN_INFO "%s: ether1 in slot %d, %pM\n",
		dev->name, ec->slot_no, dev->dev_addr);

	ecard_set_drvdata(ec, dev);
	return 0;

 free:
	free_netdev(dev);
 release:
	ecard_release_resources(ec);
 out:
	return ret;
}

/* Expansion-card remove: unregister and free everything probe acquired. */
static void __devexit ether1_remove(struct expansion_card *ec)
{
	struct net_device *dev = ecard_get_drvdata(ec);

	ecard_set_drvdata(ec, NULL);

	unregister_netdev(dev);
	free_netdev(dev);
	ecard_release_resources(ec);
}

static const struct ecard_id ether1_ids[] = {
	{ MANU_ACORN, PROD_ACORN_ETHER1 },
	{ 0xffff, 0xffff }
};

static struct ecard_driver ether1_driver = {
	.probe		= ether1_probe,
	.remove		= __devexit_p(ether1_remove),
	.id_table	= ether1_ids,
	.drv = {
		.name	= "ether1",
	},
};

static int __init ether1_init(void)
{
	return ecard_register_driver(&ether1_driver);
}

static void __exit ether1_exit(void)
{
	ecard_remove_driver(&ether1_driver);
}

module_init(ether1_init);
module_exit(ether1_exit);

MODULE_LICENSE("GPL");
gpl-2.0
bilalliberty/SebastianFM-kernel
drivers/tty/serial/sa1100.c
4974
23075
/* * Driver for SA11x0 serial ports * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Copyright (C) 2000 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if defined(CONFIG_SERIAL_SA1100_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/io.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <asm/mach/serial_sa1100.h> /* We've been assigned a range on the "Low-density serial ports" major */ #define SERIAL_SA1100_MAJOR 204 #define MINOR_START 5 #define NR_PORTS 3 #define SA1100_ISR_PASS_LIMIT 256 /* * Convert from ignore_status_mask or read_status_mask to UTSR[01] */ #define SM_TO_UTSR0(x) ((x) & 0xff) #define SM_TO_UTSR1(x) ((x) >> 8) #define UTSR0_TO_SM(x) ((x)) #define UTSR1_TO_SM(x) ((x) << 8) #define UART_GET_UTCR0(sport) __raw_readl((sport)->port.membase + UTCR0) #define UART_GET_UTCR1(sport) __raw_readl((sport)->port.membase + UTCR1) #define UART_GET_UTCR2(sport) __raw_readl((sport)->port.membase + UTCR2) #define UART_GET_UTCR3(sport) 
__raw_readl((sport)->port.membase + UTCR3) #define UART_GET_UTSR0(sport) __raw_readl((sport)->port.membase + UTSR0) #define UART_GET_UTSR1(sport) __raw_readl((sport)->port.membase + UTSR1) #define UART_GET_CHAR(sport) __raw_readl((sport)->port.membase + UTDR) #define UART_PUT_UTCR0(sport,v) __raw_writel((v),(sport)->port.membase + UTCR0) #define UART_PUT_UTCR1(sport,v) __raw_writel((v),(sport)->port.membase + UTCR1) #define UART_PUT_UTCR2(sport,v) __raw_writel((v),(sport)->port.membase + UTCR2) #define UART_PUT_UTCR3(sport,v) __raw_writel((v),(sport)->port.membase + UTCR3) #define UART_PUT_UTSR0(sport,v) __raw_writel((v),(sport)->port.membase + UTSR0) #define UART_PUT_UTSR1(sport,v) __raw_writel((v),(sport)->port.membase + UTSR1) #define UART_PUT_CHAR(sport,v) __raw_writel((v),(sport)->port.membase + UTDR) /* * This is the size of our serial port register set. */ #define UART_PORT_SIZE 0x24 /* * This determines how often we check the modem status signals * for any change. They generally aren't connected to an IRQ * so we have to poll them. We also check immediately before * filling the TX fifo incase CTS has been dropped. */ #define MCTRL_TIMEOUT (250*HZ/1000) struct sa1100_port { struct uart_port port; struct timer_list timer; unsigned int old_status; }; /* * Handle any change of modem status signal since we were last called. 
*/ static void sa1100_mctrl_check(struct sa1100_port *sport) { unsigned int status, changed; status = sport->port.ops->get_mctrl(&sport->port); changed = status ^ sport->old_status; if (changed == 0) return; sport->old_status = status; if (changed & TIOCM_RI) sport->port.icount.rng++; if (changed & TIOCM_DSR) sport->port.icount.dsr++; if (changed & TIOCM_CAR) uart_handle_dcd_change(&sport->port, status & TIOCM_CAR); if (changed & TIOCM_CTS) uart_handle_cts_change(&sport->port, status & TIOCM_CTS); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); } /* * This is our per-port timeout handler, for checking the * modem status signals. */ static void sa1100_timeout(unsigned long data) { struct sa1100_port *sport = (struct sa1100_port *)data; unsigned long flags; if (sport->port.state) { spin_lock_irqsave(&sport->port.lock, flags); sa1100_mctrl_check(sport); spin_unlock_irqrestore(&sport->port.lock, flags); mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT); } } /* * interrupts disabled on entry */ static void sa1100_stop_tx(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; u32 utcr3; utcr3 = UART_GET_UTCR3(sport); UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_TIE); sport->port.read_status_mask &= ~UTSR0_TO_SM(UTSR0_TFS); } /* * port locked and interrupts disabled */ static void sa1100_start_tx(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; u32 utcr3; utcr3 = UART_GET_UTCR3(sport); sport->port.read_status_mask |= UTSR0_TO_SM(UTSR0_TFS); UART_PUT_UTCR3(sport, utcr3 | UTCR3_TIE); } /* * Interrupts enabled */ static void sa1100_stop_rx(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; u32 utcr3; utcr3 = UART_GET_UTCR3(sport); UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_RIE); } /* * Set the modem control timer to fire immediately. 
*/ static void sa1100_enable_ms(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; mod_timer(&sport->timer, jiffies); } static void sa1100_rx_chars(struct sa1100_port *sport) { struct tty_struct *tty = sport->port.state->port.tty; unsigned int status, ch, flg; status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) | UTSR0_TO_SM(UART_GET_UTSR0(sport)); while (status & UTSR1_TO_SM(UTSR1_RNE)) { ch = UART_GET_CHAR(sport); sport->port.icount.rx++; flg = TTY_NORMAL; /* * note that the error handling code is * out of the main execution path */ if (status & UTSR1_TO_SM(UTSR1_PRE | UTSR1_FRE | UTSR1_ROR)) { if (status & UTSR1_TO_SM(UTSR1_PRE)) sport->port.icount.parity++; else if (status & UTSR1_TO_SM(UTSR1_FRE)) sport->port.icount.frame++; if (status & UTSR1_TO_SM(UTSR1_ROR)) sport->port.icount.overrun++; status &= sport->port.read_status_mask; if (status & UTSR1_TO_SM(UTSR1_PRE)) flg = TTY_PARITY; else if (status & UTSR1_TO_SM(UTSR1_FRE)) flg = TTY_FRAME; #ifdef SUPPORT_SYSRQ sport->port.sysrq = 0; #endif } if (uart_handle_sysrq_char(&sport->port, ch)) goto ignore_char; uart_insert_char(&sport->port, status, UTSR1_TO_SM(UTSR1_ROR), ch, flg); ignore_char: status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) | UTSR0_TO_SM(UART_GET_UTSR0(sport)); } tty_flip_buffer_push(tty); } static void sa1100_tx_chars(struct sa1100_port *sport) { struct circ_buf *xmit = &sport->port.state->xmit; if (sport->port.x_char) { UART_PUT_CHAR(sport, sport->port.x_char); sport->port.icount.tx++; sport->port.x_char = 0; return; } /* * Check the modem control lines before * transmitting anything. */ sa1100_mctrl_check(sport); if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { sa1100_stop_tx(&sport->port); return; } /* * Tried using FIFO (not checking TNF) for fifo fill: * still had the '4 bytes repeated' problem. 
*/ while (UART_GET_UTSR1(sport) & UTSR1_TNF) { UART_PUT_CHAR(sport, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); sport->port.icount.tx++; if (uart_circ_empty(xmit)) break; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); if (uart_circ_empty(xmit)) sa1100_stop_tx(&sport->port); } static irqreturn_t sa1100_int(int irq, void *dev_id) { struct sa1100_port *sport = dev_id; unsigned int status, pass_counter = 0; spin_lock(&sport->port.lock); status = UART_GET_UTSR0(sport); status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS; do { if (status & (UTSR0_RFS | UTSR0_RID)) { /* Clear the receiver idle bit, if set */ if (status & UTSR0_RID) UART_PUT_UTSR0(sport, UTSR0_RID); sa1100_rx_chars(sport); } /* Clear the relevant break bits */ if (status & (UTSR0_RBB | UTSR0_REB)) UART_PUT_UTSR0(sport, status & (UTSR0_RBB | UTSR0_REB)); if (status & UTSR0_RBB) sport->port.icount.brk++; if (status & UTSR0_REB) uart_handle_break(&sport->port); if (status & UTSR0_TFS) sa1100_tx_chars(sport); if (pass_counter++ > SA1100_ISR_PASS_LIMIT) break; status = UART_GET_UTSR0(sport); status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS; } while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID)); spin_unlock(&sport->port.lock); return IRQ_HANDLED; } /* * Return TIOCSER_TEMT when transmitter is not busy. */ static unsigned int sa1100_tx_empty(struct uart_port *port) { struct sa1100_port *sport = (struct sa1100_port *)port; return UART_GET_UTSR1(sport) & UTSR1_TBY ? 0 : TIOCSER_TEMT; } static unsigned int sa1100_get_mctrl(struct uart_port *port) { return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; } static void sa1100_set_mctrl(struct uart_port *port, unsigned int mctrl) { } /* * Interrupts always disabled. 
*/
/* Assert or deassert a BREAK condition on the line (uart_ops.break_ctl). */
static void sa1100_break_ctl(struct uart_port *port, int break_state)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	unsigned long flags;
	unsigned int utcr3;

	/* read-modify-write UTCR3 under the port lock */
	spin_lock_irqsave(&sport->port.lock, flags);
	utcr3 = UART_GET_UTCR3(sport);
	if (break_state == -1)
		utcr3 |= UTCR3_BRK;
	else
		utcr3 &= ~UTCR3_BRK;
	UART_PUT_UTCR3(sport, utcr3);
	spin_unlock_irqrestore(&sport->port.lock, flags);
}

/* Open-time setup: claim the IRQ and enable RX/TX plus RX interrupts. */
static int sa1100_startup(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	int retval;

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(sport->port.irq, sa1100_int, 0,
			     "sa11x0-uart", sport);
	if (retval)
		return retval;

	/*
	 * Finally, clear and enable interrupts
	 */
	UART_PUT_UTSR0(sport, -1);
	UART_PUT_UTCR3(sport, UTCR3_RXE | UTCR3_TXE | UTCR3_RIE);

	/*
	 * Enable modem status interrupts
	 */
	spin_lock_irq(&sport->port.lock);
	sa1100_enable_ms(&sport->port);
	spin_unlock_irq(&sport->port.lock);

	return 0;
}

/* Close-time teardown: mirrors sa1100_startup in reverse order. */
static void sa1100_shutdown(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	/*
	 * Stop our timer.
	 */
	del_timer_sync(&sport->timer);

	/*
	 * Free the interrupt
	 */
	free_irq(sport->port.irq, sport);

	/*
	 * Disable all interrupts, port and break condition.
	 */
	UART_PUT_UTCR3(sport, 0);
}

/*
 * Apply new line settings (word size, stop bits, parity, baud) and
 * recompute the driver's status/ignore masks from the termios flags.
 */
static void
sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
		   struct ktermios *old)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	unsigned long flags;
	unsigned int utcr0, old_utcr3, baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;

	/*
	 * We only support CS7 and CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;
	}

	if ((termios->c_cflag & CSIZE) == CS8)
		utcr0 = UTCR0_DSS;
	else
		utcr0 = 0;

	if (termios->c_cflag & CSTOPB)
		utcr0 |= UTCR0_SBS;
	if (termios->c_cflag & PARENB) {
		utcr0 |= UTCR0_PE;
		if (!(termios->c_cflag & PARODD))
			utcr0 |= UTCR0_OES;
	}

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

	sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
	sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |=
				UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |=
				UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |=
				UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |=
				UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |=
				UTSR1_TO_SM(UTSR1_ROR);
	}

	del_timer_sync(&sport->timer);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * disable interrupts and drain transmitter
	 */
	old_utcr3 = UART_GET_UTCR3(sport);
	UART_PUT_UTCR3(sport, old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE));

	/* busy-wait until the transmitter is idle before reprogramming */
	while (UART_GET_UTSR1(sport) & UTSR1_TBY)
		barrier();

	/* then, disable everything */
	UART_PUT_UTCR3(sport, 0);

	/* set the parity, stop bits and data size */
	UART_PUT_UTCR0(sport, utcr0);

	/* set the baud rate */
	quot -= 1;
	UART_PUT_UTCR1(sport, ((quot & 0xf00) >> 8));
	UART_PUT_UTCR2(sport, (quot & 0xff));

	/* clear any pending status, then restore the saved control bits */
	UART_PUT_UTSR0(sport, -1);

	UART_PUT_UTCR3(sport, old_utcr3);

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		sa1100_enable_ms(&sport->port);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}

/* Human-readable port type name for /proc and TIOCGSERIAL. */
static const char *sa1100_type(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	return sport->port.type == PORT_SA1100 ? "SA1100" : NULL;
}

/*
 * Release the memory region(s) being used by 'port'.
 */
static void sa1100_release_port(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
}

/*
 * Request the memory region(s) being used by 'port'.
 */
static int sa1100_request_port(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	return request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
			"sa11x0-uart") != NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void sa1100_config_port(struct uart_port *port, int flags)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	if (flags & UART_CONFIG_TYPE &&
	    sa1100_request_port(&sport->port) == 0)
		sport->port.type = PORT_SA1100;
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 * The only change we allow are to the flags and type, and
 * even then only between PORT_SA1100 and PORT_UNKNOWN
 */
static int
sa1100_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_SA1100)
		ret = -EINVAL;
	if (sport->port.irq != ser->irq)
		ret = -EINVAL;
	if (ser->io_type != SERIAL_IO_MEM)
		ret = -EINVAL;
	if (sport->port.uartclk / 16 != ser->baud_base)
		ret = -EINVAL;
	if ((void *)sport->port.mapbase != ser->iomem_base)
		ret = -EINVAL;
	if (sport->port.iobase != ser->port)
		ret = -EINVAL;
	if (ser->hub6 != 0)
		ret = -EINVAL;
	return ret;
}

/*
 * Not const: sa1100_register_uart_fns() below may patch in
 * machine-specific mctrl/pm/wake handlers at boot.
 */
static struct uart_ops sa1100_pops = {
	.tx_empty	= sa1100_tx_empty,
	.set_mctrl	= sa1100_set_mctrl,
	.get_mctrl	= sa1100_get_mctrl,
	.stop_tx	= sa1100_stop_tx,
	.start_tx	= sa1100_start_tx,
	.stop_rx	= sa1100_stop_rx,
	.enable_ms	= sa1100_enable_ms,
	.break_ctl	= sa1100_break_ctl,
	.startup	= sa1100_startup,
	.shutdown	= sa1100_shutdown,
	.set_termios	= sa1100_set_termios,
	.type		= sa1100_type,
	.release_port	= sa1100_release_port,
	.request_port	= sa1100_request_port,
	.config_port	= sa1100_config_port,
	.verify_port	= sa1100_verify_port,
};

static struct sa1100_port sa1100_ports[NR_PORTS];

/*
 * Setup the SA1100 serial ports.  Note that we don't include the IrDA
 * port here since we have our own SIR/FIR driver (see drivers/net/irda)
 *
 * Note also that we support "console=ttySAx" where "x" is either 0 or 1.
 * Which serial port this ends up being depends on the machine you're
 * running this kernel on.  I'm not convinced that this is a good idea,
 * but that's the way it traditionally works.
 *
 * Note that NanoEngine UART3 becomes UART2, and UART2 is no longer
 * used here.
 */
static void __init sa1100_init_ports(void)
{
	static int first = 1;	/* run-once guard: called from console init and module init */
	int i;

	if (!first)
		return;
	first = 0;

	for (i = 0; i < NR_PORTS; i++) {
		sa1100_ports[i].port.uartclk   = 3686400;
		sa1100_ports[i].port.ops       = &sa1100_pops;
		sa1100_ports[i].port.fifosize  = 8;
		sa1100_ports[i].port.line      = i;
		sa1100_ports[i].port.iotype    = UPIO_MEM;
		init_timer(&sa1100_ports[i].timer);
		sa1100_ports[i].timer.function = sa1100_timeout;
		sa1100_ports[i].timer.data     = (unsigned long)&sa1100_ports[i];
	}

	/*
	 * make transmit lines outputs, so that when the port
	 * is closed, the output is in the MARK state.
	 */
	PPDR |= PPC_TXD1 | PPC_TXD3;
	PPSR |= PPC_TXD1 | PPC_TXD3;
}

/* Allow board code to override modem-control / PM hooks before registration. */
void __devinit sa1100_register_uart_fns(struct sa1100_port_fns *fns)
{
	if (fns->get_mctrl)
		sa1100_pops.get_mctrl = fns->get_mctrl;
	if (fns->set_mctrl)
		sa1100_pops.set_mctrl = fns->set_mctrl;

	sa1100_pops.pm       = fns->pm;
	sa1100_pops.set_wake = fns->set_wake;
}

/* Board code maps logical index 'idx' onto hardware UART 'port' (1, 2 or 3). */
void __init sa1100_register_uart(int idx, int port)
{
	if (idx >= NR_PORTS) {
		printk(KERN_ERR "%s: bad index number %d\n", __func__, idx);
		return;
	}

	switch (port) {
	case 1:
		sa1100_ports[idx].port.membase = (void __iomem *)&Ser1UTCR0;
		sa1100_ports[idx].port.mapbase = _Ser1UTCR0;
		sa1100_ports[idx].port.irq     = IRQ_Ser1UART;
		sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
		break;

	case 2:
		sa1100_ports[idx].port.membase = (void __iomem *)&Ser2UTCR0;
		sa1100_ports[idx].port.mapbase = _Ser2UTCR0;
		sa1100_ports[idx].port.irq     = IRQ_Ser2ICP;
		sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
		break;

	case 3:
		sa1100_ports[idx].port.membase = (void __iomem *)&Ser3UTCR0;
		sa1100_ports[idx].port.mapbase = _Ser3UTCR0;
		sa1100_ports[idx].port.irq     = IRQ_Ser3UART;
		sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
		break;

	default:
		printk(KERN_ERR "%s: bad port number %d\n", __func__, port);
	}
}


#ifdef CONFIG_SERIAL_SA1100_CONSOLE

/* Polling console output: spin until the TX FIFO has room, then write. */
static void sa1100_console_putchar(struct uart_port *port, int ch)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	while (!(UART_GET_UTSR1(sport) & UTSR1_TNF))
		barrier();
	UART_PUT_CHAR(sport, ch);
}

/*
 * Interrupts are disabled on entering
 */
static void
sa1100_console_write(struct console *co, const char *s, unsigned int count)
{
	struct sa1100_port *sport = &sa1100_ports[co->index];
	unsigned int old_utcr3, status;

	/*
	 * First, save UTCR3 and then disable interrupts
	 */
	old_utcr3 = UART_GET_UTCR3(sport);
	UART_PUT_UTCR3(sport, (old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE)) |
				UTCR3_TXE);

	uart_console_write(&sport->port, s, count, sa1100_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore UTCR3
	 */
	do {
		status = UART_GET_UTSR1(sport);
	} while (status & UTSR1_TBY);
	UART_PUT_UTCR3(sport, old_utcr3);
}

/*
 * If the port was already initialised (eg, by a boot loader),
 * try to determine the current setup.
 */
static void __init
sa1100_console_get_options(struct sa1100_port *sport, int *baud,
			   int *parity, int *bits)
{
	unsigned int utcr3;

	utcr3 = UART_GET_UTCR3(sport) & (UTCR3_RXE | UTCR3_TXE);
	if (utcr3 == (UTCR3_RXE | UTCR3_TXE)) {
		/* ok, the port was enabled */
		unsigned int utcr0, quot;

		utcr0 = UART_GET_UTCR0(sport);

		*parity = 'n';
		if (utcr0 & UTCR0_PE) {
			if (utcr0 & UTCR0_OES)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if (utcr0 & UTCR0_DSS)
			*bits = 8;
		else
			*bits = 7;

		/* reassemble the 12-bit divisor from its two registers */
		quot = UART_GET_UTCR2(sport) | UART_GET_UTCR1(sport) << 8;
		quot &= 0xfff;
		*baud = sport->port.uartclk / (16 * (quot + 1));
	}
}

/* Parse "console=ttySAx,..." options and hand them to the serial core. */
static int __init
sa1100_console_setup(struct console *co, char *options)
{
	struct sa1100_port *sport;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index == -1 || co->index >= NR_PORTS)
		co->index = 0;
	sport = &sa1100_ports[co->index];

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		sa1100_console_get_options(sport, &baud, &parity, &bits);

	return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}

static struct uart_driver sa1100_reg;
static struct console sa1100_console = {
	.name		= "ttySA",
	.write		= sa1100_console_write,
	.device		= uart_console_device,
	.setup		= sa1100_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sa1100_reg,
};

static int __init sa1100_rs_console_init(void)
{
	sa1100_init_ports();
	register_console(&sa1100_console);
	return 0;
}
console_initcall(sa1100_rs_console_init);

#define SA1100_CONSOLE	&sa1100_console
#else
#define SA1100_CONSOLE	NULL
#endif

static struct uart_driver sa1100_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttySA",
	.dev_name		= "ttySA",
	.major			= SERIAL_SA1100_MAJOR,
	.minor			= MINOR_START,
	.nr			= NR_PORTS,
	.cons			= SA1100_CONSOLE,
};

static int sa1100_serial_suspend(struct platform_device *dev, pm_message_t state)
{
	struct sa1100_port *sport = platform_get_drvdata(dev);

	if (sport)
		uart_suspend_port(&sa1100_reg, &sport->port);

	return 0;
}

static int sa1100_serial_resume(struct platform_device *dev)
{
	struct sa1100_port *sport = platform_get_drvdata(dev);

	if (sport)
		uart_resume_port(&sa1100_reg, &sport->port);

	return 0;
}

/* Match the device's MEM resource against the statically-registered ports. */
static int sa1100_serial_probe(struct platform_device *dev)
{
	struct resource *res = dev->resource;
	int i;

	for (i = 0; i < dev->num_resources; i++, res++)
		if (res->flags & IORESOURCE_MEM)
			break;

	if (i < dev->num_resources) {
		for (i = 0; i < NR_PORTS; i++) {
			if (sa1100_ports[i].port.mapbase != res->start)
				continue;

			sa1100_ports[i].port.dev = &dev->dev;
			uart_add_one_port(&sa1100_reg, &sa1100_ports[i].port);
			platform_set_drvdata(dev, &sa1100_ports[i]);
			break;
		}
	}

	return 0;
}

static int sa1100_serial_remove(struct platform_device *pdev)
{
	struct sa1100_port *sport = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (sport)
		uart_remove_one_port(&sa1100_reg, &sport->port);

	return 0;
}

static struct platform_driver sa11x0_serial_driver = {
	.probe		= sa1100_serial_probe,
	.remove		= sa1100_serial_remove,
	.suspend	= sa1100_serial_suspend,
	.resume		= sa1100_serial_resume,
	.driver		= {
		.name	= "sa11x0-uart",
		.owner	= THIS_MODULE,
	},
};

static int __init sa1100_serial_init(void)
{
	int ret;

	printk(KERN_INFO "Serial: SA11x0 driver\n");

	sa1100_init_ports();

	ret = uart_register_driver(&sa1100_reg);
	if (ret == 0) {
		ret = platform_driver_register(&sa11x0_serial_driver);
		if (ret)
			uart_unregister_driver(&sa1100_reg);
	}
	return ret;
}

static void __exit sa1100_serial_exit(void)
{
	platform_driver_unregister(&sa11x0_serial_driver);
	uart_unregister_driver(&sa1100_reg);
}

module_init(sa1100_serial_init);
module_exit(sa1100_serial_exit);

MODULE_AUTHOR("Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("SA1100 generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_SA1100_MAJOR);
MODULE_ALIAS("platform:sa11x0-uart");
gpl-2.0
NamanArora/flamingo_kernel
drivers/gpio/gpio-rdc321x.c
4974
6392
/* * RDC321x GPIO driver * * Copyright (C) 2008, Volker Weiss <dev@tintuc.de> * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/gpio.h> #include <linux/mfd/rdc321x.h> #include <linux/slab.h> struct rdc321x_gpio { spinlock_t lock; struct pci_dev *sb_pdev; u32 data_reg[2]; int reg1_ctrl_base; int reg1_data_base; int reg2_ctrl_base; int reg2_data_base; struct gpio_chip chip; }; /* read GPIO pin */ static int rdc_gpio_get_value(struct gpio_chip *chip, unsigned gpio) { struct rdc321x_gpio *gpch; u32 value = 0; int reg; gpch = container_of(chip, struct rdc321x_gpio, chip); reg = gpio < 32 ? gpch->reg1_data_base : gpch->reg2_data_base; spin_lock(&gpch->lock); pci_write_config_dword(gpch->sb_pdev, reg, gpch->data_reg[gpio < 32 ? 0 : 1]); pci_read_config_dword(gpch->sb_pdev, reg, &value); spin_unlock(&gpch->lock); return (1 << (gpio & 0x1f)) & value ? 1 : 0; } static void rdc_gpio_set_value_impl(struct gpio_chip *chip, unsigned gpio, int value) { struct rdc321x_gpio *gpch; int reg = (gpio < 32) ? 
0 : 1; gpch = container_of(chip, struct rdc321x_gpio, chip); if (value) gpch->data_reg[reg] |= 1 << (gpio & 0x1f); else gpch->data_reg[reg] &= ~(1 << (gpio & 0x1f)); pci_write_config_dword(gpch->sb_pdev, reg ? gpch->reg2_data_base : gpch->reg1_data_base, gpch->data_reg[reg]); } /* set GPIO pin to value */ static void rdc_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) { struct rdc321x_gpio *gpch; gpch = container_of(chip, struct rdc321x_gpio, chip); spin_lock(&gpch->lock); rdc_gpio_set_value_impl(chip, gpio, value); spin_unlock(&gpch->lock); } static int rdc_gpio_config(struct gpio_chip *chip, unsigned gpio, int value) { struct rdc321x_gpio *gpch; int err; u32 reg; gpch = container_of(chip, struct rdc321x_gpio, chip); spin_lock(&gpch->lock); err = pci_read_config_dword(gpch->sb_pdev, gpio < 32 ? gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, &reg); if (err) goto unlock; reg |= 1 << (gpio & 0x1f); err = pci_write_config_dword(gpch->sb_pdev, gpio < 32 ? gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, reg); if (err) goto unlock; rdc_gpio_set_value_impl(chip, gpio, value); unlock: spin_unlock(&gpch->lock); return err; } /* configure GPIO pin as input */ static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) { return rdc_gpio_config(chip, gpio, 1); } /* * Cache the initial value of both GPIO data registers */ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev) { int err; struct resource *r; struct rdc321x_gpio *rdc321x_gpio_dev; struct rdc321x_gpio_pdata *pdata; pdata = pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "no platform data supplied\n"); return -ENODEV; } rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL); if (!rdc321x_gpio_dev) { dev_err(&pdev->dev, "failed to allocate private data\n"); return -ENOMEM; } r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1"); if (!r) { dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n"); err = -ENODEV; goto out_free; } 
spin_lock_init(&rdc321x_gpio_dev->lock); rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev; rdc321x_gpio_dev->reg1_ctrl_base = r->start; rdc321x_gpio_dev->reg1_data_base = r->start + 0x4; r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2"); if (!r) { dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n"); err = -ENODEV; goto out_free; } rdc321x_gpio_dev->reg2_ctrl_base = r->start; rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; rdc321x_gpio_dev->chip.set = rdc_gpio_set_value; rdc321x_gpio_dev->chip.base = 0; rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios; platform_set_drvdata(pdev, rdc321x_gpio_dev); /* This might not be, what others (BIOS, bootloader, etc.) wrote to these registers before, but it's a good guess. Still better than just using 0xffffffff. */ err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, rdc321x_gpio_dev->reg1_data_base, &rdc321x_gpio_dev->data_reg[0]); if (err) goto out_drvdata; err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, rdc321x_gpio_dev->reg2_data_base, &rdc321x_gpio_dev->data_reg[1]); if (err) goto out_drvdata; dev_info(&pdev->dev, "registering %d GPIOs\n", rdc321x_gpio_dev->chip.ngpio); return gpiochip_add(&rdc321x_gpio_dev->chip); out_drvdata: platform_set_drvdata(pdev, NULL); out_free: kfree(rdc321x_gpio_dev); return err; } static int __devexit rdc321x_gpio_remove(struct platform_device *pdev) { int ret; struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev); ret = gpiochip_remove(&rdc321x_gpio_dev->chip); if (ret) dev_err(&pdev->dev, "failed to unregister chip\n"); kfree(rdc321x_gpio_dev); platform_set_drvdata(pdev, NULL); return ret; } static struct platform_driver rdc321x_gpio_driver = { .driver.name = "rdc321x-gpio", .driver.owner = THIS_MODULE, .probe = rdc321x_gpio_probe, 
.remove = __devexit_p(rdc321x_gpio_remove), }; module_platform_driver(rdc321x_gpio_driver); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_DESCRIPTION("RDC321x GPIO driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:rdc321x-gpio");
gpl-2.0
Leoyzen/Charm-Eye
drivers/misc/atmel-ssc.c
5230
3647
/*
 * Atmel SSC driver
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/atmel-ssc.h>
#include <linux/slab.h>
#include <linux/module.h>

/* Serialize access to ssc_list and user count */
static DEFINE_SPINLOCK(user_lock);
static LIST_HEAD(ssc_list);

/*
 * Claim exclusive use of the SSC instance numbered 'ssc_num' and enable
 * its peripheral clock.  Returns the device, or ERR_PTR(-ENODEV) if no
 * such instance was probed, or ERR_PTR(-EBUSY) if it is already claimed.
 * The caller releases it with ssc_free().
 */
struct ssc_device *ssc_request(unsigned int ssc_num)
{
	int ssc_valid = 0;
	struct ssc_device *ssc;

	spin_lock(&user_lock);
	list_for_each_entry(ssc, &ssc_list, list) {
		if (ssc->pdev->id == ssc_num) {
			ssc_valid = 1;
			break;
		}
	}

	if (!ssc_valid) {
		spin_unlock(&user_lock);
		pr_err("ssc: ssc%d platform device is missing\n", ssc_num);
		return ERR_PTR(-ENODEV);
	}

	if (ssc->user) {
		spin_unlock(&user_lock);
		dev_dbg(&ssc->pdev->dev, "module busy\n");
		return ERR_PTR(-EBUSY);
	}
	ssc->user++;
	spin_unlock(&user_lock);

	/* clock is enabled outside the lock; paired with ssc_free() */
	clk_enable(ssc->clk);

	return ssc;
}
EXPORT_SYMBOL(ssc_request);

/*
 * Release an SSC previously obtained with ssc_request() and disable its
 * clock.  A double free is tolerated (debug message only).
 */
void ssc_free(struct ssc_device *ssc)
{
	spin_lock(&user_lock);
	if (ssc->user) {
		ssc->user--;
		clk_disable(ssc->clk);
	} else {
		dev_dbg(&ssc->pdev->dev, "device already free\n");
	}
	spin_unlock(&user_lock);
}
EXPORT_SYMBOL(ssc_free);

/*
 * Probe one SSC instance: map its registers, grab its "pclk" clock,
 * mask all interrupts, and add it to the global ssc_list so that
 * ssc_request() can find it.
 */
static int __init ssc_probe(struct platform_device *pdev)
{
	int retval = 0;
	struct resource *regs;
	struct ssc_device *ssc;

	ssc = kzalloc(sizeof(struct ssc_device), GFP_KERNEL);
	if (!ssc) {
		dev_dbg(&pdev->dev, "out of memory\n");
		retval = -ENOMEM;
		goto out;
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_dbg(&pdev->dev, "no mmio resource defined\n");
		retval = -ENXIO;
		goto out_free;
	}

	ssc->clk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(ssc->clk)) {
		dev_dbg(&pdev->dev, "no pclk clock defined\n");
		retval = -ENXIO;
		goto out_free;
	}

	ssc->pdev = pdev;
	ssc->regs = ioremap(regs->start, resource_size(regs));
	if (!ssc->regs) {
		dev_dbg(&pdev->dev, "ioremap failed\n");
		retval = -EINVAL;
		goto out_clk;
	}

	/* disable all interrupts */
	clk_enable(ssc->clk);
	ssc_writel(ssc->regs, IDR, ~0UL);
	ssc_readl(ssc->regs, SR);	/* clear any latched status */
	clk_disable(ssc->clk);

	ssc->irq = platform_get_irq(pdev, 0);
	if (!ssc->irq) {
		dev_dbg(&pdev->dev, "could not get irq\n");
		retval = -ENXIO;
		goto out_unmap;
	}

	spin_lock(&user_lock);
	list_add_tail(&ssc->list, &ssc_list);
	spin_unlock(&user_lock);

	platform_set_drvdata(pdev, ssc);

	dev_info(&pdev->dev, "Atmel SSC device at 0x%p (irq %d)\n",
			ssc->regs, ssc->irq);

	goto out;

out_unmap:
	iounmap(ssc->regs);
out_clk:
	clk_put(ssc->clk);
out_free:
	kfree(ssc);
out:
	return retval;
}

static int __devexit ssc_remove(struct platform_device *pdev)
{
	struct ssc_device *ssc = platform_get_drvdata(pdev);

	/* lock held across teardown so ssc_request() cannot race with removal */
	spin_lock(&user_lock);
	iounmap(ssc->regs);
	clk_put(ssc->clk);
	list_del(&ssc->list);
	kfree(ssc);
	spin_unlock(&user_lock);

	return 0;
}

/* .probe deliberately unset: registered via platform_driver_probe() below */
static struct platform_driver ssc_driver = {
	.remove		= __devexit_p(ssc_remove),
	.driver		= {
		.name		= "ssc",
		.owner		= THIS_MODULE,
	},
};

static int __init ssc_init(void)
{
	return platform_driver_probe(&ssc_driver, ssc_probe);
}
module_init(ssc_init);

static void __exit ssc_exit(void)
{
	platform_driver_unregister(&ssc_driver);
}
module_exit(ssc_exit);

MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
MODULE_DESCRIPTION("SSC driver for Atmel AVR32 and AT91");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ssc");
gpl-2.0
BlackBox-Kernel/blackbox_sprout_kk
drivers/infiniband/hw/qib/qib_pio_copy.c
14702
2273
/*
 * Copyright (c) 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/**
 * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
 * @to: destination, in MMIO space (must be 64-bit aligned)
 * @from: source (must be 64-bit aligned)
 * @count: number of 32-bit quantities to copy
 *
 * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
 * time.  Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 */
void qib_pio_copy(void __iomem *to, const void *from, size_t count)
{
#ifdef CONFIG_64BIT
	u64 __iomem *dst = to;
	const u64 *src = from;
	size_t ndwords = count >> 1;
	size_t i;

	/* move the bulk of the buffer as 64-bit quantities */
	for (i = 0; i < ndwords; i++)
		__raw_writeq(src[i], dst + i);
	/* a single trailing 32-bit word remains when count is odd */
	if (count & 1)
		__raw_writel(((const u32 *)from)[count - 1], dst + ndwords);
#else
	u32 __iomem *dst = to;
	const u32 *src = from;
	size_t i;

	for (i = 0; i < count; i++)
		__raw_writel(src[i], dst + i);
#endif
}
gpl-2.0
hastalafiesta/Samsung_STE_Kernel
drivers/hwmon/abx500.c
111
21967
/* * Copyright (C) ST-Ericsson SA 2010 * Author: Martin Persson <martin.persson@stericsson.com> for * ST-Ericsson. * License terms: GNU Gereral Public License (GPL) version 2 * * Note: * * ABX500 does not provide auto ADC, so to monitor the required * temperatures, a periodic work is used. It is more important * to not wake up the CPU than to perform this job, hence the use * of a deferred delay. * * A deferred delay for thermal monitor is considered safe because: * If the chip gets too hot during a sleep state it's most likely * due to external factors, such as the surrounding temperature. * I.e. no SW decisions will make any difference. * * If/when the ABX500 thermal warning temperature is reached (threshold * cannot be changed by SW), an interrupt is set and the driver * notifies user space via a sysfs event. * * If/when ABX500 thermal shutdown temperature is reached a hardware * shutdown of the ABX500 will occur. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/hwmon.h> #include <linux/sysfs.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/pm.h> #include "abx500.h" #define DEFAULT_MONITOR_DELAY 1000 /* * Thresholds are considered inactive if set to 0. * To avoid confusion for user space applications, * the temp monitor delay is set to 0 if all thresholds * are 0. 
*/ static bool find_active_thresholds(struct abx500_temp *data) { int i; for (i = 0; i < data->monitored_sensors; i++) if (data->max[i] != 0 || data->max_hyst[i] != 0 || data->min[i] != 0) return true; dev_dbg(&data->pdev->dev, "No active thresholds," "cancel deferred job (if it exists)" "and reset temp monitor delay\n"); cancel_delayed_work_sync(&data->work); data->work_active = false; return false; } static inline void schedule_monitor(struct abx500_temp *data) { unsigned long delay_in_jiffies; delay_in_jiffies = msecs_to_jiffies(data->gpadc_monitor_delay); data->work_active = true; schedule_delayed_work(&data->work, delay_in_jiffies); } static inline void gpadc_monitor_exit(struct abx500_temp *data) { cancel_delayed_work_sync(&data->work); data->work_active = false; } static void gpadc_monitor(struct work_struct *work) { unsigned long delay_in_jiffies; int val, i, ret; /* Container for alarm node name */ char alarm_node[30]; bool updated_min_alarm = false; bool updated_max_alarm = false; bool updated_max_hyst_alarm = false; struct abx500_temp *data = container_of(work, struct abx500_temp, work.work); for (i = 0; i < data->monitored_sensors; i++) { /* Thresholds are considered inactive if set to 0 */ if (data->max[i] == 0 && data->max_hyst[i] == 0 && data->min[i] == 0) continue; val = data->ops.read_sensor(data, data->gpadc_addr[i]); if (val < 0) { dev_err(&data->pdev->dev, "GPADC read failed\n"); continue; } mutex_lock(&data->lock); if (data->min[i] != 0) { if (val < data->min[i]) { if (data->min_alarm[i] == 0) { data->min_alarm[i] = 1; updated_min_alarm = true; } } else { if (data->min_alarm[i] == 1) { data->min_alarm[i] = 0; updated_min_alarm = true; } } } if (data->max[i] != 0) { if (val > data->max[i]) { if (data->max_alarm[i] == 0) { data->max_alarm[i] = 1; updated_max_alarm = true; } } else { if (data->max_alarm[i] == 1) { data->max_alarm[i] = 0; updated_max_alarm = true; } } } if (data->max_hyst[i] != 0) { if (val > data->max_hyst[i]) { if 
(data->max_hyst_alarm[i] == 0) { data->max_hyst_alarm[i] = 1; updated_max_hyst_alarm = true; } } else { if (data->max_hyst_alarm[i] == 1) { data->max_hyst_alarm[i] = 0; updated_max_hyst_alarm = true; } } } mutex_unlock(&data->lock); /* hwmon attr index starts at 1, thus "i+1" below */ if (updated_min_alarm) { ret = snprintf(alarm_node, 16, "temp%d_min_alarm", (i + 1)); if (ret < 0) { dev_err(&data->pdev->dev, "Unable to update alarm node (%d)", ret); break; } sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node); } if (updated_max_alarm) { ret = snprintf(alarm_node, 16, "temp%d_max_alarm", (i + 1)); if (ret < 0) { dev_err(&data->pdev->dev, "Unable to update alarm node (%d)", ret); break; } hwmon_notify(data->max_alarm[i], NULL); sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node); } if (updated_max_hyst_alarm) { ret = snprintf(alarm_node, 21, "temp%d_max_hyst_alarm", (i + 1)); if (ret < 0) { dev_err(&data->pdev->dev, "Unable to update alarm node (%d)", ret); break; } sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node); } } delay_in_jiffies = msecs_to_jiffies(data->gpadc_monitor_delay); data->work_active = true; schedule_delayed_work(&data->work, delay_in_jiffies); } static ssize_t set_temp_monitor_delay(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int res; unsigned long delay_in_s; struct abx500_temp *data = dev_get_drvdata(dev); res = strict_strtoul(buf, 10, &delay_in_s); if (res < 0) return res; mutex_lock(&data->lock); data->gpadc_monitor_delay = delay_in_s * 1000; if (find_active_thresholds(data)) schedule_monitor(data); mutex_unlock(&data->lock); return count; } static ssize_t set_temp_power_off_delay(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int res; unsigned long delay_in_s; struct abx500_temp *data = dev_get_drvdata(dev); res = strict_strtoul(buf, 10, &delay_in_s); if (res < 0) return res; mutex_lock(&data->lock); data->power_off_delay = delay_in_s * 1000; 
mutex_unlock(&data->lock); return count; } static ssize_t show_temp_monitor_delay(struct device *dev, struct device_attribute *devattr, char *buf) { struct abx500_temp *data = dev_get_drvdata(dev); /* return time in s, not ms */ return sprintf(buf, "%lu\n", (data->gpadc_monitor_delay) / 1000); } static ssize_t show_temp_power_off_delay(struct device *dev, struct device_attribute *devattr, char *buf) { struct abx500_temp *data = dev_get_drvdata(dev); /* return time in s, not ms */ return sprintf(buf, "%lu\n", (data->power_off_delay) / 1000); } /* HWMON sysfs interface */ static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { /* * To avoid confusion between sensor label and chip name, the function * "show_label" is not used to return the chip name. */ struct abx500_temp *data = dev_get_drvdata(dev); return data->ops.show_name(dev, devattr, buf); } static ssize_t show_label(struct device *dev, struct device_attribute *devattr, char *buf) { struct abx500_temp *data = dev_get_drvdata(dev); return data->ops.show_label(dev, devattr, buf); } static ssize_t show_input(struct device *dev, struct device_attribute *devattr, char *buf) { int val; struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); /* hwmon attr index starts at 1, thus "attr->index-1" below */ u8 gpadc_addr = data->gpadc_addr[attr->index - 1]; val = data->ops.read_sensor(data, gpadc_addr); if (val < 0) dev_err(&data->pdev->dev, "GPADC read failed\n"); return sprintf(buf, "%d\n", val); } /* set functions (RW nodes) */ static ssize_t set_min(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { unsigned long val; struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); int res = strict_strtoul(buf, 10, &val); if (res < 0) return res; mutex_lock(&data->lock); /* * Threshold is considered inactive if set to 0 * hwmon attr 
index starts at 1, thus "attr->index-1" below */ if (val == 0) data->min_alarm[attr->index - 1] = 0; data->min[attr->index - 1] = val; if (val == 0) (void) find_active_thresholds(data); else schedule_monitor(data); mutex_unlock(&data->lock); return count; } static ssize_t set_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { unsigned long val; struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); int res = strict_strtoul(buf, 10, &val); if (res < 0) return res; mutex_lock(&data->lock); /* * Threshold is considered inactive if set to 0 * hwmon attr index starts at 1, thus "attr->index-1" below */ if (val == 0) data->max_alarm[attr->index - 1] = 0; data->max[attr->index - 1] = val; if (val == 0) (void) find_active_thresholds(data); else schedule_monitor(data); mutex_unlock(&data->lock); return count; } static ssize_t set_max_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { unsigned long val; struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); int res = strict_strtoul(buf, 10, &val); if (res < 0) return res; mutex_lock(&data->lock); /* * Threshold is considered inactive if set to 0 * hwmon attr index starts at 1, thus "attr->index-1" below */ if (val == 0) data->max_hyst_alarm[attr->index - 1] = 0; data->max_hyst[attr->index - 1] = val; if (val == 0) (void) find_active_thresholds(data); else schedule_monitor(data); mutex_unlock(&data->lock); return count; } /* * show functions (RO nodes) */ static ssize_t show_min(struct device *dev, struct device_attribute *devattr, char *buf) { struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); /* hwmon attr index starts at 1, thus "attr->index-1" below */ return sprintf(buf, "%ld\n", data->min[attr->index - 1]); } static ssize_t show_max(struct device *dev, 
struct device_attribute *devattr, char *buf) { struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); /* hwmon attr index starts at 1, thus "attr->index-1" below */ return sprintf(buf, "%ld\n", data->max[attr->index - 1]); } static ssize_t show_max_hyst(struct device *dev, struct device_attribute *devattr, char *buf) { struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); /* hwmon attr index starts at 1, thus "attr->index-1" below */ return sprintf(buf, "%ld\n", data->max_hyst[attr->index - 1]); } /* Alarms */ static ssize_t show_min_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); /* hwmon attr index starts at 1, thus "attr->index-1" below */ return sprintf(buf, "%ld\n", data->min_alarm[attr->index - 1]); } static ssize_t show_max_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); /* hwmon attr index starts at 1, thus "attr->index-1" below */ return sprintf(buf, "%ld\n", data->max_alarm[attr->index - 1]); } static ssize_t show_max_hyst_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); /* hwmon attr index starts at 1, thus "attr->index-1" below */ return sprintf(buf, "%ld\n", data->max_hyst_alarm[attr->index - 1]); } static ssize_t show_crit_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); /* hwmon attr index starts at 1, thus "attr->index-1" below */ return sprintf(buf, "%ld\n", 
data->crit_alarm[attr->index - 1]); } static mode_t abx500_attrs_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct abx500_temp *data = dev_get_drvdata(dev); return data->ops.is_visible(a, n); } static SENSOR_DEVICE_ATTR(temp_monitor_delay, S_IRUGO | S_IWUSR, show_temp_monitor_delay, set_temp_monitor_delay, 0); static SENSOR_DEVICE_ATTR(temp_power_off_delay, S_IRUGO | S_IWUSR, show_temp_power_off_delay, set_temp_power_off_delay, 0); /* Chip name, required by hwmon*/ static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); /* GPADC - SENSOR1 */ static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, 1); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 1); static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_min, set_min, 1); static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 1); static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_max_hyst, set_max_hyst, 1); static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_min_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_max_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(temp1_max_hyst_alarm, S_IRUGO, show_max_hyst_alarm, NULL, 1); /* GPADC - SENSOR2 */ static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL, 2); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2); static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min, set_min, 2); static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max, set_max, 2); static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IWUSR | S_IRUGO, show_max_hyst, set_max_hyst, 2); static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_min_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_max_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp2_max_hyst_alarm, S_IRUGO, show_max_hyst_alarm, NULL, 2); /* GPADC - SENSOR3 */ static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, 
show_label, NULL, 3); static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_input, NULL, 3); static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min, set_min, 3); static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max, 3); static SENSOR_DEVICE_ATTR(temp3_max_hyst, S_IWUSR | S_IRUGO, show_max_hyst, set_max_hyst, 3); static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_min_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_max_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp3_max_hyst_alarm, S_IRUGO, show_max_hyst_alarm, NULL, 3); /* GPADC - SENSOR4 */ static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, show_label, NULL, 4); static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_input, NULL, 4); static SENSOR_DEVICE_ATTR(temp4_min, S_IWUSR | S_IRUGO, show_min, set_min, 4); static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_max, set_max, 4); static SENSOR_DEVICE_ATTR(temp4_max_hyst, S_IWUSR | S_IRUGO, show_max_hyst, set_max_hyst, 4); static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_min_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_max_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp4_max_hyst_alarm, S_IRUGO, show_max_hyst_alarm, NULL, 4); /* GPADC - SENSOR5 */ static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, show_label, NULL, 5); static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_input, NULL, 5); static SENSOR_DEVICE_ATTR(temp5_min, S_IWUSR | S_IRUGO, show_min, set_min, 5); static SENSOR_DEVICE_ATTR(temp5_max, S_IWUSR | S_IRUGO, show_max, set_max, 5); static SENSOR_DEVICE_ATTR(temp5_max_hyst, S_IWUSR | S_IRUGO, show_max_hyst, set_max_hyst, 5); static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_min_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_max_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp5_max_hyst_alarm, S_IRUGO, show_max_hyst_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO, show_crit_alarm, NULL, 5); struct 
attribute *abx500_temp_attributes[] = { &sensor_dev_attr_name.dev_attr.attr, &sensor_dev_attr_temp_monitor_delay.dev_attr.attr, &sensor_dev_attr_temp_power_off_delay.dev_attr.attr, /* GPADC SENSOR1 */ &sensor_dev_attr_temp1_label.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst_alarm.dev_attr.attr, /* GPADC SENSOR2 */ &sensor_dev_attr_temp2_label.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_hyst_alarm.dev_attr.attr, /* GPADC SENSOR3 */ &sensor_dev_attr_temp3_label.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, &sensor_dev_attr_temp3_max_hyst_alarm.dev_attr.attr, /* GPADC SENSOR4 */ &sensor_dev_attr_temp4_label.dev_attr.attr, &sensor_dev_attr_temp4_input.dev_attr.attr, &sensor_dev_attr_temp4_min.dev_attr.attr, &sensor_dev_attr_temp4_max.dev_attr.attr, &sensor_dev_attr_temp4_max_hyst.dev_attr.attr, &sensor_dev_attr_temp4_min_alarm.dev_attr.attr, &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, &sensor_dev_attr_temp4_max_hyst_alarm.dev_attr.attr, /* GPADC SENSOR5*/ &sensor_dev_attr_temp5_label.dev_attr.attr, &sensor_dev_attr_temp5_input.dev_attr.attr, &sensor_dev_attr_temp5_min.dev_attr.attr, &sensor_dev_attr_temp5_max.dev_attr.attr, &sensor_dev_attr_temp5_max_hyst.dev_attr.attr, 
&sensor_dev_attr_temp5_min_alarm.dev_attr.attr, &sensor_dev_attr_temp5_max_alarm.dev_attr.attr, &sensor_dev_attr_temp5_max_hyst_alarm.dev_attr.attr, &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr, NULL }; static const struct attribute_group abx500_temp_group = { .attrs = abx500_temp_attributes, .is_visible = abx500_attrs_visible, }; static irqreturn_t abx500_temp_irq_handler(int irq, void *irq_data) { struct platform_device *pdev = irq_data; struct abx500_temp *data = platform_get_drvdata(pdev); data->ops.irq_handler(irq, data); return IRQ_HANDLED; } static int setup_irqs(struct platform_device *pdev) { int ret; int irq = platform_get_irq_byname(pdev, "ABX500_TEMP_WARM"); if (irq < 0) dev_err(&pdev->dev, "Get irq by name failed\n"); ret = request_threaded_irq(irq, NULL, abx500_temp_irq_handler, IRQF_NO_SUSPEND, "abx500-temp", pdev); if (ret < 0) dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret); return ret; } static int __devinit abx500_temp_probe(struct platform_device *pdev) { struct abx500_temp *data; int err; data = kzalloc(sizeof(struct abx500_temp), GFP_KERNEL); if (!data) return -ENOMEM; data->pdev = pdev; mutex_init(&data->lock); /* Chip specific initialization */ err = abx500_hwmon_init(data); if (err < 0) { dev_err(&pdev->dev, "abx500 init failed"); goto exit; } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); dev_err(&pdev->dev, "Class registration failed (%d)\n", err); goto exit; } INIT_DELAYED_WORK_DEFERRABLE(&data->work, gpadc_monitor); data->gpadc_monitor_delay = DEFAULT_MONITOR_DELAY; platform_set_drvdata(pdev, data); err = sysfs_create_group(&pdev->dev.kobj, &abx500_temp_group); if (err < 0) { dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err); goto exit_platform_data; } err = setup_irqs(pdev); if (err < 0) { dev_err(&pdev->dev, "irq setup failed (%d)\n", err); goto exit_sysfs_group; } return 0; exit_sysfs_group: sysfs_remove_group(&pdev->dev.kobj, 
&abx500_temp_group); exit_platform_data: hwmon_device_unregister(data->hwmon_dev); platform_set_drvdata(pdev, NULL); exit: kfree(data->gpadc_auto); kfree(data); return err; } static int __devexit abx500_temp_remove(struct platform_device *pdev) { struct abx500_temp *data = platform_get_drvdata(pdev); gpadc_monitor_exit(data); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group); platform_set_drvdata(pdev, NULL); kfree(data->gpadc_auto); kfree(data); return 0; } static int abx500_temp_suspend(struct platform_device *pdev, pm_message_t state) { struct abx500_temp *data = platform_get_drvdata(pdev); if (data->work_active) cancel_delayed_work_sync(&data->work); return 0; } static int abx500_temp_resume(struct platform_device *pdev) { struct abx500_temp *data = platform_get_drvdata(pdev); if (data->work_active) schedule_monitor(data); return 0; } static struct platform_driver abx500_temp_driver = { .driver = { .owner = THIS_MODULE, .name = "abx500-temp", }, .suspend = abx500_temp_suspend, .resume = abx500_temp_resume, .probe = abx500_temp_probe, .remove = __devexit_p(abx500_temp_remove), }; static int __init abx500_temp_init(void) { return platform_driver_register(&abx500_temp_driver); } static void __exit abx500_temp_exit(void) { platform_driver_unregister(&abx500_temp_driver); } MODULE_AUTHOR("Martin Persson <martin.persson@stericsson.com>"); MODULE_DESCRIPTION("ABX500 temperature driver"); MODULE_LICENSE("GPL"); module_init(abx500_temp_init) module_exit(abx500_temp_exit)
gpl-2.0
rofirrim/gcc-tiny
gcc/testsuite/gcc.target/mips/mips-3d-2.c
111
10025
/* { dg-do run } */ /* { dg-options "-mips3d" } */ /* Test MIPS-3D branch-if-any-two builtin functions */ #include <stdlib.h> #include <stdio.h> typedef float v2sf __attribute__ ((vector_size(8))); NOMIPS16 int test0 (v2sf a, v2sf b); NOMIPS16 int test1 (v2sf a, v2sf b); NOMIPS16 int test2 (v2sf a, v2sf b); NOMIPS16 int test3 (v2sf a, v2sf b); NOMIPS16 int test4 (v2sf a, v2sf b); NOMIPS16 int test5 (v2sf a, v2sf b); NOMIPS16 int test6 (v2sf a, v2sf b); NOMIPS16 int test7 (v2sf a, v2sf b); NOMIPS16 int test8 (v2sf a, v2sf b); NOMIPS16 int test9 (v2sf a, v2sf b); NOMIPS16 int test10 (v2sf a, v2sf b); NOMIPS16 int test11 (v2sf a, v2sf b); NOMIPS16 int test12 (v2sf a, v2sf b); NOMIPS16 int test13 (v2sf a, v2sf b); NOMIPS16 int test14 (v2sf a, v2sf b); NOMIPS16 int test15 (v2sf a, v2sf b); NOMIPS16 int test16 (v2sf a, v2sf b); NOMIPS16 int test17 (v2sf a, v2sf b); NOMIPS16 int test18 (v2sf a, v2sf b); NOMIPS16 int test19 (v2sf a, v2sf b); NOMIPS16 int test20 (v2sf a, v2sf b); NOMIPS16 int test21 (v2sf a, v2sf b); NOMIPS16 int test22 (v2sf a, v2sf b); NOMIPS16 int test23 (v2sf a, v2sf b); NOMIPS16 int test24 (v2sf a, v2sf b); NOMIPS16 int test25 (v2sf a, v2sf b); NOMIPS16 int test26 (v2sf a, v2sf b); NOMIPS16 int test27 (v2sf a, v2sf b); NOMIPS16 int test28 (v2sf a, v2sf b); NOMIPS16 int test29 (v2sf a, v2sf b); NOMIPS16 int test30 (v2sf a, v2sf b); NOMIPS16 int test31 (v2sf a, v2sf b); float qnan = 1.0f/0.0f - 1.0f/0.0f; NOMIPS16 int main () { v2sf a, b, c, d; int i, j; /* c.eq.ps */ a = (v2sf) {12, 34}; b = (v2sf) {56, 78}; i = 0; j = 0; if (__builtin_mips_any_c_eq_ps(a, b) != i) abort (); if (__builtin_mips_all_c_eq_ps(a, b) != j) abort (); /* c.eq.ps */ a = (v2sf) {12, 34}; b = (v2sf) {12, 78}; i = 1; j = 0; if (__builtin_mips_any_c_eq_ps(a, b) != i) abort (); if (__builtin_mips_all_c_eq_ps(a, b) != j) abort (); /* c.eq.ps */ a = (v2sf) {12, 34}; b = (v2sf) {56, 34}; i = 1; j = 0; if (__builtin_mips_any_c_eq_ps(a, b) != i) abort (); if (__builtin_mips_all_c_eq_ps(a, 
b) != j) abort (); /* c.eq.ps */ a = (v2sf) {12, 34}; b = (v2sf) {12, 34}; i = 1; j = 1; if (__builtin_mips_any_c_eq_ps(a, b) != i) abort (); if (__builtin_mips_all_c_eq_ps(a, b) != j) abort (); /* Test with 16 operators */ a = (v2sf) {10.58, 1984.0}; b = (v2sf) {567.345, 1984.0}; i = test0 (a, b); if (i != 0) abort (); i = test1 (a, b); if (i != 0) abort (); i = test2 (a, b); if (i != 0) abort (); i = test3 (a, b); if (i != 0) abort (); i = test4 (a, b); if (i != 1) abort (); i = test5 (a, b); if (i != 0) abort (); i = test6 (a, b); if (i != 1) abort (); i = test7 (a, b); if (i != 0) abort (); i = test8 (a, b); if (i != 1) abort (); i = test9 (a, b); if (i != 0) abort (); i = test10 (a, b); if (i != 1) abort (); i = test11 (a, b); if (i != 0) abort (); i = test12 (a, b); if (i != 1) abort (); i = test13 (a, b); if (i != 1) abort (); i = test14 (a, b); if (i != 1) abort (); i = test15 (a, b); if (i != 1) abort (); i = test16 (a, b); if (i != 0) abort (); i = test17 (a, b); if (i != 0) abort (); i = test18 (a, b); if (i != 0) abort (); i = test19 (a, b); if (i != 0) abort (); i = test20 (a, b); if (i != 1) abort (); i = test21 (a, b); if (i != 0) abort (); i = test22 (a, b); if (i != 1) abort (); i = test23 (a, b); if (i != 0) abort (); i = test24 (a, b); if (i != 1) abort (); i = test25 (a, b); if (i != 0) abort (); i = test26 (a, b); if (i != 1) abort (); i = test27 (a, b); if (i != 0) abort (); i = test28 (a, b); if (i != 1) abort (); i = test29 (a, b); if (i != 1) abort (); i = test30 (a, b); if (i != 1) abort (); i = test31 (a, b); if (i != 1) abort (); /* Reverse arguments */ i = test0 (b, a); if (i != 0) abort (); i = test1 (b, a); if (i != 0) abort (); i = test2 (b, a); if (i != 0) abort (); i = test3 (b, a); if (i != 0) abort (); i = test4 (b, a); if (i != 1) abort (); i = test5 (b, a); if (i != 0) abort (); i = test6 (b, a); if (i != 1) abort (); i = test7 (b, a); if (i != 0) abort (); i = test8 (b, a); if (i != 0) abort (); i = test9 (b, a); if (i != 0) 
abort (); i = test10 (b, a); if (i != 0) abort (); i = test11 (b, a); if (i != 0) abort (); i = test12 (b, a); if (i != 1) abort (); i = test13 (b, a); if (i != 0) abort (); i = test14 (b, a); if (i != 1) abort (); i = test15 (b, a); if (i != 0) abort (); i = test16 (b, a); if (i != 0) abort (); i = test17 (b, a); if (i != 0) abort (); i = test18 (b, a); if (i != 0) abort (); i = test19 (b, a); if (i != 0) abort (); i = test20 (b, a); if (i != 1) abort (); i = test21 (b, a); if (i != 0) abort (); i = test22 (b, a); if (i != 1) abort (); i = test23 (b, a); if (i != 0) abort (); i = test24 (b, a); if (i != 0) abort (); i = test25 (b, a); if (i != 0) abort (); i = test26 (b, a); if (i != 0) abort (); i = test27 (b, a); if (i != 0) abort (); i = test28 (b, a); if (i != 1) abort (); i = test29 (b, a); if (i != 0) abort (); i = test30 (b, a); if (i != 1) abort (); i = test31 (b, a); if (i != 0) abort (); #ifndef __FAST_MATH__ /* Test with 16 operators */ a = (v2sf) {qnan, qnan}; b = (v2sf) {567.345, 1984.0}; i = test0 (a, b); if (i != 0) abort (); i = test1 (a, b); if (i != 0) abort (); i = test2 (a, b); if (i != 1) abort (); i = test3 (a, b); if (i != 1) abort (); i = test4 (a, b); if (i != 0) abort (); i = test5 (a, b); if (i != 0) abort (); i = test6 (a, b); if (i != 1) abort (); i = test7 (a, b); if (i != 1) abort (); i = test8 (a, b); if (i != 0) abort (); i = test9 (a, b); if (i != 0) abort (); i = test10 (a, b); if (i != 1) abort (); i = test11 (a, b); if (i != 1) abort (); i = test12 (a, b); if (i != 0) abort (); i = test13 (a, b); if (i != 0) abort (); i = test14 (a, b); if (i != 1) abort (); i = test15 (a, b); if (i != 1) abort (); i = test16 (a, b); if (i != 0) abort (); i = test17 (a, b); if (i != 0) abort (); i = test18 (a, b); if (i != 1) abort (); i = test19 (a, b); if (i != 1) abort (); i = test20 (a, b); if (i != 0) abort (); i = test21 (a, b); if (i != 0) abort (); i = test22 (a, b); if (i != 1) abort (); i = test23 (a, b); if (i != 1) abort (); i = 
test24 (a, b); if (i != 0) abort (); i = test25 (a, b); if (i != 0) abort (); i = test26 (a, b); if (i != 1) abort (); i = test27 (a, b); if (i != 1) abort (); i = test28 (a, b); if (i != 0) abort (); i = test29 (a, b); if (i != 0) abort (); i = test30 (a, b); if (i != 1) abort (); i = test31 (a, b); if (i != 1) abort (); #endif printf ("Test Passes\n"); exit (0); } NOMIPS16 int test0 (v2sf a, v2sf b) { return __builtin_mips_any_c_f_ps (a, b); } NOMIPS16 int test1 (v2sf a, v2sf b) { return __builtin_mips_all_c_f_ps (a, b); } NOMIPS16 int test2 (v2sf a, v2sf b) { return __builtin_mips_any_c_un_ps (a, b); } NOMIPS16 int test3 (v2sf a, v2sf b) { return __builtin_mips_all_c_un_ps (a, b); } NOMIPS16 int test4 (v2sf a, v2sf b) { return __builtin_mips_any_c_eq_ps (a, b); } NOMIPS16 int test5 (v2sf a, v2sf b) { return __builtin_mips_all_c_eq_ps (a, b); } NOMIPS16 int test6 (v2sf a, v2sf b) { return __builtin_mips_any_c_ueq_ps (a, b); } NOMIPS16 int test7 (v2sf a, v2sf b) { return __builtin_mips_all_c_ueq_ps (a, b); } NOMIPS16 int test8 (v2sf a, v2sf b) { return __builtin_mips_any_c_olt_ps (a, b); } NOMIPS16 int test9 (v2sf a, v2sf b) { return __builtin_mips_all_c_olt_ps (a, b); } NOMIPS16 int test10 (v2sf a, v2sf b) { return __builtin_mips_any_c_ult_ps (a, b); } NOMIPS16 int test11 (v2sf a, v2sf b) { return __builtin_mips_all_c_ult_ps (a, b); } NOMIPS16 int test12 (v2sf a, v2sf b) { return __builtin_mips_any_c_ole_ps (a, b); } NOMIPS16 int test13 (v2sf a, v2sf b) { return __builtin_mips_all_c_ole_ps (a, b); } NOMIPS16 int test14 (v2sf a, v2sf b) { return __builtin_mips_any_c_ule_ps (a, b); } NOMIPS16 int test15 (v2sf a, v2sf b) { return __builtin_mips_all_c_ule_ps (a, b); } NOMIPS16 int test16 (v2sf a, v2sf b) { return __builtin_mips_any_c_sf_ps (a, b); } NOMIPS16 int test17 (v2sf a, v2sf b) { return __builtin_mips_all_c_sf_ps (a, b); } NOMIPS16 int test18 (v2sf a, v2sf b) { return __builtin_mips_any_c_ngle_ps (a, b); } NOMIPS16 int test19 (v2sf a, v2sf b) { return 
__builtin_mips_all_c_ngle_ps (a, b); } NOMIPS16 int test20 (v2sf a, v2sf b) { return __builtin_mips_any_c_seq_ps (a, b); } NOMIPS16 int test21 (v2sf a, v2sf b) { return __builtin_mips_all_c_seq_ps (a, b); } NOMIPS16 int test22 (v2sf a, v2sf b) { return __builtin_mips_any_c_ngl_ps (a, b); } NOMIPS16 int test23 (v2sf a, v2sf b) { return __builtin_mips_all_c_ngl_ps (a, b); } NOMIPS16 int test24 (v2sf a, v2sf b) { return __builtin_mips_any_c_lt_ps (a, b); } NOMIPS16 int test25 (v2sf a, v2sf b) { return __builtin_mips_all_c_lt_ps (a, b); } NOMIPS16 int test26 (v2sf a, v2sf b) { return __builtin_mips_any_c_nge_ps (a, b); } NOMIPS16 int test27 (v2sf a, v2sf b) { return __builtin_mips_all_c_nge_ps (a, b); } NOMIPS16 int test28 (v2sf a, v2sf b) { return __builtin_mips_any_c_le_ps (a, b); } NOMIPS16 int test29 (v2sf a, v2sf b) { return __builtin_mips_all_c_le_ps (a, b); } NOMIPS16 int test30 (v2sf a, v2sf b) { return __builtin_mips_any_c_ngt_ps (a, b); } NOMIPS16 int test31 (v2sf a, v2sf b) { return __builtin_mips_all_c_ngt_ps (a, b); }
gpl-2.0
yyu168/linux
security/smack/smack_access.c
111
17202
/* * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2. * * Author: * Casey Schaufler <casey@schaufler-ca.com> * */ #include <linux/types.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/sched.h> #include "smack.h" struct smack_known smack_known_huh = { .smk_known = "?", .smk_secid = 2, }; struct smack_known smack_known_hat = { .smk_known = "^", .smk_secid = 3, }; struct smack_known smack_known_star = { .smk_known = "*", .smk_secid = 4, }; struct smack_known smack_known_floor = { .smk_known = "_", .smk_secid = 5, }; struct smack_known smack_known_web = { .smk_known = "@", .smk_secid = 7, }; LIST_HEAD(smack_known_list); /* * The initial value needs to be bigger than any of the * known values above. */ static u32 smack_next_secid = 10; /* * what events do we log * can be overwritten at run-time by /smack/logging */ int log_policy = SMACK_AUDIT_DENIED; /** * smk_access_entry - look up matching access rule * @subject_label: a pointer to the subject's Smack label * @object_label: a pointer to the object's Smack label * @rule_list: the list of rules to search * * This function looks up the subject/object pair in the * access rule list and returns the access mode. If no * entry is found returns -ENOENT. * * NOTE: * * Earlier versions of this function allowed for labels that * were not on the label list. This was done to allow for * labels to come over the network that had never been seen * before on this host. Unless the receiving socket has the * star label this will always result in a failure check. The * star labeled socket case is now handled in the networking * hooks so there is no case where the label is not on the * label list. Checking to see if the address of two labels * is the same is now a reliable test. 
* * Do the object check first because that is more * likely to differ. * * Allowing write access implies allowing locking. */ int smk_access_entry(char *subject_label, char *object_label, struct list_head *rule_list) { int may = -ENOENT; struct smack_rule *srp; list_for_each_entry_rcu(srp, rule_list, list) { if (srp->smk_object->smk_known == object_label && srp->smk_subject->smk_known == subject_label) { may = srp->smk_access; break; } } /* * MAY_WRITE implies MAY_LOCK. */ if ((may & MAY_WRITE) == MAY_WRITE) may |= MAY_LOCK; return may; } /** * smk_access - determine if a subject has a specific access to an object * @subject: a pointer to the subject's Smack label entry * @object: a pointer to the object's Smack label entry * @request: the access requested, in "MAY" format * @a : a pointer to the audit data * * This function looks up the subject/object pair in the * access rule list and returns 0 if the access is permitted, * non zero otherwise. * * Smack labels are shared on smack_list */ int smk_access(struct smack_known *subject, struct smack_known *object, int request, struct smk_audit_info *a) { int may = MAY_NOT; int rc = 0; /* * Hardcoded comparisons. */ /* * A star subject can't access any object. */ if (subject == &smack_known_star) { rc = -EACCES; goto out_audit; } /* * An internet object can be accessed by any subject. * Tasks cannot be assigned the internet label. * An internet subject can access any object. */ if (object == &smack_known_web || subject == &smack_known_web) goto out_audit; /* * A star object can be accessed by any subject. */ if (object == &smack_known_star) goto out_audit; /* * An object can be accessed in any way by a subject * with the same label. */ if (subject->smk_known == object->smk_known) goto out_audit; /* * A hat subject can read or lock any object. * A floor object can be read or locked by any subject. 
*/ if ((request & MAY_ANYREAD) == request || (request & MAY_LOCK) == request) { if (object == &smack_known_floor) goto out_audit; if (subject == &smack_known_hat) goto out_audit; } /* * Beyond here an explicit relationship is required. * If the requested access is contained in the available * access (e.g. read is included in readwrite) it's * good. A negative response from smk_access_entry() * indicates there is no entry for this pair. */ rcu_read_lock(); may = smk_access_entry(subject->smk_known, object->smk_known, &subject->smk_rules); rcu_read_unlock(); if (may <= 0 || (request & may) != request) { rc = -EACCES; goto out_audit; } #ifdef CONFIG_SECURITY_SMACK_BRINGUP /* * Return a positive value if using bringup mode. * This allows the hooks to identify checks that * succeed because of "b" rules. */ if (may & MAY_BRINGUP) rc = SMACK_BRINGUP_ALLOW; #endif out_audit: #ifdef CONFIG_SECURITY_SMACK_BRINGUP if (rc < 0) { if (object == smack_unconfined) rc = SMACK_UNCONFINED_OBJECT; if (subject == smack_unconfined) rc = SMACK_UNCONFINED_SUBJECT; } #endif #ifdef CONFIG_AUDIT if (a) smack_log(subject->smk_known, object->smk_known, request, rc, a); #endif return rc; } /** * smk_tskacc - determine if a task has a specific access to an object * @tsp: a pointer to the subject's task * @obj_known: a pointer to the object's label entry * @mode: the access requested, in "MAY" format * @a : common audit data * * This function checks the subject task's label/object label pair * in the access rule list and returns 0 if the access is permitted, * non zero otherwise. It allows that the task may have the capability * to override the rules. 
*/ int smk_tskacc(struct task_smack *tsp, struct smack_known *obj_known, u32 mode, struct smk_audit_info *a) { struct smack_known *sbj_known = smk_of_task(tsp); int may; int rc; /* * Check the global rule list */ rc = smk_access(sbj_known, obj_known, mode, NULL); if (rc >= 0) { /* * If there is an entry in the task's rule list * it can further restrict access. */ may = smk_access_entry(sbj_known->smk_known, obj_known->smk_known, &tsp->smk_rules); if (may < 0) goto out_audit; if ((mode & may) == mode) goto out_audit; rc = -EACCES; } /* * Allow for priviliged to override policy. */ if (rc != 0 && smack_privileged(CAP_MAC_OVERRIDE)) rc = 0; out_audit: #ifdef CONFIG_AUDIT if (a) smack_log(sbj_known->smk_known, obj_known->smk_known, mode, rc, a); #endif return rc; } /** * smk_curacc - determine if current has a specific access to an object * @obj_known: a pointer to the object's Smack label entry * @mode: the access requested, in "MAY" format * @a : common audit data * * This function checks the current subject label/object label pair * in the access rule list and returns 0 if the access is permitted, * non zero otherwise. It allows that current may have the capability * to override the rules. 
*/ int smk_curacc(struct smack_known *obj_known, u32 mode, struct smk_audit_info *a) { struct task_smack *tsp = current_security(); return smk_tskacc(tsp, obj_known, mode, a); } #ifdef CONFIG_AUDIT /** * smack_str_from_perm : helper to transalate an int to a * readable string * @string : the string to fill * @access : the int * */ static inline void smack_str_from_perm(char *string, int access) { int i = 0; if (access & MAY_READ) string[i++] = 'r'; if (access & MAY_WRITE) string[i++] = 'w'; if (access & MAY_EXEC) string[i++] = 'x'; if (access & MAY_APPEND) string[i++] = 'a'; if (access & MAY_TRANSMUTE) string[i++] = 't'; if (access & MAY_LOCK) string[i++] = 'l'; string[i] = '\0'; } /** * smack_log_callback - SMACK specific information * will be called by generic audit code * @ab : the audit_buffer * @a : audit_data * */ static void smack_log_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; struct smack_audit_data *sad = ad->smack_audit_data; audit_log_format(ab, "lsm=SMACK fn=%s action=%s", ad->smack_audit_data->function, sad->result ? "denied" : "granted"); audit_log_format(ab, " subject="); audit_log_untrustedstring(ab, sad->subject); audit_log_format(ab, " object="); audit_log_untrustedstring(ab, sad->object); if (sad->request[0] == '\0') audit_log_format(ab, " labels_differ"); else audit_log_format(ab, " requested=%s", sad->request); } /** * smack_log - Audit the granting or denial of permissions. * @subject_label : smack label of the requester * @object_label : smack label of the object being accessed * @request: requested permissions * @result: result from smk_access * @a: auxiliary audit data * * Audit the granting or denial of permissions in accordance * with the policy. 
*/ void smack_log(char *subject_label, char *object_label, int request, int result, struct smk_audit_info *ad) { #ifdef CONFIG_SECURITY_SMACK_BRINGUP char request_buffer[SMK_NUM_ACCESS_TYPE + 5]; #else char request_buffer[SMK_NUM_ACCESS_TYPE + 1]; #endif struct smack_audit_data *sad; struct common_audit_data *a = &ad->a; /* check if we have to log the current event */ if (result < 0 && (log_policy & SMACK_AUDIT_DENIED) == 0) return; if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0) return; sad = a->smack_audit_data; if (sad->function == NULL) sad->function = "unknown"; /* end preparing the audit data */ smack_str_from_perm(request_buffer, request); sad->subject = subject_label; sad->object = object_label; #ifdef CONFIG_SECURITY_SMACK_BRINGUP /* * The result may be positive in bringup mode. * A positive result is an allow, but not for normal reasons. * Mark it as successful, but don't filter it out even if * the logging policy says to do so. */ if (result == SMACK_UNCONFINED_SUBJECT) strcat(request_buffer, "(US)"); else if (result == SMACK_UNCONFINED_OBJECT) strcat(request_buffer, "(UO)"); if (result > 0) result = 0; #endif sad->request = request_buffer; sad->result = result; common_lsm_audit(a, smack_log_callback, NULL); } #else /* #ifdef CONFIG_AUDIT */ void smack_log(char *subject_label, char *object_label, int request, int result, struct smk_audit_info *ad) { } #endif DEFINE_MUTEX(smack_known_lock); struct hlist_head smack_known_hash[SMACK_HASH_SLOTS]; /** * smk_insert_entry - insert a smack label into a hash map, * * this function must be called under smack_known_lock */ void smk_insert_entry(struct smack_known *skp) { unsigned int hash; struct hlist_head *head; hash = full_name_hash(NULL, skp->smk_known, strlen(skp->smk_known)); head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)]; hlist_add_head_rcu(&skp->smk_hashed, head); list_add_rcu(&skp->list, &smack_known_list); } /** * smk_find_entry - find a label on the list, return the list entry * 
@string: a text string that might be a Smack label * * Returns a pointer to the entry in the label list that * matches the passed string or NULL if not found. */ struct smack_known *smk_find_entry(const char *string) { unsigned int hash; struct hlist_head *head; struct smack_known *skp; hash = full_name_hash(NULL, string, strlen(string)); head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)]; hlist_for_each_entry_rcu(skp, head, smk_hashed) if (strcmp(skp->smk_known, string) == 0) return skp; return NULL; } /** * smk_parse_smack - parse smack label from a text string * @string: a text string that might contain a Smack label * @len: the maximum size, or zero if it is NULL terminated. * * Returns a pointer to the clean label or an error code. */ char *smk_parse_smack(const char *string, int len) { char *smack; int i; if (len <= 0) len = strlen(string) + 1; /* * Reserve a leading '-' as an indicator that * this isn't a label, but an option to interfaces * including /smack/cipso and /smack/cipso2 */ if (string[0] == '-') return ERR_PTR(-EINVAL); for (i = 0; i < len; i++) if (string[i] > '~' || string[i] <= ' ' || string[i] == '/' || string[i] == '"' || string[i] == '\\' || string[i] == '\'') break; if (i == 0 || i >= SMK_LONGLABEL) return ERR_PTR(-EINVAL); smack = kzalloc(i + 1, GFP_KERNEL); if (smack == NULL) return ERR_PTR(-ENOMEM); strncpy(smack, string, i); return smack; } /** * smk_netlbl_mls - convert a catset to netlabel mls categories * @catset: the Smack categories * @sap: where to put the netlabel categories * * Allocates and fills attr.mls * Returns 0 on success, error code on failure. 
 */
int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
		   int len)
{
	unsigned char *cp;
	unsigned char m;
	int cat;
	int rc;
	int byte;

	sap->flags |= NETLBL_SECATTR_MLS_CAT;
	sap->attr.mls.lvl = level;
	sap->attr.mls.cat = NULL;

	/* Scan the bitmap MSB-first; bit position maps to category number,
	 * starting from category 1. */
	for (cat = 1, cp = catset, byte = 0; byte < len; cp++, byte++)
		for (m = 0x80; m != 0; m >>= 1, cat++) {
			if ((m & *cp) == 0)
				continue;
			rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
						  cat, GFP_KERNEL);
			if (rc < 0) {
				/* Release the partially built catmap. */
				netlbl_catmap_free(sap->attr.mls.cat);
				return rc;
			}
		}

	return 0;
}

/**
 * smk_import_entry - import a label, return the list entry
 * @string: a text string that might be a Smack label
 * @len: the maximum size, or zero if it is NULL terminated.
 *
 * Returns a pointer to the entry in the label list that
 * matches the passed string, adding it if necessary,
 * or an error code.
 */
struct smack_known *smk_import_entry(const char *string, int len)
{
	struct smack_known *skp;
	char *smack;
	int slen;
	int rc;

	/* smack is a heap copy owned here until handed to skp below. */
	smack = smk_parse_smack(string, len);
	if (IS_ERR(smack))
		return ERR_CAST(smack);

	mutex_lock(&smack_known_lock);

	/* Already known: keep the existing entry, drop our copy. */
	skp = smk_find_entry(smack);
	if (skp != NULL)
		goto freeout;

	skp = kzalloc(sizeof(*skp), GFP_KERNEL);
	if (skp == NULL) {
		skp = ERR_PTR(-ENOMEM);
		goto freeout;
	}

	/* From here on, smack belongs to skp and must not be freed. */
	skp->smk_known = smack;
	skp->smk_secid = smack_next_secid++;
	skp->smk_netlabel.domain = skp->smk_known;
	skp->smk_netlabel.flags =
		NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;
	/*
	 * If direct labeling works use it.
	 * Otherwise use mapped labeling.
	 */
	slen = strlen(smack);
	if (slen < SMK_CIPSOLEN)
		rc = smk_netlbl_mls(smack_cipso_direct, skp->smk_known,
				    &skp->smk_netlabel, slen);
	else
		rc = smk_netlbl_mls(smack_cipso_mapped,
				    (char *)&skp->smk_secid,
				    &skp->smk_netlabel,
				    sizeof(skp->smk_secid));

	if (rc >= 0) {
		INIT_LIST_HEAD(&skp->smk_rules);
		mutex_init(&skp->smk_rules_lock);
		/*
		 * Make sure that the entry is actually
		 * filled before putting it on the list.
		 */
		smk_insert_entry(skp);
		goto unlockout;
	}
	/*
	 * smk_netlbl_mls failed.
	 */
	kfree(skp);
	skp = ERR_PTR(rc);
freeout:
	/* smack was never adopted by a new entry on these paths. */
	kfree(smack);
unlockout:
	mutex_unlock(&smack_known_lock);

	return skp;
}

/**
 * smack_from_secid - find the Smack label associated with a secid
 * @secid: an integer that might be associated with a Smack label
 *
 * Returns a pointer to the appropriate Smack label entry if there is one,
 * otherwise a pointer to the invalid Smack label.
 */
struct smack_known *smack_from_secid(const u32 secid)
{
	struct smack_known *skp;

	rcu_read_lock();
	list_for_each_entry_rcu(skp, &smack_known_list, list) {
		if (skp->smk_secid == secid) {
			rcu_read_unlock();
			return skp;
		}
	}

	/*
	 * If we got this far someone asked for the translation
	 * of a secid that is not on the list.
	 */
	rcu_read_unlock();
	return &smack_known_huh;
}

/*
 * Unless a process is running with one of these labels
 * even having CAP_MAC_OVERRIDE isn't enough to grant
 * privilege to violate MAC policy. If no labels are
 * designated (the empty list case) capabilities apply to
 * everyone.
 */
LIST_HEAD(smack_onlycap_list);
DEFINE_MUTEX(smack_onlycap_lock);

/**
 * smack_privileged_cred - are all privilege requirements met by cred
 * @cap: The requested capability
 * @cred: the credential to use
 *
 * Is the task privileged and allowed to be privileged
 * by the onlycap rule.
 *
 * Returns true if the task is allowed to be privileged, false if it's not.
*/ bool smack_privileged_cred(int cap, const struct cred *cred) { struct task_smack *tsp = cred->security; struct smack_known *skp = tsp->smk_task; struct smack_known_list_elem *sklep; int rc; rc = cap_capable(cred, &init_user_ns, cap, SECURITY_CAP_AUDIT); if (rc) return false; rcu_read_lock(); if (list_empty(&smack_onlycap_list)) { rcu_read_unlock(); return true; } list_for_each_entry_rcu(sklep, &smack_onlycap_list, list) { if (sklep->smk_label == skp) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } /** * smack_privileged - are all privilege requirements met * @cap: The requested capability * * Is the task privileged and allowed to be privileged * by the onlycap rule. * * Returns true if the task is allowed to be privileged, false if it's not. */ bool smack_privileged(int cap) { /* * All kernel tasks are privileged */ if (unlikely(current->flags & PF_KTHREAD)) return true; return smack_privileged_cred(cap, current_cred()); }
gpl-2.0
MoKee/android_kernel_lge_hammerheadcaf
sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
111
23431
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/slab.h> #include <sound/apr_audio-v2.h> #include <sound/q6asm-v2.h> #include <sound/compress_params.h> #include "msm-audio-effects-q6-v2.h" int msm_audio_effects_virtualizer_handler(struct audio_client *ac, struct virtualizer_params *virtualizer, long *values) { int devices = *values++; int num_commands = *values++; char *params; int *updt_params, i, prev_enable_flag; uint32_t params_length = (MAX_INBAND_PARAM_SZ); int rc = 0; pr_debug("%s\n", __func__); if (!ac) { pr_err("%s: cannot set audio effects\n", __func__); return -EINVAL; } params = kzalloc(params_length, GFP_KERNEL); if (!params) { pr_err("%s, params memory alloc failed\n", __func__); return -ENOMEM; } pr_debug("%s: device: %d\n", __func__, devices); updt_params = (int *)params; params_length = 0; for (i = 0; i < num_commands; i++) { uint32_t command_id = *values++; uint32_t command_config_state = *values++; uint32_t index_offset = *values++; uint32_t length = *values++; switch (command_id) { case VIRTUALIZER_ENABLE: if (length != 1 || index_offset != 0) { pr_err("VIRT ENABLE:invalid params\n"); rc = -EINVAL; goto invalid_config; } prev_enable_flag = virtualizer->enable_flag; virtualizer->enable_flag = *values++; pr_debug("%s:VIRT ENABLE prev:%d, new:%d\n", __func__, prev_enable_flag, virtualizer->enable_flag); if (prev_enable_flag != virtualizer->enable_flag) { *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER; *updt_params++ = AUDPROC_PARAM_ID_VIRTUALIZER_ENABLE; *updt_params++ 
= VIRTUALIZER_ENABLE_PARAM_SZ; *updt_params++ = virtualizer->enable_flag; params_length += COMMAND_PAYLOAD_SZ + VIRTUALIZER_ENABLE_PARAM_SZ; } break; case VIRTUALIZER_STRENGTH: if (length != 1 || index_offset != 0) { pr_err("VIRT STRENGTH:invalid params\n"); rc = -EINVAL; goto invalid_config; } virtualizer->strength = *values++; pr_debug("%s: VIRT STRENGTH val: %d\n", __func__, virtualizer->strength); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER; *updt_params++ = AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH; *updt_params++ = VIRTUALIZER_STRENGTH_PARAM_SZ; *updt_params++ = virtualizer->strength; params_length += COMMAND_PAYLOAD_SZ + VIRTUALIZER_STRENGTH_PARAM_SZ; } break; case VIRTUALIZER_OUT_TYPE: if (length != 1 || index_offset != 0) { pr_err("VIRT OUT_TYPE:invalid params\n"); rc = -EINVAL; goto invalid_config; } virtualizer->out_type = *values++; pr_debug("%s: VIRT OUT_TYPE val:%d\n", __func__, virtualizer->out_type); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER; *updt_params++ = AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE; *updt_params++ = VIRTUALIZER_OUT_TYPE_PARAM_SZ; *updt_params++ = virtualizer->out_type; params_length += COMMAND_PAYLOAD_SZ + VIRTUALIZER_OUT_TYPE_PARAM_SZ; } break; case VIRTUALIZER_GAIN_ADJUST: if (length != 1 || index_offset != 0) { pr_err("VIRT GAIN_ADJUST: invalid params\n"); rc = -EINVAL; goto invalid_config; } virtualizer->gain_adjust = *values++; pr_debug("%s: VIRT GAIN_ADJUST val:%d\n", __func__, virtualizer->gain_adjust); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER; *updt_params++ = AUDPROC_PARAM_ID_VIRTUALIZER_GAIN_ADJUST; *updt_params++ = VIRTUALIZER_GAIN_ADJUST_PARAM_SZ; *updt_params++ = virtualizer->gain_adjust; params_length += COMMAND_PAYLOAD_SZ + VIRTUALIZER_GAIN_ADJUST_PARAM_SZ; } break; default: pr_err("%s: Invalid command to set config\n", __func__); break; } } if (params_length) 
q6asm_send_audio_effects_params(ac, params, params_length); invalid_config: kfree(params); return rc; } int msm_audio_effects_reverb_handler(struct audio_client *ac, struct reverb_params *reverb, long *values) { int devices = *values++; int num_commands = *values++; char *params; int *updt_params, i, prev_enable_flag; uint32_t params_length = (MAX_INBAND_PARAM_SZ); int rc = 0; pr_debug("%s\n", __func__); if (!ac) { pr_err("%s: cannot set audio effects\n", __func__); return -EINVAL; } params = kzalloc(params_length, GFP_KERNEL); if (!params) { pr_err("%s, params memory alloc failed\n", __func__); return -ENOMEM; } pr_debug("%s: device: %d\n", __func__, devices); updt_params = (int *)params; params_length = 0; for (i = 0; i < num_commands; i++) { uint32_t command_id = *values++; uint32_t command_config_state = *values++; uint32_t index_offset = *values++; uint32_t length = *values++; switch (command_id) { case REVERB_ENABLE: if (length != 1 || index_offset != 0) { pr_err("REVERB_ENABLE:invalid params\n"); rc = -EINVAL; goto invalid_config; } prev_enable_flag = reverb->enable_flag; reverb->enable_flag = *values++; pr_debug("%s:REVERB_ENABLE prev:%d,new:%d\n", __func__, prev_enable_flag, reverb->enable_flag); if (prev_enable_flag != reverb->enable_flag) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_ENABLE; *updt_params++ = REVERB_ENABLE_PARAM_SZ; *updt_params++ = reverb->enable_flag; params_length += COMMAND_PAYLOAD_SZ + REVERB_ENABLE_PARAM_SZ; } break; case REVERB_MODE: if (length != 1 || index_offset != 0) { pr_err("REVERB_MODE:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->mode = *values++; pr_debug("%s: REVERB_MODE val:%d\n", __func__, reverb->mode); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_MODE; *updt_params++ = REVERB_MODE_PARAM_SZ; *updt_params++ = reverb->mode; params_length += COMMAND_PAYLOAD_SZ + REVERB_MODE_PARAM_SZ; 
} break; case REVERB_PRESET: if (length != 1 || index_offset != 0) { pr_err("REVERB_PRESET:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->preset = *values++; pr_debug("%s: REVERB_PRESET val:%d\n", __func__, reverb->preset); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_PRESET; *updt_params++ = REVERB_PRESET_PARAM_SZ; *updt_params++ = reverb->preset; params_length += COMMAND_PAYLOAD_SZ + REVERB_PRESET_PARAM_SZ; } break; case REVERB_WET_MIX: if (length != 1 || index_offset != 0) { pr_err("REVERB_WET_MIX:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->wet_mix = *values++; pr_debug("%s: REVERB_WET_MIX val:%d\n", __func__, reverb->wet_mix); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_WET_MIX; *updt_params++ = REVERB_WET_MIX_PARAM_SZ; *updt_params++ = reverb->wet_mix; params_length += COMMAND_PAYLOAD_SZ + REVERB_WET_MIX_PARAM_SZ; } break; case REVERB_GAIN_ADJUST: if (length != 1 || index_offset != 0) { pr_err("REVERB_GAIN_ADJUST:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->gain_adjust = *values++; pr_debug("%s: REVERB_GAIN_ADJUST val:%d\n", __func__, reverb->gain_adjust); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST; *updt_params++ = REVERB_GAIN_ADJUST_PARAM_SZ; *updt_params++ = reverb->gain_adjust; params_length += COMMAND_PAYLOAD_SZ + REVERB_GAIN_ADJUST_PARAM_SZ; } break; case REVERB_ROOM_LEVEL: if (length != 1 || index_offset != 0) { pr_err("REVERB_ROOM_LEVEL:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->room_level = *values++; pr_debug("%s: REVERB_ROOM_LEVEL val:%d\n", __func__, reverb->room_level); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL; 
*updt_params++ = REVERB_ROOM_LEVEL_PARAM_SZ; *updt_params++ = reverb->room_level; params_length += COMMAND_PAYLOAD_SZ + REVERB_ROOM_LEVEL_PARAM_SZ; } break; case REVERB_ROOM_HF_LEVEL: if (length != 1 || index_offset != 0) { pr_err("REVERB_ROOM_HF_LEVEL:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->room_hf_level = *values++; pr_debug("%s: REVERB_ROOM_HF_LEVEL val%d\n", __func__, reverb->room_hf_level); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL; *updt_params++ = REVERB_ROOM_HF_LEVEL_PARAM_SZ; *updt_params++ = reverb->room_hf_level; params_length += COMMAND_PAYLOAD_SZ + REVERB_ROOM_HF_LEVEL_PARAM_SZ; } break; case REVERB_DECAY_TIME: if (length != 1 || index_offset != 0) { pr_err("REVERB_DECAY_TIME:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->decay_time = *values++; pr_debug("%s: REVERB_DECAY_TIME val:%d\n", __func__, reverb->decay_time); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_DECAY_TIME; *updt_params++ = REVERB_DECAY_TIME_PARAM_SZ; *updt_params++ = reverb->decay_time; params_length += COMMAND_PAYLOAD_SZ + REVERB_DECAY_TIME_PARAM_SZ; } break; case REVERB_DECAY_HF_RATIO: if (length != 1 || index_offset != 0) { pr_err("REVERB_DECAY_HF_RATIOinvalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->decay_hf_ratio = *values++; pr_debug("%s: REVERB_DECAY_HF_RATIO val%d\n", __func__, reverb->decay_hf_ratio); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO; *updt_params++ = REVERB_DECAY_HF_RATIO_PARAM_SZ; *updt_params++ = reverb->decay_hf_ratio; params_length += COMMAND_PAYLOAD_SZ + REVERB_DECAY_HF_RATIO_PARAM_SZ; } break; case REVERB_REFLECTIONS_LEVEL: if (length != 1 || index_offset != 0) { pr_err("REVERB_REFLECTION_LVLinvalid params\n"); rc = -EINVAL; goto 
invalid_config; } reverb->reflections_level = *values++; pr_debug("%s: REVERB_REFLECTIONS_LEVEL val:%d\n", __func__, reverb->reflections_level); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_REFLECTIONS_LEVEL; *updt_params++ = REVERB_REFLECTIONS_LEVEL_PARAM_SZ; *updt_params++ = reverb->reflections_level; params_length += COMMAND_PAYLOAD_SZ + REVERB_REFLECTIONS_LEVEL_PARAM_SZ; } break; case REVERB_REFLECTIONS_DELAY: if (length != 1 || index_offset != 0) { pr_err("REVERB_REFLECTION_DLYinvalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->reflections_delay = *values++; pr_debug("%s: REVERB_REFLECTIONS_DELAY val:%d\n", __func__, reverb->reflections_delay); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_REFLECTIONS_DELAY; *updt_params++ = REVERB_REFLECTIONS_DELAY_PARAM_SZ; *updt_params++ = reverb->reflections_delay; params_length += COMMAND_PAYLOAD_SZ + REVERB_REFLECTIONS_DELAY_PARAM_SZ; } break; case REVERB_LEVEL: if (length != 1 || index_offset != 0) { pr_err("REVERB_LEVEL:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->level = *values++; pr_debug("%s: REVERB_LEVEL val:%d\n", __func__, reverb->level); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_LEVEL; *updt_params++ = REVERB_LEVEL_PARAM_SZ; *updt_params++ = reverb->level; params_length += COMMAND_PAYLOAD_SZ + REVERB_LEVEL_PARAM_SZ; } break; case REVERB_DELAY: if (length != 1 || index_offset != 0) { pr_err("REVERB_DELAY:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->delay = *values++; pr_debug("%s:REVERB_DELAY val:%d\n", __func__, reverb->delay); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_DELAY; *updt_params++ = REVERB_DELAY_PARAM_SZ; *updt_params++ = 
reverb->delay; params_length += COMMAND_PAYLOAD_SZ + REVERB_DELAY_PARAM_SZ; } break; case REVERB_DIFFUSION: if (length != 1 || index_offset != 0) { pr_err("REVERB_DIFFUSION:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->diffusion = *values++; pr_debug("%s: REVERB_DIFFUSION val:%d\n", __func__, reverb->diffusion); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_DIFFUSION; *updt_params++ = REVERB_DIFFUSION_PARAM_SZ; *updt_params++ = reverb->diffusion; params_length += COMMAND_PAYLOAD_SZ + REVERB_DIFFUSION_PARAM_SZ; } break; case REVERB_DENSITY: if (length != 1 || index_offset != 0) { pr_err("REVERB_DENSITY:invalid params\n"); rc = -EINVAL; goto invalid_config; } reverb->density = *values++; pr_debug("%s: REVERB_DENSITY val:%d\n", __func__, reverb->density); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = AUDPROC_PARAM_ID_REVERB_DENSITY; *updt_params++ = REVERB_DENSITY_PARAM_SZ; *updt_params++ = reverb->density; params_length += COMMAND_PAYLOAD_SZ + REVERB_DENSITY_PARAM_SZ; } break; default: pr_err("%s: Invalid command to set config\n", __func__); break; } } if (params_length) q6asm_send_audio_effects_params(ac, params, params_length); invalid_config: kfree(params); return rc; } int msm_audio_effects_bass_boost_handler(struct audio_client *ac, struct bass_boost_params *bass_boost, long *values) { int devices = *values++; int num_commands = *values++; char *params; int *updt_params, i, prev_enable_flag; uint32_t params_length = (MAX_INBAND_PARAM_SZ); int rc = 0; pr_debug("%s\n", __func__); if (!ac) { pr_err("%s: cannot set audio effects\n", __func__); return -EINVAL; } params = kzalloc(params_length, GFP_KERNEL); if (!params) { pr_err("%s, params memory alloc failed\n", __func__); return -ENOMEM; } pr_debug("%s: device: %d\n", __func__, devices); updt_params = (int *)params; params_length = 0; for (i = 0; i < 
num_commands; i++) { uint32_t command_id = *values++; uint32_t command_config_state = *values++; uint32_t index_offset = *values++; uint32_t length = *values++; switch (command_id) { case BASS_BOOST_ENABLE: if (length != 1 || index_offset != 0) { pr_err("BASS_BOOST_ENABLE:invalid params\n"); rc = -EINVAL; goto invalid_config; } prev_enable_flag = bass_boost->enable_flag; bass_boost->enable_flag = *values++; pr_debug("%s: BASS_BOOST_ENABLE prev:%d new:%d\n", __func__, prev_enable_flag, bass_boost->enable_flag); if (prev_enable_flag != bass_boost->enable_flag) { *updt_params++ = AUDPROC_MODULE_ID_BASS_BOOST; *updt_params++ = AUDPROC_PARAM_ID_BASS_BOOST_ENABLE; *updt_params++ = BASS_BOOST_ENABLE_PARAM_SZ; *updt_params++ = bass_boost->enable_flag; params_length += COMMAND_PAYLOAD_SZ + BASS_BOOST_ENABLE_PARAM_SZ; } break; case BASS_BOOST_MODE: if (length != 1 || index_offset != 0) { pr_err("BASS_BOOST_MODE:invalid params\n"); rc = -EINVAL; goto invalid_config; } bass_boost->mode = *values++; pr_debug("%s: BASS_BOOST_MODE val:%d\n", __func__, bass_boost->mode); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_BASS_BOOST; *updt_params++ = AUDPROC_PARAM_ID_BASS_BOOST_MODE; *updt_params++ = BASS_BOOST_MODE_PARAM_SZ; *updt_params++ = bass_boost->mode; params_length += COMMAND_PAYLOAD_SZ + BASS_BOOST_MODE_PARAM_SZ; } break; case BASS_BOOST_STRENGTH: if (length != 1 || index_offset != 0) { pr_err("BASS_BOOST_STRENGTH:invalid params\n"); rc = -EINVAL; goto invalid_config; } bass_boost->strength = *values++; pr_debug("%s: BASS_BOOST_STRENGTHi val:%d\n", __func__, bass_boost->strength); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_BASS_BOOST; *updt_params++ = AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH; *updt_params++ = BASS_BOOST_STRENGTH_PARAM_SZ; *updt_params++ = bass_boost->strength; params_length += COMMAND_PAYLOAD_SZ + BASS_BOOST_STRENGTH_PARAM_SZ; } break; default: pr_err("%s: Invalid command to set config\n", __func__); 
break; } } if (params_length) q6asm_send_audio_effects_params(ac, params, params_length); invalid_config: kfree(params); return rc; } int msm_audio_effects_popless_eq_handler(struct audio_client *ac, struct eq_params *eq, long *values) { int devices = *values++; int num_commands = *values++; char *params; int *updt_params, i, prev_enable_flag; uint32_t params_length = (MAX_INBAND_PARAM_SZ); int rc = 0; pr_debug("%s\n", __func__); if (!ac) { pr_err("%s: cannot set audio effects\n", __func__); return -EINVAL; } params = kzalloc(params_length, GFP_KERNEL); if (!params) { pr_err("%s, params memory alloc failed\n", __func__); return -ENOMEM; } pr_debug("%s: device: %d\n", __func__, devices); updt_params = (int *)params; params_length = 0; for (i = 0; i < num_commands; i++) { uint32_t command_id = *values++; uint32_t command_config_state = *values++; uint32_t index_offset = *values++; uint32_t length = *values++; int idx, j; switch (command_id) { case EQ_ENABLE: if (length != 1 || index_offset != 0) { pr_err("EQ_ENABLE:invalid params\n"); rc = -EINVAL; goto invalid_config; } prev_enable_flag = eq->enable_flag; eq->enable_flag = *values++; pr_debug("%s: EQ_ENABLE prev:%d new:%d\n", __func__, prev_enable_flag, eq->enable_flag); if (prev_enable_flag != eq->enable_flag) { *updt_params++ = AUDPROC_MODULE_ID_POPLESS_EQUALIZER; *updt_params++ = AUDPROC_PARAM_ID_EQ_ENABLE; *updt_params++ = EQ_ENABLE_PARAM_SZ; *updt_params++ = eq->enable_flag; params_length += COMMAND_PAYLOAD_SZ + EQ_ENABLE_PARAM_SZ; } break; case EQ_CONFIG: if (length < EQ_CONFIG_PARAM_LEN || index_offset != 0) { pr_err("EQ_CONFIG:invalid params\n"); rc = -EINVAL; goto invalid_config; } pr_debug("%s: EQ_CONFIG bands:%d, pgain:%d, pset:%d\n", __func__, eq->config.num_bands, eq->config.eq_pregain, eq->config.preset_id); for (idx = 0; idx < MAX_EQ_BANDS; idx++) eq->per_band_cfg[idx].band_idx = -1; eq->config.eq_pregain = *values++; eq->config.preset_id = *values++; eq->config.num_bands = *values++; if 
(eq->config.num_bands > MAX_EQ_BANDS) { pr_err("EQ_CONFIG:invalid num of bands\n"); rc = -EINVAL; goto invalid_config; } if (eq->config.num_bands && (((length - EQ_CONFIG_PARAM_LEN)/ EQ_CONFIG_PER_BAND_PARAM_LEN) != eq->config.num_bands)) { pr_err("EQ_CONFIG:invalid length per band\n"); rc = -EINVAL; goto invalid_config; } for (j = 0; j < eq->config.num_bands; j++) { idx = *values++; if (idx >= MAX_EQ_BANDS) { pr_err("EQ_CONFIG:invalid band index\n"); rc = -EINVAL; goto invalid_config; } eq->per_band_cfg[idx].band_idx = idx; eq->per_band_cfg[idx].filter_type = *values++; eq->per_band_cfg[idx].freq_millihertz = *values++; eq->per_band_cfg[idx].gain_millibels = *values++; eq->per_band_cfg[idx].quality_factor = *values++; } if (command_config_state == CONFIG_SET) { int config_param_length = EQ_CONFIG_PARAM_SZ + (EQ_CONFIG_PER_BAND_PARAM_SZ* eq->config.num_bands); *updt_params++ = AUDPROC_MODULE_ID_POPLESS_EQUALIZER; *updt_params++ = AUDPROC_PARAM_ID_EQ_CONFIG; *updt_params++ = config_param_length; *updt_params++ = eq->config.eq_pregain; *updt_params++ = eq->config.preset_id; *updt_params++ = eq->config.num_bands; for (idx = 0; idx < MAX_EQ_BANDS; idx++) { if (eq->per_band_cfg[idx].band_idx < 0) continue; *updt_params++ = eq->per_band_cfg[idx].filter_type; *updt_params++ = eq->per_band_cfg[idx].freq_millihertz; *updt_params++ = eq->per_band_cfg[idx].gain_millibels; *updt_params++ = eq->per_band_cfg[idx].quality_factor; *updt_params++ = eq->per_band_cfg[idx].band_idx; } params_length += COMMAND_PAYLOAD_SZ + config_param_length; } break; case EQ_BAND_INDEX: if (length != 1 || index_offset != 0) { pr_err("EQ_BAND_INDEX:invalid params\n"); rc = -EINVAL; goto invalid_config; } idx = *values++; if (idx > MAX_EQ_BANDS) { pr_err("EQ_BAND_INDEX:invalid band index\n"); rc = -EINVAL; goto invalid_config; } eq->band_index = idx; pr_debug("%s: EQ_BAND_INDEX val:%d\n", __func__, eq->band_index); if (command_config_state == CONFIG_SET) { *updt_params++ = 
AUDPROC_MODULE_ID_POPLESS_EQUALIZER; *updt_params++ = AUDPROC_PARAM_ID_EQ_BAND_INDEX; *updt_params++ = EQ_BAND_INDEX_PARAM_SZ; *updt_params++ = eq->band_index; params_length += COMMAND_PAYLOAD_SZ + EQ_BAND_INDEX_PARAM_SZ; } break; case EQ_SINGLE_BAND_FREQ: if (length != 1 || index_offset != 0) { pr_err("EQ_SINGLE_BAND_FREQ:invalid params\n"); rc = -EINVAL; goto invalid_config; } if (eq->band_index > MAX_EQ_BANDS) { pr_err("EQ_SINGLE_BAND_FREQ:invalid index\n"); break; } eq->freq_millihertz = *values++; pr_debug("%s: EQ_SINGLE_BAND_FREQ idx:%d, val:%d\n", __func__, eq->band_index, eq->freq_millihertz); if (command_config_state == CONFIG_SET) { *updt_params++ = AUDPROC_MODULE_ID_POPLESS_EQUALIZER; *updt_params++ = AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ; *updt_params++ = EQ_SINGLE_BAND_FREQ_PARAM_SZ; *updt_params++ = eq->freq_millihertz; params_length += COMMAND_PAYLOAD_SZ + EQ_SINGLE_BAND_FREQ_PARAM_SZ; } break; default: pr_err("%s: Invalid command to set config\n", __func__); break; } } if (params_length) q6asm_send_audio_effects_params(ac, params, params_length); invalid_config: kfree(params); return rc; }
gpl-2.0
clearwa/mypi
sound/soc/codecs/tas2552.c
111
14464
/* * tas2552.c - ALSA SoC Texas Instruments TAS2552 Mono Audio Amplifier * * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com * * Author: Dan Murphy <dmurphy@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/gpio/consumer.h> #include <linux/regulator/consumer.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/tlv.h> #include <sound/tas2552-plat.h> #include "tas2552.h" static struct reg_default tas2552_reg_defs[] = { {TAS2552_CFG_1, 0x22}, {TAS2552_CFG_3, 0x80}, {TAS2552_DOUT, 0x00}, {TAS2552_OUTPUT_DATA, 0xc0}, {TAS2552_PDM_CFG, 0x01}, {TAS2552_PGA_GAIN, 0x00}, {TAS2552_BOOST_PT_CTRL, 0x0f}, {TAS2552_RESERVED_0D, 0x00}, {TAS2552_LIMIT_RATE_HYS, 0x08}, {TAS2552_CFG_2, 0xef}, {TAS2552_SER_CTRL_1, 0x00}, {TAS2552_SER_CTRL_2, 0x00}, {TAS2552_PLL_CTRL_1, 0x10}, {TAS2552_PLL_CTRL_2, 0x00}, {TAS2552_PLL_CTRL_3, 0x00}, {TAS2552_BTIP, 0x8f}, {TAS2552_BTS_CTRL, 0x80}, {TAS2552_LIMIT_RELEASE, 0x04}, {TAS2552_LIMIT_INT_COUNT, 0x00}, {TAS2552_EDGE_RATE_CTRL, 0x40}, {TAS2552_VBAT_DATA, 0x00}, }; #define TAS2552_NUM_SUPPLIES 3 static const char *tas2552_supply_names[TAS2552_NUM_SUPPLIES] = { "vbat", /* vbat voltage */ "iovdd", /* I/O Voltage */ "avdd", /* Analog DAC Voltage */ }; struct tas2552_data { struct snd_soc_codec *codec; struct regmap *regmap; struct 
i2c_client *tas2552_client; struct regulator_bulk_data supplies[TAS2552_NUM_SUPPLIES]; struct gpio_desc *enable_gpio; unsigned char regs[TAS2552_VBAT_DATA]; unsigned int mclk; }; /* Input mux controls */ static const char *tas2552_input_texts[] = { "Digital", "Analog" }; static SOC_ENUM_SINGLE_DECL(tas2552_input_mux_enum, TAS2552_CFG_3, 7, tas2552_input_texts); static const struct snd_kcontrol_new tas2552_input_mux_control[] = { SOC_DAPM_ENUM("Input selection", tas2552_input_mux_enum) }; static const struct snd_soc_dapm_widget tas2552_dapm_widgets[] = { SND_SOC_DAPM_INPUT("IN"), /* MUX Controls */ SND_SOC_DAPM_MUX("Input selection", SND_SOC_NOPM, 0, 0, tas2552_input_mux_control), SND_SOC_DAPM_AIF_IN("DAC IN", "DAC Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_OUT_DRV("ClassD", TAS2552_CFG_2, 7, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("PLL", TAS2552_CFG_2, 3, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("OUT") }; static const struct snd_soc_dapm_route tas2552_audio_map[] = { {"DAC", NULL, "DAC IN"}, {"Input selection", "Digital", "DAC"}, {"Input selection", "Analog", "IN"}, {"ClassD", NULL, "Input selection"}, {"OUT", NULL, "ClassD"}, {"ClassD", NULL, "PLL"}, }; #ifdef CONFIG_PM static void tas2552_sw_shutdown(struct tas2552_data *tas_data, int sw_shutdown) { u8 cfg1_reg; if (!tas_data->codec) return; if (sw_shutdown) cfg1_reg = 0; else cfg1_reg = TAS2552_SWS_MASK; snd_soc_update_bits(tas_data->codec, TAS2552_CFG_1, TAS2552_SWS_MASK, cfg1_reg); } #endif static int tas2552_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct tas2552_data *tas2552 = dev_get_drvdata(codec->dev); int sample_rate, pll_clk; int d; u8 p, j; if (!tas2552->mclk) return -EINVAL; snd_soc_update_bits(codec, TAS2552_CFG_2, TAS2552_PLL_ENABLE, 0); if (tas2552->mclk == TAS2552_245MHZ_CLK || tas2552->mclk == TAS2552_225MHZ_CLK) { /* By pass the PLL configuration 
*/ snd_soc_update_bits(codec, TAS2552_PLL_CTRL_2, TAS2552_PLL_BYPASS_MASK, TAS2552_PLL_BYPASS); } else { /* Fill in the PLL control registers for J & D * PLL_CLK = (.5 * freq * J.D) / 2^p * Need to fill in J and D here based on incoming freq */ p = snd_soc_read(codec, TAS2552_PLL_CTRL_1); p = (p >> 7); sample_rate = params_rate(params); if (sample_rate == 48000) pll_clk = TAS2552_245MHZ_CLK; else if (sample_rate == 44100) pll_clk = TAS2552_225MHZ_CLK; else { dev_vdbg(codec->dev, "Substream sample rate is not found %i\n", params_rate(params)); return -EINVAL; } j = (pll_clk * 2 * (1 << p)) / tas2552->mclk; d = (pll_clk * 2 * (1 << p)) % tas2552->mclk; snd_soc_update_bits(codec, TAS2552_PLL_CTRL_1, TAS2552_PLL_J_MASK, j); snd_soc_write(codec, TAS2552_PLL_CTRL_2, (d >> 7) & TAS2552_PLL_D_UPPER_MASK); snd_soc_write(codec, TAS2552_PLL_CTRL_3, d & TAS2552_PLL_D_LOWER_MASK); } return 0; } static int tas2552_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_codec *codec = dai->codec; u8 serial_format; u8 serial_control_mask; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: serial_format = 0x00; break; case SND_SOC_DAIFMT_CBS_CFM: serial_format = TAS2552_WORD_CLK_MASK; break; case SND_SOC_DAIFMT_CBM_CFS: serial_format = TAS2552_BIT_CLK_MASK; break; case SND_SOC_DAIFMT_CBM_CFM: serial_format = (TAS2552_BIT_CLK_MASK | TAS2552_WORD_CLK_MASK); break; default: dev_vdbg(codec->dev, "DAI Format master is not found\n"); return -EINVAL; } serial_control_mask = TAS2552_BIT_CLK_MASK | TAS2552_WORD_CLK_MASK; switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: serial_format &= TAS2552_DAIFMT_I2S_MASK; break; case SND_SOC_DAIFMT_DSP_A: serial_format |= TAS2552_DAIFMT_DSP; break; case SND_SOC_DAIFMT_RIGHT_J: serial_format |= TAS2552_DAIFMT_RIGHT_J; break; case SND_SOC_DAIFMT_LEFT_J: serial_format |= TAS2552_DAIFMT_LEFT_J; break; default: dev_vdbg(codec->dev, "DAI Format is not found\n"); return -EINVAL; } if (fmt & 
SND_SOC_DAIFMT_FORMAT_MASK) serial_control_mask |= TAS2552_DATA_FORMAT_MASK; snd_soc_update_bits(codec, TAS2552_SER_CTRL_1, serial_control_mask, serial_format); return 0; } static int tas2552_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = dai->codec; struct tas2552_data *tas2552 = dev_get_drvdata(codec->dev); tas2552->mclk = freq; return 0; } static int tas2552_mute(struct snd_soc_dai *dai, int mute) { u8 cfg1_reg; struct snd_soc_codec *codec = dai->codec; if (mute) cfg1_reg = TAS2552_MUTE_MASK; else cfg1_reg = ~TAS2552_MUTE_MASK; snd_soc_update_bits(codec, TAS2552_CFG_1, TAS2552_MUTE_MASK, cfg1_reg); return 0; } #ifdef CONFIG_PM static int tas2552_runtime_suspend(struct device *dev) { struct tas2552_data *tas2552 = dev_get_drvdata(dev); tas2552_sw_shutdown(tas2552, 0); regcache_cache_only(tas2552->regmap, true); regcache_mark_dirty(tas2552->regmap); if (tas2552->enable_gpio) gpiod_set_value(tas2552->enable_gpio, 0); return 0; } static int tas2552_runtime_resume(struct device *dev) { struct tas2552_data *tas2552 = dev_get_drvdata(dev); if (tas2552->enable_gpio) gpiod_set_value(tas2552->enable_gpio, 1); tas2552_sw_shutdown(tas2552, 1); regcache_cache_only(tas2552->regmap, false); regcache_sync(tas2552->regmap); return 0; } #endif static const struct dev_pm_ops tas2552_pm = { SET_RUNTIME_PM_OPS(tas2552_runtime_suspend, tas2552_runtime_resume, NULL) }; static struct snd_soc_dai_ops tas2552_speaker_dai_ops = { .hw_params = tas2552_hw_params, .set_sysclk = tas2552_set_dai_sysclk, .set_fmt = tas2552_set_dai_fmt, .digital_mute = tas2552_mute, }; /* Formats supported by TAS2552 driver. */ #define TAS2552_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) /* TAS2552 dai structure. 
*/ static struct snd_soc_dai_driver tas2552_dai[] = { { .name = "tas2552-amplifier", .playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = TAS2552_FORMATS, }, .ops = &tas2552_speaker_dai_ops, }, }; /* * DAC digital volumes. From -7 to 24 dB in 1 dB steps */ static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 24); static const struct snd_kcontrol_new tas2552_snd_controls[] = { SOC_SINGLE_TLV("Speaker Driver Playback Volume", TAS2552_PGA_GAIN, 0, 0x1f, 1, dac_tlv), }; static const struct reg_default tas2552_init_regs[] = { { TAS2552_RESERVED_0D, 0xc0 }, }; static int tas2552_codec_probe(struct snd_soc_codec *codec) { struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec); int ret; tas2552->codec = codec; ret = regulator_bulk_enable(ARRAY_SIZE(tas2552->supplies), tas2552->supplies); if (ret != 0) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); return ret; } if (tas2552->enable_gpio) gpiod_set_value(tas2552->enable_gpio, 1); ret = pm_runtime_get_sync(codec->dev); if (ret < 0) { dev_err(codec->dev, "Enabling device failed: %d\n", ret); goto probe_fail; } snd_soc_write(codec, TAS2552_CFG_1, TAS2552_MUTE_MASK | TAS2552_PLL_SRC_BCLK); snd_soc_write(codec, TAS2552_CFG_3, TAS2552_I2S_OUT_SEL | TAS2552_DIN_SRC_SEL_AVG_L_R | TAS2552_88_96KHZ); snd_soc_write(codec, TAS2552_DOUT, TAS2552_PDM_DATA_I); snd_soc_write(codec, TAS2552_OUTPUT_DATA, TAS2552_PDM_DATA_V_I | 0x8); snd_soc_write(codec, TAS2552_PDM_CFG, TAS2552_PDM_BCLK_SEL); snd_soc_write(codec, TAS2552_BOOST_PT_CTRL, TAS2552_APT_DELAY_200 | TAS2552_APT_THRESH_2_1_7); ret = regmap_register_patch(tas2552->regmap, tas2552_init_regs, ARRAY_SIZE(tas2552_init_regs)); if (ret != 0) { dev_err(codec->dev, "Failed to write init registers: %d\n", ret); goto patch_fail; } snd_soc_write(codec, TAS2552_CFG_2, TAS2552_BOOST_EN | TAS2552_APT_EN | TAS2552_LIM_EN); return 0; patch_fail: pm_runtime_put(codec->dev); probe_fail: if (tas2552->enable_gpio) 
gpiod_set_value(tas2552->enable_gpio, 0); regulator_bulk_disable(ARRAY_SIZE(tas2552->supplies), tas2552->supplies); return -EIO; } static int tas2552_codec_remove(struct snd_soc_codec *codec) { struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec); pm_runtime_put(codec->dev); if (tas2552->enable_gpio) gpiod_set_value(tas2552->enable_gpio, 0); return 0; }; #ifdef CONFIG_PM static int tas2552_suspend(struct snd_soc_codec *codec) { struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec); int ret; ret = regulator_bulk_disable(ARRAY_SIZE(tas2552->supplies), tas2552->supplies); if (ret != 0) dev_err(codec->dev, "Failed to disable supplies: %d\n", ret); return 0; } static int tas2552_resume(struct snd_soc_codec *codec) { struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec); int ret; ret = regulator_bulk_enable(ARRAY_SIZE(tas2552->supplies), tas2552->supplies); if (ret != 0) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); } return 0; } #else #define tas2552_suspend NULL #define tas2552_resume NULL #endif static struct snd_soc_codec_driver soc_codec_dev_tas2552 = { .probe = tas2552_codec_probe, .remove = tas2552_codec_remove, .suspend = tas2552_suspend, .resume = tas2552_resume, .controls = tas2552_snd_controls, .num_controls = ARRAY_SIZE(tas2552_snd_controls), .dapm_widgets = tas2552_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(tas2552_dapm_widgets), .dapm_routes = tas2552_audio_map, .num_dapm_routes = ARRAY_SIZE(tas2552_audio_map), }; static const struct regmap_config tas2552_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = TAS2552_MAX_REG, .reg_defaults = tas2552_reg_defs, .num_reg_defaults = ARRAY_SIZE(tas2552_reg_defs), .cache_type = REGCACHE_RBTREE, }; static int tas2552_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev; struct tas2552_data *data; int ret; int i; dev = &client->dev; data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); if (data == NULL) return 
-ENOMEM; data->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(data->enable_gpio)) return PTR_ERR(data->enable_gpio); data->tas2552_client = client; data->regmap = devm_regmap_init_i2c(client, &tas2552_regmap_config); if (IS_ERR(data->regmap)) { ret = PTR_ERR(data->regmap); dev_err(&client->dev, "Failed to allocate register map: %d\n", ret); return ret; } for (i = 0; i < ARRAY_SIZE(data->supplies); i++) data->supplies[i].supply = tas2552_supply_names[i]; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->supplies), data->supplies); if (ret != 0) { dev_err(dev, "Failed to request supplies: %d\n", ret); return ret; } pm_runtime_set_active(&client->dev); pm_runtime_set_autosuspend_delay(&client->dev, 1000); pm_runtime_use_autosuspend(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_mark_last_busy(&client->dev); pm_runtime_put_sync_autosuspend(&client->dev); dev_set_drvdata(&client->dev, data); ret = snd_soc_register_codec(&client->dev, &soc_codec_dev_tas2552, tas2552_dai, ARRAY_SIZE(tas2552_dai)); if (ret < 0) dev_err(&client->dev, "Failed to register codec: %d\n", ret); return ret; } static int tas2552_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id tas2552_id[] = { { "tas2552", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tas2552_id); #if IS_ENABLED(CONFIG_OF) static const struct of_device_id tas2552_of_match[] = { { .compatible = "ti,tas2552", }, {}, }; MODULE_DEVICE_TABLE(of, tas2552_of_match); #endif static struct i2c_driver tas2552_i2c_driver = { .driver = { .name = "tas2552", .owner = THIS_MODULE, .of_match_table = of_match_ptr(tas2552_of_match), .pm = &tas2552_pm, }, .probe = tas2552_probe, .remove = tas2552_i2c_remove, .id_table = tas2552_id, }; module_i2c_driver(tas2552_i2c_driver); MODULE_AUTHOR("Dan Muprhy <dmurphy@ti.com>"); MODULE_DESCRIPTION("TAS2552 Audio amplifier driver"); MODULE_LICENSE("GPL");
gpl-2.0
TheTypoMaster/android_kernel_samsung_exynos5433
drivers/staging/comedi/drivers/8255_pci.c
367
8015
/* * COMEDI driver for generic PCI based 8255 digital i/o boards * Copyright (C) 2012 H Hartley Sweeten <hsweeten@visionengravers.com> * * Based on the tested adl_pci7296 driver written by: * Jon Grierson <jd@renko.co.uk> * and the experimental cb_pcidio driver written by: * Yoshiya Matsuzaka * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 2000 David A. Schleef <ds@schleef.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ /* Driver: 8255_pci Description: Generic PCI based 8255 Digital I/O boards Devices: (ADLink) PCI-7224 [adl_pci-7224] - 24 channels (ADLink) PCI-7248 [adl_pci-7248] - 48 channels (ADLink) PCI-7296 [adl_pci-7296] - 96 channels (Measurement Computing) PCI-DIO24 [cb_pci-dio24] - 24 channels (Measurement Computing) PCI-DIO24H [cb_pci-dio24h] - 24 channels (Measurement Computing) PCI-DIO48H [cb_pci-dio48h] - 48 channels (Measurement Computing) PCI-DIO96H [cb_pci-dio96h] - 96 channels (National Instruments) PCI-DIO-96 [ni_pci-dio-96] - 96 channels (National Instruments) PCI-DIO-96B [ni_pci-dio-96b] - 96 channels (National Instruments) PXI-6508 [ni_pxi-6508] - 96 channels (National Instruments) PCI-6503 [ni_pci-6503] - 24 channels (National Instruments) PCI-6503B [ni_pci-6503b] - 24 channels (National Instruments) PCI-6503X [ni_pci-6503x] - 24 channels (National Instruments) PXI-6503 [ni_pxi-6503] - 24 channels Author: H Hartley Sweeten <hsweeten@visionengravers.com> Updated: Wed, 12 Sep 2012 11:52:01 -0700 Status: untested Some of these boards also have an 8254 programmable timer/counter chip. This chip is not currently supported by this driver. Interrupt support for these boards is also not currently supported. 
Configuration Options: not applicable, uses PCI auto config */ #include <linux/pci.h> #include "../comedidev.h" #include "8255.h" enum pci_8255_boardid { BOARD_ADLINK_PCI7224, BOARD_ADLINK_PCI7248, BOARD_ADLINK_PCI7296, BOARD_CB_PCIDIO24, BOARD_CB_PCIDIO24H, BOARD_CB_PCIDIO48H, BOARD_CB_PCIDIO96H, BOARD_NI_PCIDIO96, BOARD_NI_PCIDIO96B, BOARD_NI_PXI6508, BOARD_NI_PCI6503, BOARD_NI_PCI6503B, BOARD_NI_PCI6503X, BOARD_NI_PXI_6503, }; struct pci_8255_boardinfo { const char *name; int dio_badr; int n_8255; }; static const struct pci_8255_boardinfo pci_8255_boards[] = { [BOARD_ADLINK_PCI7224] = { .name = "adl_pci-7224", .dio_badr = 2, .n_8255 = 1, }, [BOARD_ADLINK_PCI7248] = { .name = "adl_pci-7248", .dio_badr = 2, .n_8255 = 2, }, [BOARD_ADLINK_PCI7296] = { .name = "adl_pci-7296", .dio_badr = 2, .n_8255 = 4, }, [BOARD_CB_PCIDIO24] = { .name = "cb_pci-dio24", .dio_badr = 2, .n_8255 = 1, }, [BOARD_CB_PCIDIO24H] = { .name = "cb_pci-dio24h", .dio_badr = 2, .n_8255 = 1, }, [BOARD_CB_PCIDIO48H] = { .name = "cb_pci-dio48h", .dio_badr = 1, .n_8255 = 2, }, [BOARD_CB_PCIDIO96H] = { .name = "cb_pci-dio96h", .dio_badr = 2, .n_8255 = 4, }, [BOARD_NI_PCIDIO96] = { .name = "ni_pci-dio-96", .dio_badr = 1, .n_8255 = 4, }, [BOARD_NI_PCIDIO96B] = { .name = "ni_pci-dio-96b", .dio_badr = 1, .n_8255 = 4, }, [BOARD_NI_PXI6508] = { .name = "ni_pxi-6508", .dio_badr = 1, .n_8255 = 4, }, [BOARD_NI_PCI6503] = { .name = "ni_pci-6503", .dio_badr = 1, .n_8255 = 1, }, [BOARD_NI_PCI6503B] = { .name = "ni_pci-6503b", .dio_badr = 1, .n_8255 = 1, }, [BOARD_NI_PCI6503X] = { .name = "ni_pci-6503x", .dio_badr = 1, .n_8255 = 1, }, [BOARD_NI_PXI_6503] = { .name = "ni_pxi-6503", .dio_badr = 1, .n_8255 = 1, }, }; struct pci_8255_private { void __iomem *mmio_base; }; static int pci_8255_mmio(int dir, int port, int data, unsigned long iobase) { void __iomem *mmio_base = (void __iomem *)iobase; if (dir) { writeb(data, mmio_base + port); return 0; } else { return readb(mmio_base + port); } } static int 
pci_8255_auto_attach(struct comedi_device *dev, unsigned long context) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); const struct pci_8255_boardinfo *board = NULL; struct pci_8255_private *devpriv; struct comedi_subdevice *s; bool is_mmio; int ret; int i; if (context < ARRAY_SIZE(pci_8255_boards)) board = &pci_8255_boards[context]; if (!board) return -ENODEV; dev->board_ptr = board; dev->board_name = board->name; devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL); if (!devpriv) return -ENOMEM; dev->private = devpriv; ret = comedi_pci_enable(dev); if (ret) return ret; is_mmio = (pci_resource_flags(pcidev, board->dio_badr) & IORESOURCE_MEM) != 0; if (is_mmio) { devpriv->mmio_base = pci_ioremap_bar(pcidev, board->dio_badr); if (!devpriv->mmio_base) return -ENOMEM; } else { dev->iobase = pci_resource_start(pcidev, board->dio_badr); } /* * One, two, or four subdevices are setup by this driver depending * on the number of channels provided by the board. Each subdevice * has 24 channels supported by the 8255 module. 
*/ ret = comedi_alloc_subdevices(dev, board->n_8255); if (ret) return ret; for (i = 0; i < board->n_8255; i++) { unsigned long iobase; s = &dev->subdevices[i]; if (is_mmio) { iobase = (unsigned long)(devpriv->mmio_base + (i * 4)); ret = subdev_8255_init(dev, s, pci_8255_mmio, iobase); } else { iobase = dev->iobase + (i * 4); ret = subdev_8255_init(dev, s, NULL, iobase); } if (ret) return ret; } dev_info(dev->class_dev, "%s attached (%d digital i/o channels)\n", dev->board_name, board->n_8255 * 24); return 0; } static void pci_8255_detach(struct comedi_device *dev) { struct pci_8255_private *devpriv = dev->private; int i; for (i = 0; i < dev->n_subdevices; i++) comedi_spriv_free(dev, i); if (devpriv && devpriv->mmio_base) iounmap(devpriv->mmio_base); comedi_pci_disable(dev); } static struct comedi_driver pci_8255_driver = { .driver_name = "8255_pci", .module = THIS_MODULE, .auto_attach = pci_8255_auto_attach, .detach = pci_8255_detach, }; static int pci_8255_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &pci_8255_driver, id->driver_data); } static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = { { PCI_VDEVICE(ADLINK, 0x7224), BOARD_ADLINK_PCI7224 }, { PCI_VDEVICE(ADLINK, 0x7248), BOARD_ADLINK_PCI7248 }, { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 }, { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H }, { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H }, { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H }, { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 }, { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B }, { PCI_VDEVICE(NI, 0x13c0), BOARD_NI_PXI6508 }, { PCI_VDEVICE(NI, 0x0400), BOARD_NI_PCI6503 }, { PCI_VDEVICE(NI, 0x1250), BOARD_NI_PCI6503B }, { PCI_VDEVICE(NI, 0x17d0), BOARD_NI_PCI6503X }, { PCI_VDEVICE(NI, 0x1800), BOARD_NI_PXI_6503 }, { 0 } }; MODULE_DEVICE_TABLE(pci, pci_8255_pci_table); static struct pci_driver pci_8255_pci_driver = { .name = "8255_pci", .id_table = 
pci_8255_pci_table, .probe = pci_8255_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(pci_8255_driver, pci_8255_pci_driver); MODULE_DESCRIPTION("COMEDI - Generic PCI based 8255 Digital I/O boards"); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_LICENSE("GPL");
gpl-2.0
PennPanda/litmus-rt
drivers/mtd/ubi/fastmap.c
879
38296
/* * Copyright (c) 2012 Linutronix GmbH * Author: Richard Weinberger <richard@nod.at> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * */ #include <linux/crc32.h> #include "ubi.h" /** * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device. * @ubi: UBI device description object */ size_t ubi_calc_fm_size(struct ubi_device *ubi) { size_t size; size = sizeof(struct ubi_fm_hdr) + \ sizeof(struct ubi_fm_scan_pool) + \ sizeof(struct ubi_fm_scan_pool) + \ (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \ (sizeof(struct ubi_fm_eba) + \ (ubi->peb_count * sizeof(__be32))) + \ sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES; return roundup(size, ubi->leb_size); } /** * new_fm_vhdr - allocate a new volume header for fastmap usage. * @ubi: UBI device description object * @vol_id: the VID of the new header * * Returns a new struct ubi_vid_hdr on success. * NULL indicates out of memory. */ static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id) { struct ubi_vid_hdr *new; new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!new) goto out; new->vol_type = UBI_VID_DYNAMIC; new->vol_id = cpu_to_be32(vol_id); /* UBI implementations without fastmap support have to delete the * fastmap. */ new->compat = UBI_COMPAT_DELETE; out: return new; } /** * add_aeb - create and add a attach erase block to a given list. * @ai: UBI attach info object * @list: the target list * @pnum: PEB number of the new attach erase block * @ec: erease counter of the new LEB * @scrub: scrub this PEB after attaching * * Returns 0 on success, < 0 indicates an internal error. 
*/ static int add_aeb(struct ubi_attach_info *ai, struct list_head *list, int pnum, int ec, int scrub) { struct ubi_ainf_peb *aeb; aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); if (!aeb) return -ENOMEM; aeb->pnum = pnum; aeb->ec = ec; aeb->lnum = -1; aeb->scrub = scrub; aeb->copy_flag = aeb->sqnum = 0; ai->ec_sum += aeb->ec; ai->ec_count++; if (ai->max_ec < aeb->ec) ai->max_ec = aeb->ec; if (ai->min_ec > aeb->ec) ai->min_ec = aeb->ec; list_add_tail(&aeb->u.list, list); return 0; } /** * add_vol - create and add a new volume to ubi_attach_info. * @ai: ubi_attach_info object * @vol_id: VID of the new volume * @used_ebs: number of used EBS * @data_pad: data padding value of the new volume * @vol_type: volume type * @last_eb_bytes: number of bytes in the last LEB * * Returns the new struct ubi_ainf_volume on success. * NULL indicates an error. */ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id, int used_ebs, int data_pad, u8 vol_type, int last_eb_bytes) { struct ubi_ainf_volume *av; struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; while (*p) { parent = *p; av = rb_entry(parent, struct ubi_ainf_volume, rb); if (vol_id > av->vol_id) p = &(*p)->rb_left; else if (vol_id > av->vol_id) p = &(*p)->rb_right; } av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL); if (!av) goto out; av->highest_lnum = av->leb_count = 0; av->vol_id = vol_id; av->used_ebs = used_ebs; av->data_pad = data_pad; av->last_data_size = last_eb_bytes; av->compat = 0; av->vol_type = vol_type; av->root = RB_ROOT; dbg_bld("found volume (ID %i)", vol_id); rb_link_node(&av->rb, parent, p); rb_insert_color(&av->rb, &ai->volumes); out: return av; } /** * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it * from it's original list. 
* @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	/* walk the per-volume LEB tree, ordered by lnum */
	p = &av->root.rb_node;
	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	/* move @aeb from its list into the volume's rb-tree */
	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Takes ownership of @new_aeb: it is either linked into @av's tree,
 * moved to @ai->erase, or freed back to the slab cache.
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	/* look up the LEB number from the new header in @av's tree */
	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			/* the old PEB is scheduled for erasure via @victim */
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
				GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size = \
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			/* take over the in-tree node in place of new_aeb */
			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	/* fastmap's own volumes are not tracked in the attach info */
	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;

		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		/* pool PEB references a volume the fastmap never declared */
		ubi_err("orphaned volume in fastmap pool!");
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	/* @pnum can belong to at most one volume, so stop at first match */
	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty PEBs).
* @ubi: UBI device object * @ai: attach info object * @pebs: an array of all PEB numbers in the to be scanned pool * @pool_size: size of the pool (number of entries in @pebs) * @max_sqnum: pointer to the maximal sequence number * @eba_orphans: list of PEBs which need to be scanned * @free: list of PEBs which are most likely free (and go into @ai->free) * * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned. * < 0 indicates an internal error. */ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, int *pebs, int pool_size, unsigned long long *max_sqnum, struct list_head *eba_orphans, struct list_head *free) { struct ubi_vid_hdr *vh; struct ubi_ec_hdr *ech; struct ubi_ainf_peb *new_aeb, *tmp_aeb; int i, pnum, err, found_orphan, ret = 0; ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ech) return -ENOMEM; vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!vh) { kfree(ech); return -ENOMEM; } dbg_bld("scanning fastmap pool: size = %i", pool_size); /* * Now scan all PEBs in the pool to find changes which have been made * after the creation of the fastmap */ for (i = 0; i < pool_size; i++) { int scrub = 0; pnum = be32_to_cpu(pebs[i]); if (ubi_io_is_bad(ubi, pnum)) { ubi_err("bad PEB in fastmap pool!"); ret = UBI_BAD_FASTMAP; goto out; } err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); if (err && err != UBI_IO_BITFLIPS) { ubi_err("unable to read EC header! PEB:%i err:%i", pnum, err); ret = err > 0 ? 
UBI_BAD_FASTMAP : err; goto out; } else if (ret == UBI_IO_BITFLIPS) scrub = 1; if (be32_to_cpu(ech->image_seq) != ubi->image_seq) { ubi_err("bad image seq: 0x%x, expected: 0x%x", be32_to_cpu(ech->image_seq), ubi->image_seq); err = UBI_BAD_FASTMAP; goto out; } err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) { unsigned long long ec = be64_to_cpu(ech->ec); unmap_peb(ai, pnum); dbg_bld("Adding PEB to free: %i", pnum); if (err == UBI_IO_FF_BITFLIPS) add_aeb(ai, free, pnum, ec, 1); else add_aeb(ai, free, pnum, ec, 0); continue; } else if (err == 0 || err == UBI_IO_BITFLIPS) { dbg_bld("Found non empty PEB:%i in pool", pnum); if (err == UBI_IO_BITFLIPS) scrub = 1; found_orphan = 0; list_for_each_entry(tmp_aeb, eba_orphans, u.list) { if (tmp_aeb->pnum == pnum) { found_orphan = 1; break; } } if (found_orphan) { kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); list_del(&tmp_aeb->u.list); } new_aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); if (!new_aeb) { ret = -ENOMEM; goto out; } new_aeb->ec = be64_to_cpu(ech->ec); new_aeb->pnum = pnum; new_aeb->lnum = be32_to_cpu(vh->lnum); new_aeb->sqnum = be64_to_cpu(vh->sqnum); new_aeb->copy_flag = vh->copy_flag; new_aeb->scrub = scrub; if (*max_sqnum < new_aeb->sqnum) *max_sqnum = new_aeb->sqnum; err = process_pool_aeb(ubi, ai, vh, new_aeb); if (err) { ret = err > 0 ? UBI_BAD_FASTMAP : err; goto out; } } else { /* We are paranoid and fall back to scanning mode */ ubi_err("fastmap pool PEBs contains damaged PEBs!"); ret = err > 0 ? UBI_BAD_FASTMAP : err; goto out; } } out: ubi_free_vid_hdr(ubi, vh); kfree(ech); return ret; } /** * count_fastmap_pebs - Counts the PEBs found by fastmap. 
* @ai: The UBI attach info object */ static int count_fastmap_pebs(struct ubi_attach_info *ai) { struct ubi_ainf_peb *aeb; struct ubi_ainf_volume *av; struct rb_node *rb1, *rb2; int n = 0; list_for_each_entry(aeb, &ai->erase, u.list) n++; list_for_each_entry(aeb, &ai->free, u.list) n++; ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) n++; return n; } /** * ubi_attach_fastmap - creates ubi_attach_info from a fastmap. * @ubi: UBI device object * @ai: UBI attach info object * @fm: the fastmap to be attached * * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable. * < 0 indicates an internal error. */ static int ubi_attach_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, struct ubi_fastmap_layout *fm) { struct list_head used, eba_orphans, free; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb; struct ubi_ec_hdr *ech; struct ubi_fm_sb *fmsb; struct ubi_fm_hdr *fmhdr; struct ubi_fm_scan_pool *fmpl1, *fmpl2; struct ubi_fm_ec *fmec; struct ubi_fm_volhdr *fmvhdr; struct ubi_fm_eba *fm_eba; int ret, i, j, pool_size, wl_pool_size; size_t fm_pos = 0, fm_size = ubi->fm_size; unsigned long long max_sqnum = 0; void *fm_raw = ubi->fm_buf; INIT_LIST_HEAD(&used); INIT_LIST_HEAD(&free); INIT_LIST_HEAD(&eba_orphans); INIT_LIST_HEAD(&ai->corr); INIT_LIST_HEAD(&ai->free); INIT_LIST_HEAD(&ai->erase); INIT_LIST_HEAD(&ai->alien); ai->volumes = RB_ROOT; ai->min_ec = UBI_MAX_ERASECOUNTER; ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab", sizeof(struct ubi_ainf_peb), 0, 0, NULL); if (!ai->aeb_slab_cache) { ret = -ENOMEM; goto fail; } fmsb = (struct ubi_fm_sb *)(fm_raw); ai->max_sqnum = fmsb->sqnum; fm_pos += sizeof(struct ubi_fm_sb); if (fm_pos >= fm_size) goto fail_bad; fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos); fm_pos += sizeof(*fmhdr); if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) { ubi_err("bad fastmap header magic: 0x%x, 
expected: 0x%x", be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC); goto fail_bad; } fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); fm_pos += sizeof(*fmpl1); if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) { ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC); goto fail_bad; } fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); fm_pos += sizeof(*fmpl2); if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) { ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC); goto fail_bad; } pool_size = be16_to_cpu(fmpl1->size); wl_pool_size = be16_to_cpu(fmpl2->size); fm->max_pool_size = be16_to_cpu(fmpl1->max_size); fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size); if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) { ubi_err("bad pool size: %i", pool_size); goto fail_bad; } if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) { ubi_err("bad WL pool size: %i", wl_pool_size); goto fail_bad; } if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE || fm->max_pool_size < 0) { ubi_err("bad maximal pool size: %i", fm->max_pool_size); goto fail_bad; } if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE || fm->max_wl_pool_size < 0) { ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size); goto fail_bad; } /* read EC values from free list */ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) { fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad; add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum), be32_to_cpu(fmec->ec), 0); } /* read EC values from used list */ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) { fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad; add_aeb(ai, &used, be32_to_cpu(fmec->pnum), be32_to_cpu(fmec->ec), 0); } /* read EC values from scrub list */ for (i = 
0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) { fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad; add_aeb(ai, &used, be32_to_cpu(fmec->pnum), be32_to_cpu(fmec->ec), 1); } /* read EC values from erase list */ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) { fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad; add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum), be32_to_cpu(fmec->ec), 1); } ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count); ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count); /* Iterate over all volumes and read their EBA table */ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) { fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos); fm_pos += sizeof(*fmvhdr); if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) { ubi_err("bad fastmap vol header magic: 0x%x, " \ "expected: 0x%x", be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC); goto fail_bad; } av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id), be32_to_cpu(fmvhdr->used_ebs), be32_to_cpu(fmvhdr->data_pad), fmvhdr->vol_type, be32_to_cpu(fmvhdr->last_eb_bytes)); if (!av) goto fail_bad; ai->vols_found++; if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id)) ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id); fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos); fm_pos += sizeof(*fm_eba); fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs)); if (fm_pos >= fm_size) goto fail_bad; if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) { ubi_err("bad fastmap EBA header magic: 0x%x, " \ "expected: 0x%x", be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC); goto fail_bad; } for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) { int pnum = be32_to_cpu(fm_eba->pnum[j]); if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0) continue; aeb = NULL; list_for_each_entry(tmp_aeb, &used, u.list) { if (tmp_aeb->pnum == pnum) aeb = tmp_aeb; } /* This can happen if a PEB is 
already in an EBA known * by this fastmap but the PEB itself is not in the used * list. * In this case the PEB can be within the fastmap pool * or while writing the fastmap it was in the protection * queue. */ if (!aeb) { aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); if (!aeb) { ret = -ENOMEM; goto fail; } aeb->lnum = j; aeb->pnum = be32_to_cpu(fm_eba->pnum[j]); aeb->ec = -1; aeb->scrub = aeb->copy_flag = aeb->sqnum = 0; list_add_tail(&aeb->u.list, &eba_orphans); continue; } aeb->lnum = j; if (av->highest_lnum <= aeb->lnum) av->highest_lnum = aeb->lnum; assign_aeb_to_av(ai, aeb, av); dbg_bld("inserting PEB:%i (LEB %i) to vol %i", aeb->pnum, aeb->lnum, av->vol_id); } ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ech) { ret = -ENOMEM; goto fail; } list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) { int err; if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) { ubi_err("bad PEB in fastmap EBA orphan list"); ret = UBI_BAD_FASTMAP; kfree(ech); goto fail; } err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0); if (err && err != UBI_IO_BITFLIPS) { ubi_err("unable to read EC header! PEB:%i " \ "err:%i", tmp_aeb->pnum, err); ret = err > 0 ? UBI_BAD_FASTMAP : err; kfree(ech); goto fail; } else if (err == UBI_IO_BITFLIPS) tmp_aeb->scrub = 1; tmp_aeb->ec = be64_to_cpu(ech->ec); assign_aeb_to_av(ai, tmp_aeb, av); } kfree(ech); } ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &eba_orphans, &free); if (ret) goto fail; ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &eba_orphans, &free); if (ret) goto fail; if (max_sqnum > ai->max_sqnum) ai->max_sqnum = max_sqnum; list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) list_move_tail(&tmp_aeb->u.list, &ai->free); /* * If fastmap is leaking PEBs (must not happen), raise a * fat warning and fall back to scanning mode. * We do this here because in ubi_wl_init() it's too late * and we cannot fall back to scanning. 
*/ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count - ai->bad_peb_count - fm->used_blocks)) goto fail_bad; return 0; fail_bad: ret = UBI_BAD_FASTMAP; fail: return ret; } /** * ubi_scan_fastmap - scan the fastmap. * @ubi: UBI device object * @ai: UBI attach info to be filled * @fm_anchor: The fastmap starts at this PEB * * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found, * UBI_BAD_FASTMAP if one was found but is not usable. * < 0 indicates an internal error. */ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, int fm_anchor) { struct ubi_fm_sb *fmsb, *fmsb2; struct ubi_vid_hdr *vh; struct ubi_ec_hdr *ech; struct ubi_fastmap_layout *fm; int i, used_blocks, pnum, ret = 0; size_t fm_size; __be32 crc, tmp_crc; unsigned long long sqnum = 0; mutex_lock(&ubi->fm_mutex); memset(ubi->fm_buf, 0, ubi->fm_size); fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL); if (!fmsb) { ret = -ENOMEM; goto out; } fm = kzalloc(sizeof(*fm), GFP_KERNEL); if (!fm) { ret = -ENOMEM; kfree(fmsb); goto out; } ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb)); if (ret && ret != UBI_IO_BITFLIPS) goto free_fm_sb; else if (ret == UBI_IO_BITFLIPS) fm->to_be_tortured[0] = 1; if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) { ubi_err("bad super block magic: 0x%x, expected: 0x%x", be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } if (fmsb->version != UBI_FM_FMT_VERSION) { ubi_err("bad fastmap version: %i, expected: %i", fmsb->version, UBI_FM_FMT_VERSION); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } used_blocks = be32_to_cpu(fmsb->used_blocks); if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) { ubi_err("number of fastmap blocks is invalid: %i", used_blocks); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } fm_size = ubi->leb_size * used_blocks; if (fm_size != ubi->fm_size) { ubi_err("bad fastmap size: %zi, expected: %zi", fm_size, ubi->fm_size); ret = UBI_BAD_FASTMAP; goto free_fm_sb; } ech = kzalloc(ubi->ec_hdr_alsize, 
GFP_KERNEL); if (!ech) { ret = -ENOMEM; goto free_fm_sb; } vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!vh) { ret = -ENOMEM; goto free_hdr; } for (i = 0; i < used_blocks; i++) { pnum = be32_to_cpu(fmsb->block_loc[i]); if (ubi_io_is_bad(ubi, pnum)) { ret = UBI_BAD_FASTMAP; goto free_hdr; } ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); if (ret && ret != UBI_IO_BITFLIPS) { ubi_err("unable to read fastmap block# %i EC (PEB: %i)", i, pnum); if (ret > 0) ret = UBI_BAD_FASTMAP; goto free_hdr; } else if (ret == UBI_IO_BITFLIPS) fm->to_be_tortured[i] = 1; if (!ubi->image_seq) ubi->image_seq = be32_to_cpu(ech->image_seq); if (be32_to_cpu(ech->image_seq) != ubi->image_seq) { ret = UBI_BAD_FASTMAP; goto free_hdr; } ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); if (ret && ret != UBI_IO_BITFLIPS) { ubi_err("unable to read fastmap block# %i (PEB: %i)", i, pnum); goto free_hdr; } if (i == 0) { if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) { ubi_err("bad fastmap anchor vol_id: 0x%x," \ " expected: 0x%x", be32_to_cpu(vh->vol_id), UBI_FM_SB_VOLUME_ID); ret = UBI_BAD_FASTMAP; goto free_hdr; } } else { if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) { ubi_err("bad fastmap data vol_id: 0x%x," \ " expected: 0x%x", be32_to_cpu(vh->vol_id), UBI_FM_DATA_VOLUME_ID); ret = UBI_BAD_FASTMAP; goto free_hdr; } } if (sqnum < be64_to_cpu(vh->sqnum)) sqnum = be64_to_cpu(vh->sqnum); ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum, ubi->leb_start, ubi->leb_size); if (ret && ret != UBI_IO_BITFLIPS) { ubi_err("unable to read fastmap block# %i (PEB: %i, " \ "err: %i)", i, pnum, ret); goto free_hdr; } } kfree(fmsb); fmsb = NULL; fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf); tmp_crc = be32_to_cpu(fmsb2->data_crc); fmsb2->data_crc = 0; crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size); if (crc != tmp_crc) { ubi_err("fastmap data CRC is invalid"); ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc); ret = UBI_BAD_FASTMAP; goto free_hdr; } fmsb2->sqnum = sqnum; fm->used_blocks = 
used_blocks; ret = ubi_attach_fastmap(ubi, ai, fm); if (ret) { if (ret > 0) ret = UBI_BAD_FASTMAP; goto free_hdr; } for (i = 0; i < used_blocks; i++) { struct ubi_wl_entry *e; e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) { while (i--) kfree(fm->e[i]); ret = -ENOMEM; goto free_hdr; } e->pnum = be32_to_cpu(fmsb2->block_loc[i]); e->ec = be32_to_cpu(fmsb2->block_ec[i]); fm->e[i] = e; } ubi->fm = fm; ubi->fm_pool.max_size = ubi->fm->max_pool_size; ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size; ubi_msg("attached by fastmap"); ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size); ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); ubi->fm_disabled = 0; ubi_free_vid_hdr(ubi, vh); kfree(ech); out: mutex_unlock(&ubi->fm_mutex); if (ret == UBI_BAD_FASTMAP) ubi_err("Attach by fastmap failed, doing a full scan!"); return ret; free_hdr: ubi_free_vid_hdr(ubi, vh); kfree(ech); free_fm_sb: kfree(fmsb); kfree(fm); goto out; } /** * ubi_write_fastmap - writes a fastmap. * @ubi: UBI device object * @new_fm: the to be written fastmap * * Returns 0 on success, < 0 indicates an internal error. 
*/ static int ubi_write_fastmap(struct ubi_device *ubi, struct ubi_fastmap_layout *new_fm) { size_t fm_pos = 0; void *fm_raw; struct ubi_fm_sb *fmsb; struct ubi_fm_hdr *fmh; struct ubi_fm_scan_pool *fmpl1, *fmpl2; struct ubi_fm_ec *fec; struct ubi_fm_volhdr *fvh; struct ubi_fm_eba *feba; struct rb_node *node; struct ubi_wl_entry *wl_e; struct ubi_volume *vol; struct ubi_vid_hdr *avhdr, *dvhdr; struct ubi_work *ubi_wrk; int ret, i, j, free_peb_count, used_peb_count, vol_count; int scrub_peb_count, erase_peb_count; fm_raw = ubi->fm_buf; memset(ubi->fm_buf, 0, ubi->fm_size); avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); if (!avhdr) { ret = -ENOMEM; goto out; } dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID); if (!dvhdr) { ret = -ENOMEM; goto out_kfree; } spin_lock(&ubi->volumes_lock); spin_lock(&ubi->wl_lock); fmsb = (struct ubi_fm_sb *)fm_raw; fm_pos += sizeof(*fmsb); ubi_assert(fm_pos <= ubi->fm_size); fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos); fm_pos += sizeof(*fmh); ubi_assert(fm_pos <= ubi->fm_size); fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC); fmsb->version = UBI_FM_FMT_VERSION; fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks); /* the max sqnum will be filled in while *reading* the fastmap */ fmsb->sqnum = 0; fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC); free_peb_count = 0; used_peb_count = 0; scrub_peb_count = 0; erase_peb_count = 0; vol_count = 0; fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); fm_pos += sizeof(*fmpl1); fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC); fmpl1->size = cpu_to_be16(ubi->fm_pool.size); fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size); for (i = 0; i < ubi->fm_pool.size; i++) fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]); fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); fm_pos += sizeof(*fmpl2); fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC); fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size); fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size); for (i = 0; i < ubi->fm_wl_pool.size; i++) fmpl2->pebs[i] = 
cpu_to_be32(ubi->fm_wl_pool.pebs[i]); for (node = rb_first(&ubi->free); node; node = rb_next(node)) { wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fec->pnum = cpu_to_be32(wl_e->pnum); fec->ec = cpu_to_be32(wl_e->ec); free_peb_count++; fm_pos += sizeof(*fec); ubi_assert(fm_pos <= ubi->fm_size); } fmh->free_peb_count = cpu_to_be32(free_peb_count); for (node = rb_first(&ubi->used); node; node = rb_next(node)) { wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fec->pnum = cpu_to_be32(wl_e->pnum); fec->ec = cpu_to_be32(wl_e->ec); used_peb_count++; fm_pos += sizeof(*fec); ubi_assert(fm_pos <= ubi->fm_size); } fmh->used_peb_count = cpu_to_be32(used_peb_count); for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) { wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fec->pnum = cpu_to_be32(wl_e->pnum); fec->ec = cpu_to_be32(wl_e->ec); scrub_peb_count++; fm_pos += sizeof(*fec); ubi_assert(fm_pos <= ubi->fm_size); } fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count); list_for_each_entry(ubi_wrk, &ubi->works, list) { if (ubi_is_erase_work(ubi_wrk)) { wl_e = ubi_wrk->e; ubi_assert(wl_e); fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); fec->pnum = cpu_to_be32(wl_e->pnum); fec->ec = cpu_to_be32(wl_e->ec); erase_peb_count++; fm_pos += sizeof(*fec); ubi_assert(fm_pos <= ubi->fm_size); } } fmh->erase_peb_count = cpu_to_be32(erase_peb_count); for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) { vol = ubi->volumes[i]; if (!vol) continue; vol_count++; fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos); fm_pos += sizeof(*fvh); ubi_assert(fm_pos <= ubi->fm_size); fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC); fvh->vol_id = cpu_to_be32(vol->vol_id); fvh->vol_type = vol->vol_type; fvh->used_ebs = cpu_to_be32(vol->used_ebs); fvh->data_pad = cpu_to_be32(vol->data_pad); fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes); 
ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME || vol->vol_type == UBI_STATIC_VOLUME); feba = (struct ubi_fm_eba *)(fm_raw + fm_pos); fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs); ubi_assert(fm_pos <= ubi->fm_size); for (j = 0; j < vol->reserved_pebs; j++) feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]); feba->reserved_pebs = cpu_to_be32(j); feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC); } fmh->vol_count = cpu_to_be32(vol_count); fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count); avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); avhdr->lnum = 0; spin_unlock(&ubi->wl_lock); spin_unlock(&ubi->volumes_lock); dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum); ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr); if (ret) { ubi_err("unable to write vid_hdr to fastmap SB!"); goto out_kfree; } for (i = 0; i < new_fm->used_blocks; i++) { fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum); fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec); } fmsb->data_crc = 0; fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw, ubi->fm_size)); for (i = 1; i < new_fm->used_blocks; i++) { dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); dvhdr->lnum = cpu_to_be32(i); dbg_bld("writing fastmap data to PEB %i sqnum %llu", new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum)); ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr); if (ret) { ubi_err("unable to write vid_hdr to PEB %i!", new_fm->e[i]->pnum); goto out_kfree; } } for (i = 0; i < new_fm->used_blocks; i++) { ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size), new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size); if (ret) { ubi_err("unable to write fastmap to PEB %i!", new_fm->e[i]->pnum); goto out_kfree; } } ubi_assert(new_fm); ubi->fm = new_fm; dbg_bld("fastmap written!"); out_kfree: ubi_free_vid_hdr(ubi, avhdr); ubi_free_vid_hdr(ubi, dvhdr); out: return ret; } /** * erase_block - Manually erase a PEB. 
* @ubi: UBI device object * @pnum: PEB to be erased * * Returns the new EC value on success, < 0 indicates an internal error. */ static int erase_block(struct ubi_device *ubi, int pnum) { int ret; struct ubi_ec_hdr *ec_hdr; long long ec; ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ec_hdr) return -ENOMEM; ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0); if (ret < 0) goto out; else if (ret && ret != UBI_IO_BITFLIPS) { ret = -EINVAL; goto out; } ret = ubi_io_sync_erase(ubi, pnum, 0); if (ret < 0) goto out; ec = be64_to_cpu(ec_hdr->ec); ec += ret; if (ec > UBI_MAX_ERASECOUNTER) { ret = -EINVAL; goto out; } ec_hdr->ec = cpu_to_be64(ec); ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr); if (ret < 0) goto out; ret = ec; out: kfree(ec_hdr); return ret; } /** * invalidate_fastmap - destroys a fastmap. * @ubi: UBI device object * @fm: the fastmap to be destroyed * * Returns 0 on success, < 0 indicates an internal error. */ static int invalidate_fastmap(struct ubi_device *ubi, struct ubi_fastmap_layout *fm) { int ret, i; struct ubi_vid_hdr *vh; ret = erase_block(ubi, fm->e[0]->pnum); if (ret < 0) return ret; vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); if (!vh) return -ENOMEM; /* deleting the current fastmap SB is not enough, an old SB may exist, * so create a (corrupted) SB such that fastmap will find it and fall * back to scanning mode in any case */ vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh); for (i = 0; i < fm->used_blocks; i++) ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]); return ret; } /** * ubi_update_fastmap - will be called by UBI if a volume changes or * a fastmap pool becomes full. * @ubi: UBI device object * * Returns 0 on success, < 0 indicates an internal error. 
*/ int ubi_update_fastmap(struct ubi_device *ubi) { int ret, i; struct ubi_fastmap_layout *new_fm, *old_fm; struct ubi_wl_entry *tmp_e; mutex_lock(&ubi->fm_mutex); ubi_refill_pools(ubi); if (ubi->ro_mode || ubi->fm_disabled) { mutex_unlock(&ubi->fm_mutex); return 0; } ret = ubi_ensure_anchor_pebs(ubi); if (ret) { mutex_unlock(&ubi->fm_mutex); return ret; } new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); if (!new_fm) { mutex_unlock(&ubi->fm_mutex); return -ENOMEM; } new_fm->used_blocks = ubi->fm_size / ubi->leb_size; for (i = 0; i < new_fm->used_blocks; i++) { new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!new_fm->e[i]) { while (i--) kfree(new_fm->e[i]); kfree(new_fm); mutex_unlock(&ubi->fm_mutex); return -ENOMEM; } } old_fm = ubi->fm; ubi->fm = NULL; if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) { ubi_err("fastmap too large"); ret = -ENOSPC; goto err; } for (i = 1; i < new_fm->used_blocks; i++) { spin_lock(&ubi->wl_lock); tmp_e = ubi_wl_get_fm_peb(ubi, 0); spin_unlock(&ubi->wl_lock); if (!tmp_e && !old_fm) { int j; ubi_err("could not get any free erase block"); for (j = 1; j < i; j++) ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0); ret = -ENOSPC; goto err; } else if (!tmp_e && old_fm) { ret = erase_block(ubi, old_fm->e[i]->pnum); if (ret < 0) { int j; for (j = 1; j < i; j++) ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0); ubi_err("could not erase old fastmap PEB"); goto err; } new_fm->e[i]->pnum = old_fm->e[i]->pnum; new_fm->e[i]->ec = old_fm->e[i]->ec; } else { new_fm->e[i]->pnum = tmp_e->pnum; new_fm->e[i]->ec = tmp_e->ec; if (old_fm) ubi_wl_put_fm_peb(ubi, old_fm->e[i], i, old_fm->to_be_tortured[i]); } } spin_lock(&ubi->wl_lock); tmp_e = ubi_wl_get_fm_peb(ubi, 1); spin_unlock(&ubi->wl_lock); if (old_fm) { /* no fresh anchor PEB was found, reuse the old one */ if (!tmp_e) { ret = erase_block(ubi, old_fm->e[0]->pnum); if (ret < 0) { int i; ubi_err("could not erase old anchor PEB"); for (i = 1; i < new_fm->used_blocks; i++) ubi_wl_put_fm_peb(ubi, 
new_fm->e[i], i, 0); goto err; } new_fm->e[0]->pnum = old_fm->e[0]->pnum; new_fm->e[0]->ec = ret; } else { /* we've got a new anchor PEB, return the old one */ ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0, old_fm->to_be_tortured[0]); new_fm->e[0]->pnum = tmp_e->pnum; new_fm->e[0]->ec = tmp_e->ec; } } else { if (!tmp_e) { int i; ubi_err("could not find any anchor PEB"); for (i = 1; i < new_fm->used_blocks; i++) ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0); ret = -ENOSPC; goto err; } new_fm->e[0]->pnum = tmp_e->pnum; new_fm->e[0]->ec = tmp_e->ec; } down_write(&ubi->work_sem); down_write(&ubi->fm_sem); ret = ubi_write_fastmap(ubi, new_fm); up_write(&ubi->fm_sem); up_write(&ubi->work_sem); if (ret) goto err; out_unlock: mutex_unlock(&ubi->fm_mutex); kfree(old_fm); return ret; err: kfree(new_fm); ubi_warn("Unable to write new fastmap, err=%i", ret); ret = 0; if (old_fm) { ret = invalidate_fastmap(ubi, old_fm); if (ret < 0) ubi_err("Unable to invalidiate current fastmap!"); else if (ret) ret = 0; } goto out_unlock; }
gpl-2.0
mrimp/SM-N910T_Kernel
drivers/scsi/be2iscsi/be_mgmt.c
1391
42179
/** * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) * * Contact Information: * linux-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ #include <linux/bsg-lib.h> #include <scsi/scsi_transport_iscsi.h> #include <scsi/scsi_bsg_iscsi.h> #include "be_mgmt.h" #include "be_iscsi.h" #include "be_main.h" /* UE Status Low CSR */ static const char * const desc_ue_status_low[] = { "CEV", "CTX", "DBUF", "ERX", "Host", "MPU", "NDMA", "PTC ", "RDMA ", "RXF ", "RXIPS ", "RXULP0 ", "RXULP1 ", "RXULP2 ", "TIM ", "TPOST ", "TPRE ", "TXIPS ", "TXULP0 ", "TXULP1 ", "UC ", "WDMA ", "TXULP2 ", "HOST1 ", "P0_OB_LINK ", "P1_OB_LINK ", "HOST_GPIO ", "MBOX ", "AXGMAC0", "AXGMAC1", "JTAG", "MPU_INTPEND" }; /* UE Status High CSR */ static const char * const desc_ue_status_hi[] = { "LPCMEMHOST", "MGMT_MAC", "PCS0ONLINE", "MPU_IRAM", "PCS1ONLINE", "PCTL0", "PCTL1", "PMEM", "RR", "TXPB", "RXPP", "XAUI", "TXP", "ARM", "IPC", "HOST2", "HOST3", "HOST4", "HOST5", "HOST6", "HOST7", "HOST8", "HOST9", "NETC", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown" }; /* * beiscsi_ue_detec()- Detect Unrecoverable Error on adapter * @phba: Driver priv structure * * Read registers linked to UE and check for the UE status **/ void beiscsi_ue_detect(struct beiscsi_hba *phba) { uint32_t ue_hi = 0, ue_lo = 0; uint32_t ue_mask_hi = 0, ue_mask_lo = 0; uint8_t i = 0; if (phba->ue_detected) return; pci_read_config_dword(phba->pcidev, PCICFG_UE_STATUS_LOW, &ue_lo); pci_read_config_dword(phba->pcidev, PCICFG_UE_STATUS_MASK_LOW, &ue_mask_lo); pci_read_config_dword(phba->pcidev, PCICFG_UE_STATUS_HIGH, 
&ue_hi); pci_read_config_dword(phba->pcidev, PCICFG_UE_STATUS_MASK_HI, &ue_mask_hi); ue_lo = (ue_lo & ~ue_mask_lo); ue_hi = (ue_hi & ~ue_mask_hi); if (ue_lo || ue_hi) { phba->ue_detected = true; beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : Error detected on the adapter\n"); } if (ue_lo) { for (i = 0; ue_lo; ue_lo >>= 1, i++) { if (ue_lo & 1) beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BG_%d : UE_LOW %s bit set\n", desc_ue_status_low[i]); } } if (ue_hi) { for (i = 0; ue_hi; ue_hi >>= 1, i++) { if (ue_hi & 1) beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BG_%d : UE_HIGH %s bit set\n", desc_ue_status_hi[i]); } } } /** * mgmt_reopen_session()- Reopen a session based on reopen_type * @phba: Device priv structure instance * @reopen_type: Type of reopen_session FW should do. * @sess_handle: Session Handle of the session to be re-opened * * return * the TAG used for MBOX Command * **/ unsigned int mgmt_reopen_session(struct beiscsi_hba *phba, unsigned int reopen_type, unsigned int sess_handle) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_cmd_reopen_session_req *req; unsigned int tag = 0; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : In bescsi_get_boot_target\n"); spin_lock(&ctrl->mbox_lock); tag = alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); return tag; } wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS, sizeof(struct be_cmd_reopen_session_resp)); /* set the reopen_type,sess_handle */ req->reopen_type = reopen_type; req->session_handle = sess_handle; be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; } unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_cmd_get_boot_target_req *req; 
unsigned int tag = 0; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : In bescsi_get_boot_target\n"); spin_lock(&ctrl->mbox_lock); tag = alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); return tag; } wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET, sizeof(struct be_cmd_get_boot_target_resp)); be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; } unsigned int mgmt_get_session_info(struct beiscsi_hba *phba, u32 boot_session_handle, struct be_dma_mem *nonemb_cmd) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; unsigned int tag = 0; struct be_cmd_get_session_req *req; struct be_cmd_get_session_resp *resp; struct be_sge *sge; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : In beiscsi_get_session_info\n"); spin_lock(&ctrl->mbox_lock); tag = alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); return tag; } nonemb_cmd->size = sizeof(*resp); req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); wrb = wrb_from_mccq(phba); sge = nonembedded_sgl(wrb); wrb->tag0 |= tag; wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_SESSION_GET_A_SESSION, sizeof(*resp)); req->session_handle = boot_session_handle; sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd->size); be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; } int mgmt_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_fw_cfg *req = embedded_payload(wrb); int status = 0; spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); status = be_mbox_notify(ctrl); if (!status) { struct be_fw_cfg *pfw_cfg; pfw_cfg = req; phba->fw_config.phys_port = pfw_cfg->phys_port; phba->fw_config.iscsi_icd_start = pfw_cfg->ulp[0].icd_base; phba->fw_config.iscsi_icd_count = pfw_cfg->ulp[0].icd_count; phba->fw_config.iscsi_cid_start = pfw_cfg->ulp[0].sq_base; phba->fw_config.iscsi_cid_count = pfw_cfg->ulp[0].sq_count; if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BG_%d : FW reported MAX CXNS as %d\t" "Max Supported = %d.\n", phba->fw_config.iscsi_cid_count, BE2_MAX_SESSIONS); phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2; } } else { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BG_%d : Failed in mgmt_get_fw_config\n"); } spin_unlock(&ctrl->mbox_lock); return status; } int mgmt_check_supported_fw(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba) { struct be_dma_mem nonemb_cmd; struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_mgmt_controller_attributes *req; struct be_sge *sge = nonembedded_sgl(wrb); int status = 0; nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev, sizeof(struct be_mgmt_controller_attributes), &nonemb_cmd.dma); if (nonemb_cmd.va == NULL) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed to allocate memory for " "mgmt_check_supported_fw\n"); return -ENOMEM; } nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes); req = nonemb_cmd.va; memset(req, 0, sizeof(*req)); spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req)); sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd.size); status = be_mbox_notify(ctrl); if (!status) { struct 
be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BG_%d : Firmware Version of CMD : %s\n" "Firmware Version is : %s\n" "Developer Build, not performing version check...\n", resp->params.hba_attribs .flashrom_version_string, resp->params.hba_attribs. firmware_version_string); phba->fw_config.iscsi_features = resp->params.hba_attribs.iscsi_features; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : phba->fw_config.iscsi_features = %d\n", phba->fw_config.iscsi_features); memcpy(phba->fw_ver_str, resp->params.hba_attribs. firmware_version_string, BEISCSI_VER_STRLEN); } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in mgmt_check_supported_fw\n"); spin_unlock(&ctrl->mbox_lock); if (nonemb_cmd.va) pci_free_consistent(ctrl->pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return status; } unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba, struct bsg_job *job, struct be_dma_mem *nonemb_cmd) { struct be_cmd_resp_hdr *resp; struct be_mcc_wrb *wrb = wrb_from_mccq(phba); struct be_sge *mcc_sge = nonembedded_sgl(wrb); unsigned int tag = 0; struct iscsi_bsg_request *bsg_req = job->request; struct be_bsg_vendor_cmd *req = nonemb_cmd->va; unsigned short region, sector_size, sector, offset; nonemb_cmd->size = job->request_payload.payload_len; memset(nonemb_cmd->va, 0, nonemb_cmd->size); resp = nonemb_cmd->va; region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; offset = bsg_req->rqst_data.h_vendor.vendor_cmd[4]; req->region = region; req->sector = sector; req->offset = offset; spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { case BEISCSI_WRITE_FLASH: offset = sector * sector_size + offset; be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_WRITE_FLASH, sizeof(*req)); 
sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, nonemb_cmd->va + offset, job->request_len); break; case BEISCSI_READ_FLASH: be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_READ_FLASH, sizeof(*req)); break; default: beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Unsupported cmd = 0x%x\n\n", bsg_req->rqst_data.h_vendor.vendor_cmd[0]); spin_unlock(&ctrl->mbox_lock); return -ENOSYS; } tag = alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); return tag; } be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, job->request_payload.sg_cnt); mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); mcc_sge->len = cpu_to_le32(nonemb_cmd->size); wrb->tag0 |= tag; be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; } int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb = wrb_from_mccq(phba); struct iscsi_cleanup_req *req = embedded_payload(wrb); int status = 0; spin_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); req->chute = chute; req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba)); req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba)); status = be_mcc_notify_wait(phba); if (status) beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BG_%d : mgmt_epfw_cleanup , FAILED\n"); spin_unlock(&ctrl->mbox_lock); return status; } unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, struct invalidate_command_table *inv_tbl, unsigned int num_invalidate, unsigned int cid, struct be_dma_mem *nonemb_cmd) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_sge *sge; struct invalidate_commands_params_in *req; unsigned int i, tag = 0; spin_lock(&ctrl->mbox_lock); tag = 
alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); return tag; } req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); wrb = wrb_from_mccq(phba); sge = nonembedded_sgl(wrb); wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS, sizeof(*req)); req->ref_handle = 0; req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE; for (i = 0; i < num_invalidate; i++) { req->table[i].icd = inv_tbl->icd; req->table[i].cid = inv_tbl->cid; req->icd_count++; inv_tbl++; } sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd->size); be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; } unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba, struct beiscsi_endpoint *beiscsi_ep, unsigned short cid, unsigned short issue_reset, unsigned short savecfg_flag) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct iscsi_invalidate_connection_params_in *req; unsigned int tag = 0; spin_lock(&ctrl->mbox_lock); tag = alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); return tag; } wrb = wrb_from_mccq(phba); wrb->tag0 |= tag; req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION, sizeof(*req)); req->session_handle = beiscsi_ep->fw_handle; req->cid = cid; if (issue_reset) req->cleanup_type = CMD_ISCSI_CONNECTION_ISSUE_TCP_RST; else req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE; req->save_cfg = savecfg_flag; be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; } unsigned int mgmt_upload_connection(struct beiscsi_hba *phba, unsigned short cid, unsigned int upload_flag) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct tcp_upload_params_in *req; unsigned int tag = 0; 
spin_lock(&ctrl->mbox_lock); tag = alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); return tag; } wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD, OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); req->id = (unsigned short)cid; req->upload_type = (unsigned char)upload_flag; be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; } int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr *dst_addr, struct beiscsi_endpoint *beiscsi_ep, struct be_dma_mem *nonemb_cmd) { struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr; struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct tcp_connect_and_offload_in *req; unsigned short def_hdr_id; unsigned short def_data_id; struct phys_addr template_address = { 0, 0 }; struct phys_addr *ptemplate_address; unsigned int tag = 0; unsigned int i; unsigned short cid = beiscsi_ep->ep_cid; struct be_sge *sge; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba); def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba); ptemplate_address = &template_address; ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); spin_lock(&ctrl->mbox_lock); tag = alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); return tag; } wrb = wrb_from_mccq(phba); memset(wrb, 0, sizeof(*wrb)); sge = nonembedded_sgl(wrb); req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, sizeof(*req)); if (dst_addr->sa_family == PF_INET) { __be32 s_addr = daddr_in->sin_addr.s_addr; req->ip_address.ip_type = BE2_IPV4; 
req->ip_address.addr[0] = s_addr & 0x000000ff; req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8; req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16; req->ip_address.addr[3] = (s_addr & 0xff000000) >> 24; req->tcp_port = ntohs(daddr_in->sin_port); beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr; beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port); beiscsi_ep->ip_type = BE2_IPV4; } else if (dst_addr->sa_family == PF_INET6) { req->ip_address.ip_type = BE2_IPV6; memcpy(&req->ip_address.addr, &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); req->tcp_port = ntohs(daddr_in6->sin6_port); beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port); memcpy(&beiscsi_ep->dst6_addr, &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); beiscsi_ep->ip_type = BE2_IPV6; } else{ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BG_%d : unknown addr family %d\n", dst_addr->sa_family); spin_unlock(&ctrl->mbox_lock); free_mcc_tag(&phba->ctrl, tag); return -EINVAL; } req->cid = cid; i = phba->nxt_cqid++; if (phba->nxt_cqid == phba->num_cpus) phba->nxt_cqid = 0; req->cq_id = phwi_context->be_cq[i].id; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BG_%d : i=%d cq_id=%d\n", i, req->cq_id); req->defq_id = def_hdr_id; req->hdr_ring_id = def_hdr_id; req->data_ring_id = def_data_id; req->do_offload = 1; req->dataout_template_pa.lo = ptemplate_address->lo; req->dataout_template_pa.hi = ptemplate_address->hi; sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd->size); be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; } unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_cmd_get_all_if_id_req *req = embedded_payload(wrb); struct be_cmd_get_all_if_id_req *pbe_allid = req; int status = 0; memset(wrb, 0, sizeof(*wrb)); spin_lock(&ctrl->mbox_lock); be_wrb_hdr_prepare(wrb, sizeof(*req), 
true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID, sizeof(*req)); status = be_mbox_notify(ctrl); if (!status) phba->interface_handle = pbe_allid->if_hndl_list[0]; else { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed in mgmt_get_all_if_id\n"); } spin_unlock(&ctrl->mbox_lock); return status; } /* * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd * @phba: Driver priv structure * @nonemb_cmd: Address of the MBX command issued * @resp_buf: Buffer to copy the MBX cmd response * @resp_buf_len: respone lenght to be copied * **/ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba, struct be_dma_mem *nonemb_cmd, void *resp_buf, int resp_buf_len) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb = wrb_from_mccq(phba); struct be_sge *sge; unsigned int tag; int rc = 0; spin_lock(&ctrl->mbox_lock); tag = alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); rc = -ENOMEM; goto free_cmd; } memset(wrb, 0, sizeof(*wrb)); wrb->tag0 |= tag; sge = nonembedded_sgl(wrb); be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma)); sge->len = cpu_to_le32(nonemb_cmd->size); be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va); if (rc) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : mgmt_exec_nonemb_cmd Failed status\n"); rc = -EIO; goto free_cmd; } if (resp_buf) memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); free_cmd: pci_free_consistent(ctrl->pdev, nonemb_cmd->size, nonemb_cmd->va, nonemb_cmd->dma); return rc; } static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd, int iscsi_cmd, int size) { cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma); if (!cmd->va) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BG_%d : Failed to allocate memory for if 
info\n"); return -ENOMEM; } memset(cmd->va, 0, size); cmd->size = size; be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size); return 0; } static int mgmt_static_ip_modify(struct beiscsi_hba *phba, struct be_cmd_get_if_info_resp *if_info, struct iscsi_iface_param_info *ip_param, struct iscsi_iface_param_info *subnet_param, uint32_t ip_action) { struct be_cmd_set_ip_addr_req *req; struct be_dma_mem nonemb_cmd; uint32_t ip_type; int rc; rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR, sizeof(*req)); if (rc) return rc; ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? BE2_IPV6 : BE2_IPV4 ; req = nonemb_cmd.va; req->ip_params.record_entry_count = 1; req->ip_params.ip_record.action = ip_action; req->ip_params.ip_record.interface_hndl = phba->interface_handle; req->ip_params.ip_record.ip_addr.size_of_structure = sizeof(struct be_ip_addr_subnet_format); req->ip_params.ip_record.ip_addr.ip_type = ip_type; if (ip_action == IP_ACTION_ADD) { memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value, ip_param->len); if (subnet_param) memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, subnet_param->value, subnet_param->len); } else { memcpy(req->ip_params.ip_record.ip_addr.addr, if_info->ip_addr.addr, ip_param->len); memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, if_info->ip_addr.subnet_mask, ip_param->len); } rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); if (rc < 0) beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to Modify existing IP Address\n"); return rc; } static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr, uint32_t gtway_action, uint32_t param_len) { struct be_cmd_set_def_gateway_req *req; struct be_dma_mem nonemb_cmd; int rt_val; rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd, OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY, sizeof(*req)); if (rt_val) return rt_val; req = nonemb_cmd.va; req->action = gtway_action; req->ip_addr.ip_type = BE2_IPV4; 
memcpy(req->ip_addr.addr, gt_addr, param_len); return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); } int mgmt_set_ip(struct beiscsi_hba *phba, struct iscsi_iface_param_info *ip_param, struct iscsi_iface_param_info *subnet_param, uint32_t boot_proto) { struct be_cmd_get_def_gateway_resp gtway_addr_set; struct be_cmd_get_if_info_resp if_info; struct be_cmd_set_dhcp_req *dhcpreq; struct be_cmd_rel_dhcp_req *reldhcp; struct be_dma_mem nonemb_cmd; uint8_t *gtway_addr; uint32_t ip_type; int rc; if (mgmt_get_all_if_id(phba)) return -EIO; memset(&if_info, 0, sizeof(if_info)); ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? BE2_IPV6 : BE2_IPV4 ; rc = mgmt_get_if_info(phba, ip_type, &if_info); if (rc) return rc; if (boot_proto == ISCSI_BOOTPROTO_DHCP) { if (if_info.dhcp_state) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : DHCP Already Enabled\n"); return 0; } /* The ip_param->len is 1 in DHCP case. Setting proper IP len as this it is used while freeing the Static IP. */ ip_param->len = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? 
IP_V6_LEN : IP_V4_LEN; } else { if (if_info.dhcp_state) { memset(&if_info, 0, sizeof(if_info)); rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR, sizeof(*reldhcp)); if (rc) return rc; reldhcp = nonemb_cmd.va; reldhcp->interface_hndl = phba->interface_handle; reldhcp->ip_type = ip_type; rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); if (rc < 0) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to Delete existing dhcp\n"); return rc; } } } /* Delete the Static IP Set */ if (if_info.ip_addr.addr[0]) { rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL, IP_ACTION_DEL); if (rc) return rc; } /* Delete the Gateway settings if mode change is to DHCP */ if (boot_proto == ISCSI_BOOTPROTO_DHCP) { memset(&gtway_addr_set, 0, sizeof(gtway_addr_set)); rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set); if (rc) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to Get Gateway Addr\n"); return rc; } if (gtway_addr_set.ip_addr.addr[0]) { gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr; rc = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL, IP_V4_LEN); if (rc) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to clear Gateway Addr Set\n"); return rc; } } } /* Set Adapter to DHCP/Static Mode */ if (boot_proto == ISCSI_BOOTPROTO_DHCP) { rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR, sizeof(*dhcpreq)); if (rc) return rc; dhcpreq = nonemb_cmd.va; dhcpreq->flags = BLOCKING; dhcpreq->retry_count = 1; dhcpreq->interface_hndl = phba->interface_handle; dhcpreq->ip_type = BE2_DHCP_V4; return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); } else { return mgmt_static_ip_modify(phba, &if_info, ip_param, subnet_param, IP_ACTION_ADD); } return rc; } int mgmt_set_gateway(struct beiscsi_hba *phba, struct iscsi_iface_param_info *gateway_param) { struct be_cmd_get_def_gateway_resp gtway_addr_set; uint8_t *gtway_addr; int 
rt_val; memset(&gtway_addr_set, 0, sizeof(gtway_addr_set)); rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set); if (rt_val) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to Get Gateway Addr\n"); return rt_val; } if (gtway_addr_set.ip_addr.addr[0]) { gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr; rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL, gateway_param->len); if (rt_val) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to clear Gateway Addr Set\n"); return rt_val; } } gtway_addr = (uint8_t *)&gateway_param->value; rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_ADD, gateway_param->len); if (rt_val) beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to Set Gateway Addr\n"); return rt_val; } int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type, struct be_cmd_get_def_gateway_resp *gateway) { struct be_cmd_get_def_gateway_req *req; struct be_dma_mem nonemb_cmd; int rc; rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY, sizeof(*gateway)); if (rc) return rc; req = nonemb_cmd.va; req->ip_type = ip_type; return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, gateway, sizeof(*gateway)); } int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, struct be_cmd_get_if_info_resp *if_info) { struct be_cmd_get_if_info_req *req; struct be_dma_mem nonemb_cmd; int rc; if (mgmt_get_all_if_id(phba)) return -EIO; rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, sizeof(*if_info)); if (rc) return rc; req = nonemb_cmd.va; req->interface_hndl = phba->interface_handle; req->ip_type = ip_type; return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info, sizeof(*if_info)); } int mgmt_get_nic_conf(struct beiscsi_hba *phba, struct be_cmd_get_nic_conf_resp *nic) { struct be_dma_mem nonemb_cmd; int rc; rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG, sizeof(*nic)); if (rc) return rc; return 
mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, nic, sizeof(*nic)); } unsigned int be_cmd_get_initname(struct beiscsi_hba *phba) { unsigned int tag = 0; struct be_mcc_wrb *wrb; struct be_cmd_hba_name *req; struct be_ctrl_info *ctrl = &phba->ctrl; spin_lock(&ctrl->mbox_lock); tag = alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); return tag; } wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_CFG_GET_HBA_NAME, sizeof(*req)); be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; } unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba) { unsigned int tag = 0; struct be_mcc_wrb *wrb; struct be_cmd_ntwk_link_status_req *req; struct be_ctrl_info *ctrl = &phba->ctrl; spin_lock(&ctrl->mbox_lock); tag = alloc_mcc_tag(phba); if (!tag) { spin_unlock(&ctrl->mbox_lock); return tag; } wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req)); be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; } /** * be_mgmt_get_boot_shandle()- Get the session handle * @phba: device priv structure instance * @s_handle: session handle returned for boot session. * * Get the boot target session handle. 
In case of * crashdump mode driver has to issue and MBX Cmd * for FW to login to boot target * * return * Success: 0 * Failure: Non-Zero value * **/ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba, unsigned int *s_handle) { struct be_cmd_get_boot_target_resp *boot_resp; struct be_mcc_wrb *wrb; unsigned int tag; uint8_t boot_retry = 3; int rc; do { /* Get the Boot Target Session Handle and Count*/ tag = mgmt_get_boot_target(phba); if (!tag) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, "BG_%d : Getting Boot Target Info Failed\n"); return -EAGAIN; } rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BG_%d : MBX CMD get_boot_target Failed\n"); return -EBUSY; } boot_resp = embedded_payload(wrb); /* Check if the there are any Boot targets configured */ if (!boot_resp->boot_session_count) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BG_%d ;No boot targets configured\n"); return -ENXIO; } /* FW returns the session handle of the boot session */ if (boot_resp->boot_session_handle != INVALID_SESS_HANDLE) { *s_handle = boot_resp->boot_session_handle; return 0; } /* Issue MBX Cmd to FW to login to the boot target */ tag = mgmt_reopen_session(phba, BE_REOPEN_BOOT_SESSIONS, INVALID_SESS_HANDLE); if (!tag) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BG_%d : mgmt_reopen_session Failed\n"); return -EAGAIN; } rc = beiscsi_mccq_compl(phba, tag, NULL, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BG_%d : mgmt_reopen_session Failed"); return rc; } } while (--boot_retry); /* Couldn't log into the boot target */ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BG_%d : Login to Boot Target Failed\n"); return -ENXIO; } /** * mgmt_set_vlan()- Issue and wait for CMD completion * @phba: device private structure instance * @vlan_tag: VLAN tag * * Issue the MBX Cmd and wait for the 
completion of the * command. * * returns * Success: 0 * Failure: Non-Xero Value **/ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag) { int rc; unsigned int tag; struct be_mcc_wrb *wrb = NULL; tag = be_cmd_set_vlan(phba, vlan_tag); if (!tag) { beiscsi_log(phba, KERN_ERR, (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), "BG_%d : VLAN Setting Failed\n"); return -EBUSY; } rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), "BS_%d : VLAN MBX Cmd Failed\n"); return rc; } return rc; } /** * beiscsi_drvr_ver_disp()- Display the driver Name and Version * @dev: ptr to device not used. * @attr: device attribute, not used. * @buf: contains formatted text driver name and version * * return * size of the formatted string **/ ssize_t beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, BE_NAME "\n"); } /** * beiscsi_fw_ver_disp()- Display Firmware Version * @dev: ptr to device not used. * @attr: device attribute, not used. * @buf: contains formatted text Firmware version * * return * size of the formatted string **/ ssize_t beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct beiscsi_hba *phba = iscsi_host_priv(shost); return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str); } /** * beiscsi_active_cid_disp()- Display Sessions Active * @dev: ptr to device not used. * @attr: device attribute, not used. * @buf: contains formatted text Session Count * * return * size of the formatted string **/ ssize_t beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct beiscsi_hba *phba = iscsi_host_priv(shost); return snprintf(buf, PAGE_SIZE, "%d\n", (phba->params.cxns_per_ctrl - phba->avlbl_cids)); } /** * beiscsi_adap_family_disp()- Display adapter family. 
* @dev: ptr to device to get priv structure * @attr: device attribute, not used. * @buf: contains formatted text driver name and version * * return * size of the formatted string **/ ssize_t beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, char *buf) { uint16_t dev_id = 0; struct Scsi_Host *shost = class_to_shost(dev); struct beiscsi_hba *phba = iscsi_host_priv(shost); dev_id = phba->pcidev->device; switch (dev_id) { case BE_DEVICE_ID1: case OC_DEVICE_ID1: case OC_DEVICE_ID2: return snprintf(buf, PAGE_SIZE, "BE2 Adapter Family\n"); break; case BE_DEVICE_ID2: case OC_DEVICE_ID3: return snprintf(buf, PAGE_SIZE, "BE3-R Adapter Family\n"); break; case OC_SKH_ID1: return snprintf(buf, PAGE_SIZE, "Skyhawk-R Adapter Family\n"); break; default: return snprintf(buf, PAGE_SIZE, "Unknown Adapter Family: 0x%x\n", dev_id); break; } } void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle, struct be_mem_descriptor *mem_descr) { struct iscsi_wrb *pwrb = pwrb_handle->pwrb; memset(pwrb, 0, sizeof(*pwrb)); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, max_send_data_segment_length, pwrb, params->dw[offsetof(struct amap_beiscsi_offload_params, max_send_data_segment_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb, BE_TGT_CTX_UPDT_CMD); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, first_burst_length, pwrb, params->dw[offsetof(struct amap_beiscsi_offload_params, first_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, erl) / 32] & OFFLD_PARAMS_ERL)); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, dde) / 32] & OFFLD_PARAMS_DDE) >> 2); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, hde) / 32] & 
OFFLD_PARAMS_HDE) >> 3); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, imd) / 32] & OFFLD_PARAMS_IMD) >> 5); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, exp_statsn) / 32] + 1)); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx, pwrb, pwrb_handle->wrb_index); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, max_burst_length, pwrb, params->dw[offsetof (struct amap_beiscsi_offload_params, max_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb, pwrb, pwrb_handle->nxt_wrb_index); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, session_state, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack, pwrb, 1); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb, 0); mem_descr += ISCSI_MEM_GLOBAL_HEADER; AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, pad_buffer_addr_hi, pwrb, mem_descr->mem_array[0].bus_address.u.a32.address_hi); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, pad_buffer_addr_lo, pwrb, mem_descr->mem_array[0].bus_address.u.a32.address_lo); } void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle) { struct iscsi_wrb *pwrb = pwrb_handle->pwrb; memset(pwrb, 0, sizeof(*pwrb)); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, max_burst_length, pwrb, params->dw[offsetof (struct amap_beiscsi_offload_params, max_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_burst_length, pwrb, params->dw[offsetof (struct 
amap_beiscsi_offload_params, max_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, type, pwrb, BE_TGT_CTX_UPDT_CMD); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, ptr2nextwrb, pwrb, pwrb_handle->nxt_wrb_index); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, wrb_idx, pwrb, pwrb_handle->wrb_index); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_send_data_segment_length, pwrb, params->dw[offsetof(struct amap_beiscsi_offload_params, max_send_data_segment_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, first_burst_length, pwrb, params->dw[offsetof(struct amap_beiscsi_offload_params, first_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_cxns, pwrb, BEISCSI_MAX_CXNS); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, erl) / 32] & OFFLD_PARAMS_ERL)); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, dde, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, dde) / 32] & OFFLD_PARAMS_DDE) >> 2); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, hde, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, hde) / 32] & OFFLD_PARAMS_HDE) >> 3); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, ir2t, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, imd, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, imd) / 32] & OFFLD_PARAMS_IMD) >> 5); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, data_seq_inorder, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, data_seq_inorder) / 32] & OFFLD_PARAMS_DATA_SEQ_INORDER) >> 6); 
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, pdu_seq_inorder, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, pdu_seq_inorder) / 32] & OFFLD_PARAMS_PDU_SEQ_INORDER) >> 7); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_r2t, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, max_r2t) / 32] & OFFLD_PARAMS_MAX_R2T) >> 8); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, stat_sn, pwrb, (params->dw[offsetof(struct amap_beiscsi_offload_params, exp_statsn) / 32] + 1)); }
gpl-2.0
MpApQ/kernel_huawei
fs/lockd/clntxdr.c
1647
13900
/* * linux/fs/lockd/clntxdr.c * * XDR functions to encode/decode NLM version 3 RPC arguments and results. * NLM version 3 is backwards compatible with NLM versions 1 and 2. * * NLM client-side only. * * Copyright (C) 2010, Oracle. All rights reserved. */ #include <linux/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/stats.h> #include <linux/lockd/lockd.h> #define NLMDBG_FACILITY NLMDBG_XDR #if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) # error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" #endif /* * Declare the space requirements for NLM arguments and replies as * number of 32bit-words */ #define NLM_cookie_sz (1+(NLM_MAXCOOKIELEN>>2)) #define NLM_caller_sz (1+(NLMCLNT_OHSIZE>>2)) #define NLM_owner_sz (1+(NLMCLNT_OHSIZE>>2)) #define NLM_fhandle_sz (1+(NFS2_FHSIZE>>2)) #define NLM_lock_sz (3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz) #define NLM_holder_sz (4+NLM_owner_sz) #define NLM_testargs_sz (NLM_cookie_sz+1+NLM_lock_sz) #define NLM_lockargs_sz (NLM_cookie_sz+4+NLM_lock_sz) #define NLM_cancargs_sz (NLM_cookie_sz+2+NLM_lock_sz) #define NLM_unlockargs_sz (NLM_cookie_sz+NLM_lock_sz) #define NLM_testres_sz (NLM_cookie_sz+1+NLM_holder_sz) #define NLM_res_sz (NLM_cookie_sz+1) #define NLM_norep_sz (0) static s32 loff_t_to_s32(loff_t offset) { s32 res; if (offset >= NLM_OFFSET_MAX) res = NLM_OFFSET_MAX; else if (offset <= -NLM_OFFSET_MAX) res = -NLM_OFFSET_MAX; else res = offset; return res; } static void nlm_compute_offsets(const struct nlm_lock *lock, u32 *l_offset, u32 *l_len) { const struct file_lock *fl = &lock->fl; BUG_ON(fl->fl_start > NLM_OFFSET_MAX); BUG_ON(fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX); *l_offset = loff_t_to_s32(fl->fl_start); if (fl->fl_end == OFFSET_MAX) *l_len = 0; else *l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); } /* * Handle decode buffer overflows out-of-line. 
*/ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) { dprintk("lockd: %s prematurely hit the end of our receive buffer. " "Remaining buffer length is %tu words.\n", func, xdr->end - xdr->p); } /* * Encode/decode NLMv3 basic data types * * Basic NLMv3 data types are not defined in an IETF standards * document. X/Open has a description of these data types that * is useful. See Chapter 10 of "Protocols for Interworking: * XNFS, Version 3W". * * Not all basic data types have their own encoding and decoding * functions. For run-time efficiency, some data types are encoded * or decoded inline. */ static void encode_bool(struct xdr_stream *xdr, const int value) { __be32 *p; p = xdr_reserve_space(xdr, 4); *p = value ? xdr_one : xdr_zero; } static void encode_int32(struct xdr_stream *xdr, const s32 value) { __be32 *p; p = xdr_reserve_space(xdr, 4); *p = cpu_to_be32(value); } /* * typedef opaque netobj<MAXNETOBJ_SZ> */ static void encode_netobj(struct xdr_stream *xdr, const u8 *data, const unsigned int length) { __be32 *p; BUG_ON(length > XDR_MAX_NETOBJ); p = xdr_reserve_space(xdr, 4 + length); xdr_encode_opaque(p, data, length); } static int decode_netobj(struct xdr_stream *xdr, struct xdr_netobj *obj) { u32 length; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; length = be32_to_cpup(p++); if (unlikely(length > XDR_MAX_NETOBJ)) goto out_size; obj->len = length; obj->data = (u8 *)p; return 0; out_size: dprintk("NFS: returned netobj was too long: %u\n", length); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * netobj cookie; */ static void encode_cookie(struct xdr_stream *xdr, const struct nlm_cookie *cookie) { BUG_ON(cookie->len > NLM_MAXCOOKIELEN); encode_netobj(xdr, (u8 *)&cookie->data, cookie->len); } static int decode_cookie(struct xdr_stream *xdr, struct nlm_cookie *cookie) { u32 length; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto 
out_overflow; length = be32_to_cpup(p++); /* apparently HPUX can return empty cookies */ if (length == 0) goto out_hpux; if (length > NLM_MAXCOOKIELEN) goto out_size; p = xdr_inline_decode(xdr, length); if (unlikely(p == NULL)) goto out_overflow; cookie->len = length; memcpy(cookie->data, p, length); return 0; out_hpux: cookie->len = 4; memset(cookie->data, 0, 4); return 0; out_size: dprintk("NFS: returned cookie was too long: %u\n", length); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * netobj fh; */ static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh) { BUG_ON(fh->size != NFS2_FHSIZE); encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE); } /* * enum nlm_stats { * LCK_GRANTED = 0, * LCK_DENIED = 1, * LCK_DENIED_NOLOCKS = 2, * LCK_BLOCKED = 3, * LCK_DENIED_GRACE_PERIOD = 4 * }; * * * struct nlm_stat { * nlm_stats stat; * }; * * NB: we don't swap bytes for the NLM status values. The upper * layers deal directly with the status value in network byte * order. 
*/ static void encode_nlm_stat(struct xdr_stream *xdr, const __be32 stat) { __be32 *p; BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD); p = xdr_reserve_space(xdr, 4); *p = stat; } static int decode_nlm_stat(struct xdr_stream *xdr, __be32 *stat) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; if (unlikely(*p > nlm_lck_denied_grace_period)) goto out_enum; *stat = *p; return 0; out_enum: dprintk("%s: server returned invalid nlm_stats value: %u\n", __func__, be32_to_cpup(p)); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * struct nlm_holder { * bool exclusive; * int uppid; * netobj oh; * unsigned l_offset; * unsigned l_len; * }; */ static void encode_nlm_holder(struct xdr_stream *xdr, const struct nlm_res *result) { const struct nlm_lock *lock = &result->lock; u32 l_offset, l_len; __be32 *p; encode_bool(xdr, lock->fl.fl_type == F_RDLCK); encode_int32(xdr, lock->svid); encode_netobj(xdr, lock->oh.data, lock->oh.len); p = xdr_reserve_space(xdr, 4 + 4); nlm_compute_offsets(lock, &l_offset, &l_len); *p++ = cpu_to_be32(l_offset); *p = cpu_to_be32(l_len); } static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result) { struct nlm_lock *lock = &result->lock; struct file_lock *fl = &lock->fl; u32 exclusive, l_offset, l_len; int error; __be32 *p; s32 end; memset(lock, 0, sizeof(*lock)); locks_init_lock(fl); p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) goto out_overflow; exclusive = be32_to_cpup(p++); lock->svid = be32_to_cpup(p); fl->fl_pid = (pid_t)lock->svid; error = decode_netobj(xdr, &lock->oh); if (unlikely(error)) goto out; p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) goto out_overflow; fl->fl_flags = FL_POSIX; fl->fl_type = exclusive != 0 ? 
F_WRLCK : F_RDLCK; l_offset = be32_to_cpup(p++); l_len = be32_to_cpup(p); end = l_offset + l_len - 1; fl->fl_start = (loff_t)l_offset; if (l_len == 0 || end < 0) fl->fl_end = OFFSET_MAX; else fl->fl_end = (loff_t)end; error = 0; out: return error; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * string caller_name<LM_MAXSTRLEN>; */ static void encode_caller_name(struct xdr_stream *xdr, const char *name) { /* NB: client-side does not set lock->len */ u32 length = strlen(name); __be32 *p; BUG_ON(length > NLM_MAXSTRLEN); p = xdr_reserve_space(xdr, 4 + length); xdr_encode_opaque(p, name, length); } /* * struct nlm_lock { * string caller_name<LM_MAXSTRLEN>; * netobj fh; * netobj oh; * int uppid; * unsigned l_offset; * unsigned l_len; * }; */ static void encode_nlm_lock(struct xdr_stream *xdr, const struct nlm_lock *lock) { u32 l_offset, l_len; __be32 *p; encode_caller_name(xdr, lock->caller); encode_fh(xdr, &lock->fh); encode_netobj(xdr, lock->oh.data, lock->oh.len); p = xdr_reserve_space(xdr, 4 + 4 + 4); *p++ = cpu_to_be32(lock->svid); nlm_compute_offsets(lock, &l_offset, &l_len); *p++ = cpu_to_be32(l_offset); *p = cpu_to_be32(l_len); } /* * NLMv3 XDR encode functions * * NLMv3 argument types are defined in Chapter 10 of The Open Group's * "Protocols for Interworking: XNFS, Version 3W". 
*/ /* * struct nlm_testargs { * netobj cookie; * bool exclusive; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_testargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); } /* * struct nlm_lockargs { * netobj cookie; * bool block; * bool exclusive; * struct nlm_lock alock; * bool reclaim; * int state; * }; */ static void nlm_xdr_enc_lockargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); encode_bool(xdr, args->reclaim); encode_int32(xdr, args->state); } /* * struct nlm_cancargs { * netobj cookie; * bool block; * bool exclusive; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_cancargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); } /* * struct nlm_unlockargs { * netobj cookie; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_nlm_lock(xdr, lock); } /* * struct nlm_res { * netobj cookie; * nlm_stat stat; * }; */ static void nlm_xdr_enc_res(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_res *result) { encode_cookie(xdr, &result->cookie); encode_nlm_stat(xdr, result->status); } /* * union nlm_testrply switch (nlm_stats stat) { * case LCK_DENIED: * struct nlm_holder holder; * default: * void; * }; * * struct nlm_testres { * netobj cookie; * nlm_testrply 
test_stat; * }; */ static void encode_nlm_testrply(struct xdr_stream *xdr, const struct nlm_res *result) { if (result->status == nlm_lck_denied) encode_nlm_holder(xdr, result); } static void nlm_xdr_enc_testres(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_res *result) { encode_cookie(xdr, &result->cookie); encode_nlm_stat(xdr, result->status); encode_nlm_testrply(xdr, result); } /* * NLMv3 XDR decode functions * * NLMv3 result types are defined in Chapter 10 of The Open Group's * "Protocols for Interworking: XNFS, Version 3W". */ /* * union nlm_testrply switch (nlm_stats stat) { * case LCK_DENIED: * struct nlm_holder holder; * default: * void; * }; * * struct nlm_testres { * netobj cookie; * nlm_testrply test_stat; * }; */ static int decode_nlm_testrply(struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_nlm_stat(xdr, &result->status); if (unlikely(error)) goto out; if (result->status == nlm_lck_denied) error = decode_nlm_holder(xdr, result); out: return error; } static int nlm_xdr_dec_testres(struct rpc_rqst *req, struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; error = decode_nlm_testrply(xdr, result); out: return error; } /* * struct nlm_res { * netobj cookie; * nlm_stat stat; * }; */ static int nlm_xdr_dec_res(struct rpc_rqst *req, struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; error = decode_nlm_stat(xdr, &result->status); out: return error; } /* * For NLM, a void procedure really returns nothing */ #define nlm_xdr_dec_norep NULL #define PROC(proc, argtype, restype) \ [NLMPROC_##proc] = { \ .p_proc = NLMPROC_##proc, \ .p_encode = (kxdreproc_t)nlm_xdr_enc_##argtype, \ .p_decode = (kxdrdproc_t)nlm_xdr_dec_##restype, \ .p_arglen = NLM_##argtype##_sz, \ .p_replen = NLM_##restype##_sz, \ .p_statidx = NLMPROC_##proc, \ .p_name = #proc, \ } static 
struct rpc_procinfo nlm_procedures[] = { PROC(TEST, testargs, testres), PROC(LOCK, lockargs, res), PROC(CANCEL, cancargs, res), PROC(UNLOCK, unlockargs, res), PROC(GRANTED, testargs, res), PROC(TEST_MSG, testargs, norep), PROC(LOCK_MSG, lockargs, norep), PROC(CANCEL_MSG, cancargs, norep), PROC(UNLOCK_MSG, unlockargs, norep), PROC(GRANTED_MSG, testargs, norep), PROC(TEST_RES, testres, norep), PROC(LOCK_RES, res, norep), PROC(CANCEL_RES, res, norep), PROC(UNLOCK_RES, res, norep), PROC(GRANTED_RES, res, norep), }; static struct rpc_version nlm_version1 = { .number = 1, .nrprocs = ARRAY_SIZE(nlm_procedures), .procs = nlm_procedures, }; static struct rpc_version nlm_version3 = { .number = 3, .nrprocs = ARRAY_SIZE(nlm_procedures), .procs = nlm_procedures, }; static struct rpc_version *nlm_versions[] = { [1] = &nlm_version1, [3] = &nlm_version3, #ifdef CONFIG_LOCKD_V4 [4] = &nlm_version4, #endif }; static struct rpc_stat nlm_rpc_stats; struct rpc_program nlm_program = { .name = "lockd", .number = NLM_PROGRAM, .nrvers = ARRAY_SIZE(nlm_versions), .version = nlm_versions, .stats = &nlm_rpc_stats, };
gpl-2.0
fedux/linux
drivers/iio/dac/ad5449.c
2159
8711
/* * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog * Converter driver. * * Copyright 2012 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> * * Licensed under the GPL-2. */ #include <linux/device.h> #include <linux/err.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/regulator/consumer.h> #include <asm/unaligned.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/platform_data/ad5449.h> #define AD5449_MAX_CHANNELS 2 #define AD5449_MAX_VREFS 2 #define AD5449_CMD_NOOP 0x0 #define AD5449_CMD_LOAD_AND_UPDATE(x) (0x1 + (x) * 3) #define AD5449_CMD_READ(x) (0x2 + (x) * 3) #define AD5449_CMD_LOAD(x) (0x3 + (x) * 3) #define AD5449_CMD_CTRL 13 #define AD5449_CTRL_SDO_OFFSET 10 #define AD5449_CTRL_DAISY_CHAIN BIT(9) #define AD5449_CTRL_HCLR_TO_MIDSCALE BIT(8) #define AD5449_CTRL_SAMPLE_RISING BIT(7) /** * struct ad5449_chip_info - chip specific information * @channels: Channel specification * @num_channels: Number of channels * @has_ctrl: Chip has a control register */ struct ad5449_chip_info { const struct iio_chan_spec *channels; unsigned int num_channels; bool has_ctrl; }; /** * struct ad5449 - driver instance specific data * @spi: the SPI device for this driver instance * @chip_info: chip model specific constants, available modes etc * @vref_reg: vref supply regulators * @has_sdo: whether the SDO line is connected * @dac_cache: Cache for the DAC values * @data: spi transfer buffers */ struct ad5449 { struct spi_device *spi; const struct ad5449_chip_info *chip_info; struct regulator_bulk_data vref_reg[AD5449_MAX_VREFS]; bool has_sdo; uint16_t dac_cache[AD5449_MAX_CHANNELS]; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. 
*/ __be16 data[2] ____cacheline_aligned; }; enum ad5449_type { ID_AD5426, ID_AD5429, ID_AD5432, ID_AD5439, ID_AD5443, ID_AD5449, }; static int ad5449_write(struct iio_dev *indio_dev, unsigned int addr, unsigned int val) { struct ad5449 *st = iio_priv(indio_dev); int ret; mutex_lock(&indio_dev->mlock); st->data[0] = cpu_to_be16((addr << 12) | val); ret = spi_write(st->spi, st->data, 2); mutex_unlock(&indio_dev->mlock); return ret; } static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr, unsigned int *val) { struct ad5449 *st = iio_priv(indio_dev); int ret; struct spi_message msg; struct spi_transfer t[] = { { .tx_buf = &st->data[0], .len = 2, .cs_change = 1, }, { .tx_buf = &st->data[1], .rx_buf = &st->data[1], .len = 2, }, }; spi_message_init(&msg); spi_message_add_tail(&t[0], &msg); spi_message_add_tail(&t[1], &msg); mutex_lock(&indio_dev->mlock); st->data[0] = cpu_to_be16(addr << 12); st->data[1] = cpu_to_be16(AD5449_CMD_NOOP); ret = spi_sync(st->spi, &msg); if (ret < 0) goto out_unlock; *val = be16_to_cpu(st->data[1]); out_unlock: mutex_unlock(&indio_dev->mlock); return ret; } static int ad5449_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) { struct ad5449 *st = iio_priv(indio_dev); struct regulator_bulk_data *reg; int scale_uv; int ret; switch (info) { case IIO_CHAN_INFO_RAW: if (st->has_sdo) { ret = ad5449_read(indio_dev, AD5449_CMD_READ(chan->address), val); if (ret) return ret; *val &= 0xfff; } else { *val = st->dac_cache[chan->address]; } return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: reg = &st->vref_reg[chan->channel]; scale_uv = regulator_get_voltage(reg->consumer); if (scale_uv < 0) return scale_uv; *val = scale_uv / 1000; *val2 = chan->scan_type.realbits; return IIO_VAL_FRACTIONAL_LOG2; default: break; } return -EINVAL; } static int ad5449_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long info) { struct ad5449 *st = iio_priv(indio_dev); int 
ret; switch (info) { case IIO_CHAN_INFO_RAW: if (val < 0 || val >= (1 << chan->scan_type.realbits)) return -EINVAL; ret = ad5449_write(indio_dev, AD5449_CMD_LOAD_AND_UPDATE(chan->address), val << chan->scan_type.shift); if (ret == 0) st->dac_cache[chan->address] = val; break; default: ret = -EINVAL; } return ret; } static const struct iio_info ad5449_info = { .read_raw = ad5449_read_raw, .write_raw = ad5449_write_raw, .driver_module = THIS_MODULE, }; #define AD5449_CHANNEL(chan, bits) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .output = 1, \ .channel = (chan), \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ .address = (chan), \ .scan_type = IIO_ST('u', (bits), 16, 12 - (bits)), \ } #define DECLARE_AD5449_CHANNELS(name, bits) \ const struct iio_chan_spec name[] = { \ AD5449_CHANNEL(0, bits), \ AD5449_CHANNEL(1, bits), \ } static DECLARE_AD5449_CHANNELS(ad5429_channels, 8); static DECLARE_AD5449_CHANNELS(ad5439_channels, 10); static DECLARE_AD5449_CHANNELS(ad5449_channels, 12); static const struct ad5449_chip_info ad5449_chip_info[] = { [ID_AD5426] = { .channels = ad5429_channels, .num_channels = 1, .has_ctrl = false, }, [ID_AD5429] = { .channels = ad5429_channels, .num_channels = 2, .has_ctrl = true, }, [ID_AD5432] = { .channels = ad5439_channels, .num_channels = 1, .has_ctrl = false, }, [ID_AD5439] = { .channels = ad5439_channels, .num_channels = 2, .has_ctrl = true, }, [ID_AD5443] = { .channels = ad5449_channels, .num_channels = 1, .has_ctrl = false, }, [ID_AD5449] = { .channels = ad5449_channels, .num_channels = 2, .has_ctrl = true, }, }; static const char *ad5449_vref_name(struct ad5449 *st, int n) { if (st->chip_info->num_channels == 1) return "VREF"; if (n == 0) return "VREFA"; else return "VREFB"; } static int ad5449_spi_probe(struct spi_device *spi) { struct ad5449_platform_data *pdata = spi->dev.platform_data; const struct spi_device_id *id = spi_get_device_id(spi); struct iio_dev *indio_dev; struct ad5449 *st; unsigned int i; 
int ret; indio_dev = iio_device_alloc(sizeof(*st)); if (indio_dev == NULL) return -ENOMEM; st = iio_priv(indio_dev); spi_set_drvdata(spi, indio_dev); st->chip_info = &ad5449_chip_info[id->driver_data]; st->spi = spi; for (i = 0; i < st->chip_info->num_channels; ++i) st->vref_reg[i].supply = ad5449_vref_name(st, i); ret = regulator_bulk_get(&spi->dev, st->chip_info->num_channels, st->vref_reg); if (ret) goto error_free; ret = regulator_bulk_enable(st->chip_info->num_channels, st->vref_reg); if (ret) goto error_free_reg; indio_dev->dev.parent = &spi->dev; indio_dev->name = id->name; indio_dev->info = &ad5449_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = st->chip_info->channels; indio_dev->num_channels = st->chip_info->num_channels; if (st->chip_info->has_ctrl) { unsigned int ctrl = 0x00; if (pdata) { if (pdata->hardware_clear_to_midscale) ctrl |= AD5449_CTRL_HCLR_TO_MIDSCALE; ctrl |= pdata->sdo_mode << AD5449_CTRL_SDO_OFFSET; st->has_sdo = pdata->sdo_mode != AD5449_SDO_DISABLED; } else { st->has_sdo = true; } ad5449_write(indio_dev, AD5449_CMD_CTRL, ctrl); } ret = iio_device_register(indio_dev); if (ret) goto error_disable_reg; return 0; error_disable_reg: regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg); error_free_reg: regulator_bulk_free(st->chip_info->num_channels, st->vref_reg); error_free: iio_device_free(indio_dev); return ret; } static int ad5449_spi_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5449 *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg); regulator_bulk_free(st->chip_info->num_channels, st->vref_reg); iio_device_free(indio_dev); return 0; } static const struct spi_device_id ad5449_spi_ids[] = { { "ad5415", ID_AD5449 }, { "ad5426", ID_AD5426 }, { "ad5429", ID_AD5429 }, { "ad5432", ID_AD5432 }, { "ad5439", ID_AD5439 }, { "ad5443", ID_AD5443 }, { "ad5449", ID_AD5449 }, {} }; 
MODULE_DEVICE_TABLE(spi, ad5449_spi_ids); static struct spi_driver ad5449_spi_driver = { .driver = { .name = "ad5449", .owner = THIS_MODULE, }, .probe = ad5449_spi_probe, .remove = ad5449_spi_remove, .id_table = ad5449_spi_ids, }; module_spi_driver(ad5449_spi_driver); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("Analog Devices AD5449 and similar DACs"); MODULE_LICENSE("GPL v2");
gpl-2.0
tbalden/One_X-2.6.39.4
drivers/staging/tidspbridge/rmgr/drv_interface.c
2927
15554
/* * drv_interface.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * DSP/BIOS Bridge driver interface. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* ----------------------------------- Host OS */ #include <plat/dsp.h> #include <dspbridge/host_os.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/pm.h> #ifdef MODULE #include <linux/module.h> #endif #include <linux/device.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/cdev.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- Trace & Debug */ #include <dspbridge/dbc.h> /* ----------------------------------- OS Adaptation Layer */ #include <dspbridge/clk.h> #include <dspbridge/sync.h> /* ----------------------------------- Platform Manager */ #include <dspbridge/dspapi-ioctl.h> #include <dspbridge/dspapi.h> #include <dspbridge/dspdrv.h> /* ----------------------------------- Resource Manager */ #include <dspbridge/pwr.h> /* ----------------------------------- This */ #include <drv_interface.h> #include <dspbridge/resourcecleanup.h> #include <dspbridge/chnl.h> #include <dspbridge/proc.h> #include <dspbridge/dev.h> #include <dspbridge/drv.h> #ifdef CONFIG_TIDSPBRIDGE_DVFS #include <mach-omap2/omap3-opp.h> #endif /* ----------------------------------- Globals */ #define DRIVER_NAME "DspBridge" #define DSPBRIDGE_VERSION "0.3" s32 dsp_debug; struct platform_device *omap_dspbridge_dev; struct device *bridge; /* This is a test variable used by Bridge to test different sleep states 
*/ s32 dsp_test_sleepstate; static struct cdev bridge_cdev; static struct class *bridge_class; static u32 driver_context; static s32 driver_major; static char *base_img; char *iva_img; static s32 shm_size = 0x500000; /* 5 MB */ static int tc_wordswapon; /* Default value is always false */ #ifdef CONFIG_TIDSPBRIDGE_RECOVERY #define REC_TIMEOUT 5000 /*recovery timeout in msecs */ static atomic_t bridge_cref; /* number of bridge open handles */ static struct workqueue_struct *bridge_rec_queue; static struct work_struct bridge_recovery_work; static DECLARE_COMPLETION(bridge_comp); static DECLARE_COMPLETION(bridge_open_comp); static bool recover; #endif #ifdef CONFIG_PM struct omap34_xx_bridge_suspend_data { int suspended; wait_queue_head_t suspend_wq; }; static struct omap34_xx_bridge_suspend_data bridge_suspend_data; static int omap34_xxbridge_suspend_lockout(struct omap34_xx_bridge_suspend_data *s, struct file *f) { if ((s)->suspended) { if ((f)->f_flags & O_NONBLOCK) return -EPERM; wait_event_interruptible((s)->suspend_wq, (s)->suspended == 0); } return 0; } #endif module_param(dsp_debug, int, 0); MODULE_PARM_DESC(dsp_debug, "Wait after loading DSP image. default = false"); module_param(dsp_test_sleepstate, int, 0); MODULE_PARM_DESC(dsp_test_sleepstate, "DSP Sleep state = 0"); module_param(base_img, charp, 0); MODULE_PARM_DESC(base_img, "DSP base image, default = NULL"); module_param(shm_size, int, 0); MODULE_PARM_DESC(shm_size, "shm size, default = 4 MB, minimum = 64 KB"); module_param(tc_wordswapon, int, 0); MODULE_PARM_DESC(tc_wordswapon, "TC Word Swap Option. 
default = 0"); MODULE_AUTHOR("Texas Instruments"); MODULE_LICENSE("GPL"); MODULE_VERSION(DSPBRIDGE_VERSION); static char *driver_name = DRIVER_NAME; static const struct file_operations bridge_fops = { .open = bridge_open, .release = bridge_release, .unlocked_ioctl = bridge_ioctl, .mmap = bridge_mmap, .llseek = noop_llseek, }; #ifdef CONFIG_PM static u32 time_out = 1000; #ifdef CONFIG_TIDSPBRIDGE_DVFS s32 dsp_max_opps = VDD1_OPP5; #endif /* Maximum Opps that can be requested by IVA */ /*vdd1 rate table */ #ifdef CONFIG_TIDSPBRIDGE_DVFS const struct omap_opp vdd1_rate_table_bridge[] = { {0, 0, 0}, /*OPP1 */ {S125M, VDD1_OPP1, 0}, /*OPP2 */ {S250M, VDD1_OPP2, 0}, /*OPP3 */ {S500M, VDD1_OPP3, 0}, /*OPP4 */ {S550M, VDD1_OPP4, 0}, /*OPP5 */ {S600M, VDD1_OPP5, 0}, }; #endif #endif struct omap_dsp_platform_data *omap_dspbridge_pdata; u32 vdd1_dsp_freq[6][4] = { {0, 0, 0, 0}, /*OPP1 */ {0, 90000, 0, 86000}, /*OPP2 */ {0, 180000, 80000, 170000}, /*OPP3 */ {0, 360000, 160000, 340000}, /*OPP4 */ {0, 396000, 325000, 376000}, /*OPP5 */ {0, 430000, 355000, 430000}, }; #ifdef CONFIG_TIDSPBRIDGE_RECOVERY static void bridge_recover(struct work_struct *work) { struct dev_object *dev; struct cfg_devnode *dev_node; if (atomic_read(&bridge_cref)) { INIT_COMPLETION(bridge_comp); while (!wait_for_completion_timeout(&bridge_comp, msecs_to_jiffies(REC_TIMEOUT))) pr_info("%s:%d handle(s) still opened\n", __func__, atomic_read(&bridge_cref)); } dev = dev_get_first(); dev_get_dev_node(dev, &dev_node); if (!dev_node || proc_auto_start(dev_node, dev)) pr_err("DSP could not be restarted\n"); recover = false; complete_all(&bridge_open_comp); } void bridge_recover_schedule(void) { INIT_COMPLETION(bridge_open_comp); recover = true; queue_work(bridge_rec_queue, &bridge_recovery_work); } #endif #ifdef CONFIG_TIDSPBRIDGE_DVFS static int dspbridge_scale_notification(struct notifier_block *op, unsigned long val, void *ptr) { struct omap_dsp_platform_data *pdata = omap_dspbridge_dev->dev.platform_data; if 
(CPUFREQ_POSTCHANGE == val && pdata->dsp_get_opp) pwr_pm_post_scale(PRCM_VDD1, pdata->dsp_get_opp()); return 0; } static struct notifier_block iva_clk_notifier = { .notifier_call = dspbridge_scale_notification, NULL, }; #endif /** * omap3_bridge_startup() - perform low lever initializations * @pdev: pointer to platform device * * Initializes recovery, PM and DVFS required data, before calling * clk and memory init routines. */ static int omap3_bridge_startup(struct platform_device *pdev) { struct omap_dsp_platform_data *pdata = pdev->dev.platform_data; struct drv_data *drv_datap = NULL; u32 phys_membase, phys_memsize; int err; #ifdef CONFIG_TIDSPBRIDGE_RECOVERY bridge_rec_queue = create_workqueue("bridge_rec_queue"); INIT_WORK(&bridge_recovery_work, bridge_recover); INIT_COMPLETION(bridge_comp); #endif #ifdef CONFIG_PM /* Initialize the wait queue */ bridge_suspend_data.suspended = 0; init_waitqueue_head(&bridge_suspend_data.suspend_wq); #ifdef CONFIG_TIDSPBRIDGE_DVFS for (i = 0; i < 6; i++) pdata->mpu_speed[i] = vdd1_rate_table_bridge[i].rate; err = cpufreq_register_notifier(&iva_clk_notifier, CPUFREQ_TRANSITION_NOTIFIER); if (err) pr_err("%s: clk_notifier_register failed for iva2_ck\n", __func__); #endif #endif dsp_clk_init(); drv_datap = kzalloc(sizeof(struct drv_data), GFP_KERNEL); if (!drv_datap) { err = -ENOMEM; goto err1; } drv_datap->shm_size = shm_size; drv_datap->tc_wordswapon = tc_wordswapon; if (base_img) { drv_datap->base_img = kmalloc(strlen(base_img) + 1, GFP_KERNEL); if (!drv_datap->base_img) { err = -ENOMEM; goto err2; } strncpy(drv_datap->base_img, base_img, strlen(base_img) + 1); } dev_set_drvdata(bridge, drv_datap); if (shm_size < 0x10000) { /* 64 KB */ err = -EINVAL; pr_err("%s: shm size must be at least 64 KB\n", __func__); goto err3; } dev_dbg(bridge, "%s: requested shm_size = 0x%x\n", __func__, shm_size); phys_membase = pdata->phys_mempool_base; phys_memsize = pdata->phys_mempool_size; if (phys_membase > 0 && phys_memsize > 0) 
mem_ext_phys_pool_init(phys_membase, phys_memsize); if (tc_wordswapon) dev_dbg(bridge, "%s: TC Word Swap is enabled\n", __func__); driver_context = dsp_init(&err); if (err) { pr_err("DSP Bridge driver initialization failed\n"); goto err4; } return 0; err4: mem_ext_phys_pool_release(); err3: kfree(drv_datap->base_img); err2: kfree(drv_datap); err1: #ifdef CONFIG_TIDSPBRIDGE_DVFS cpufreq_unregister_notifier(&iva_clk_notifier, CPUFREQ_TRANSITION_NOTIFIER); #endif dsp_clk_exit(); return err; } static int __devinit omap34_xx_bridge_probe(struct platform_device *pdev) { int err; dev_t dev = 0; #ifdef CONFIG_TIDSPBRIDGE_DVFS int i = 0; #endif omap_dspbridge_dev = pdev; /* Global bridge device */ bridge = &omap_dspbridge_dev->dev; /* Bridge low level initializations */ err = omap3_bridge_startup(pdev); if (err) goto err1; /* use 2.6 device model */ err = alloc_chrdev_region(&dev, 0, 1, driver_name); if (err) { pr_err("%s: Can't get major %d\n", __func__, driver_major); goto err1; } cdev_init(&bridge_cdev, &bridge_fops); bridge_cdev.owner = THIS_MODULE; err = cdev_add(&bridge_cdev, dev, 1); if (err) { pr_err("%s: Failed to add bridge device\n", __func__); goto err2; } /* udev support */ bridge_class = class_create(THIS_MODULE, "ti_bridge"); if (IS_ERR(bridge_class)) { pr_err("%s: Error creating bridge class\n", __func__); goto err3; } driver_major = MAJOR(dev); device_create(bridge_class, NULL, MKDEV(driver_major, 0), NULL, "DspBridge"); pr_info("DSP Bridge driver loaded\n"); return 0; err3: cdev_del(&bridge_cdev); err2: unregister_chrdev_region(dev, 1); err1: return err; } static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev) { dev_t devno; bool ret; int status = 0; struct drv_data *drv_datap = dev_get_drvdata(bridge); /* Retrieve the Object handle from the driver data */ if (!drv_datap || !drv_datap->drv_object) { status = -ENODATA; pr_err("%s: Failed to retrieve the object handle\n", __func__); goto func_cont; } #ifdef CONFIG_TIDSPBRIDGE_DVFS if 
(cpufreq_unregister_notifier(&iva_clk_notifier, CPUFREQ_TRANSITION_NOTIFIER)) pr_err("%s: cpufreq_unregister_notifier failed for iva2_ck\n", __func__); #endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */ if (driver_context) { /* Put the DSP in reset state */ ret = dsp_deinit(driver_context); driver_context = 0; DBC_ASSERT(ret == true); } func_cont: mem_ext_phys_pool_release(); dsp_clk_exit(); devno = MKDEV(driver_major, 0); cdev_del(&bridge_cdev); unregister_chrdev_region(devno, 1); if (bridge_class) { /* remove the device from sysfs */ device_destroy(bridge_class, MKDEV(driver_major, 0)); class_destroy(bridge_class); } return 0; } #ifdef CONFIG_PM static int BRIDGE_SUSPEND(struct platform_device *pdev, pm_message_t state) { u32 status; u32 command = PWR_EMERGENCYDEEPSLEEP; status = pwr_sleep_dsp(command, time_out); if (status) return -1; bridge_suspend_data.suspended = 1; return 0; } static int BRIDGE_RESUME(struct platform_device *pdev) { u32 status; status = pwr_wake_dsp(time_out); if (status) return -1; bridge_suspend_data.suspended = 0; wake_up(&bridge_suspend_data.suspend_wq); return 0; } #else #define BRIDGE_SUSPEND NULL #define BRIDGE_RESUME NULL #endif static struct platform_driver bridge_driver = { .driver = { .name = "omap-dsp", }, .probe = omap34_xx_bridge_probe, .remove = __devexit_p(omap34_xx_bridge_remove), .suspend = BRIDGE_SUSPEND, .resume = BRIDGE_RESUME, }; static int __init bridge_init(void) { return platform_driver_register(&bridge_driver); } static void __exit bridge_exit(void) { platform_driver_unregister(&bridge_driver); } /* * This function is called when an application opens handle to the * bridge driver. */ static int bridge_open(struct inode *ip, struct file *filp) { int status = 0; struct process_context *pr_ctxt = NULL; /* * Allocate a new process context and insert it into global * process context list. 
*/ #ifdef CONFIG_TIDSPBRIDGE_RECOVERY if (recover) { if (filp->f_flags & O_NONBLOCK || wait_for_completion_interruptible(&bridge_open_comp)) return -EBUSY; } #endif pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL); if (pr_ctxt) { pr_ctxt->res_state = PROC_RES_ALLOCATED; spin_lock_init(&pr_ctxt->dmm_map_lock); INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); spin_lock_init(&pr_ctxt->dmm_rsv_lock); INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list); pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); if (pr_ctxt->node_id) { idr_init(pr_ctxt->node_id); } else { status = -ENOMEM; goto err; } pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL); if (pr_ctxt->stream_id) idr_init(pr_ctxt->stream_id); else status = -ENOMEM; } else { status = -ENOMEM; } err: filp->private_data = pr_ctxt; #ifdef CONFIG_TIDSPBRIDGE_RECOVERY if (!status) atomic_inc(&bridge_cref); #endif return status; } /* * This function is called when an application closes handle to the bridge * driver. */ static int bridge_release(struct inode *ip, struct file *filp) { int status = 0; struct process_context *pr_ctxt; if (!filp->private_data) { status = -EIO; goto err; } pr_ctxt = filp->private_data; flush_signals(current); drv_remove_all_resources(pr_ctxt); proc_detach(pr_ctxt); kfree(pr_ctxt); filp->private_data = NULL; err: #ifdef CONFIG_TIDSPBRIDGE_RECOVERY if (!atomic_dec_return(&bridge_cref)) complete(&bridge_comp); #endif return status; } /* This function provides IO interface to the bridge driver. 
*/ static long bridge_ioctl(struct file *filp, unsigned int code, unsigned long args) { int status; u32 retval = 0; union trapped_args buf_in; DBC_REQUIRE(filp != NULL); #ifdef CONFIG_TIDSPBRIDGE_RECOVERY if (recover) { status = -EIO; goto err; } #endif #ifdef CONFIG_PM status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp); if (status != 0) return status; #endif if (!filp->private_data) { status = -EIO; goto err; } status = copy_from_user(&buf_in, (union trapped_args *)args, sizeof(union trapped_args)); if (!status) { status = api_call_dev_ioctl(code, &buf_in, &retval, filp->private_data); if (!status) { status = retval; } else { dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x " "status 0x%x\n", __func__, code, status); status = -1; } } err: return status; } /* This function maps kernel space memory to user space memory. */ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) { u32 offset = vma->vm_pgoff << PAGE_SHIFT; u32 status; DBC_ASSERT(vma->vm_start < vma->vm_end); vma->vm_flags |= VM_RESERVED | VM_IO; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); dev_dbg(bridge, "%s: vm filp %p offset %x start %lx end %lx page_prot " "%lx flags %lx\n", __func__, filp, offset, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma->vm_flags); status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); if (status != 0) status = -EAGAIN; return status; } /* To remove all process resources before removing the process from the * process context list */ int drv_remove_all_resources(void *process_ctxt) { int status = 0; struct process_context *ctxt = (struct process_context *)process_ctxt; drv_remove_all_strm_res_elements(ctxt); drv_remove_all_node_res_elements(ctxt); drv_remove_all_dmm_res_elements(ctxt); ctxt->res_state = PROC_RES_FREED; return status; } /* Bridge driver initialization and de-initialization functions */ module_init(bridge_init); module_exit(bridge_exit);
gpl-2.0
SlimRoms/kernel_sony_apq8064
arch/ia64/kernel/pci-dma.c
4463
2742
/* * Dynamic DMA mapping support. */ #include <linux/types.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/dmar.h> #include <asm/iommu.h> #include <asm/machvec.h> #include <linux/dma-mapping.h> #ifdef CONFIG_INTEL_IOMMU #include <linux/kernel.h> #include <asm/page.h> dma_addr_t bad_dma_address __read_mostly; EXPORT_SYMBOL(bad_dma_address); static int iommu_sac_force __read_mostly; int no_iommu __read_mostly; #ifdef CONFIG_IOMMU_DEBUG int force_iommu __read_mostly = 1; #else int force_iommu __read_mostly; #endif int iommu_pass_through; int iommu_group_mf; /* Dummy device used for NULL arguments (normally ISA). Better would be probably a smaller DMA mask, but this is bug-to-bug compatible to i386. */ struct device fallback_dev = { .init_name = "fallback device", .coherent_dma_mask = DMA_BIT_MASK(32), .dma_mask = &fallback_dev.coherent_dma_mask, }; extern struct dma_map_ops intel_dma_ops; static int __init pci_iommu_init(void) { if (iommu_detected) intel_iommu_init(); return 0; } /* Must execute after PCI subsystem */ fs_initcall(pci_iommu_init); void pci_iommu_shutdown(void) { return; } void __init iommu_dma_init(void) { return; } int iommu_dma_supported(struct device *dev, u64 mask) { /* Copied from i386. Doesn't make much sense, because it will only work for pci_alloc_coherent. The caller just has to use GFP_DMA in this case. */ if (mask < DMA_BIT_MASK(24)) return 0; /* Tell the device to use SAC when IOMMU force is on. This allows the driver to use cheaper accesses in some cases. Problem with this is that if we overflow the IOMMU area and return DAC as fallback address the device may not handle it correctly. As a special case some controllers have a 39bit address mode that is as efficient as 32bit (aic79xx). Don't force SAC for these. Assume all masks <= 40 bits are of this type. Normally this doesn't make any difference, but gives more gentle handling of IOMMU overflow. 
*/ if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) { dev_info(dev, "Force SAC with mask %llx\n", mask); return 0; } return 1; } EXPORT_SYMBOL(iommu_dma_supported); void __init pci_iommu_alloc(void) { dma_ops = &intel_dma_ops; dma_ops->sync_single_for_cpu = machvec_dma_sync_single; dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg; dma_ops->sync_single_for_device = machvec_dma_sync_single; dma_ops->sync_sg_for_device = machvec_dma_sync_sg; dma_ops->dma_supported = iommu_dma_supported; /* * The order of these functions is important for * fall-back/fail-over reasons */ detect_intel_iommu(); #ifdef CONFIG_SWIOTLB pci_swiotlb_init(); #endif } #endif
gpl-2.0
Flemmard/android_kernel_htc_msm8974
drivers/media/video/davinci/isif.c
4975
30834
/* * Copyright (C) 2008-2009 Texas Instruments Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Image Sensor Interface (ISIF) driver * * This driver is for configuring the ISIF IP available on DM365 or any other * TI SoCs. This is used for capturing yuv or bayer video or image data * from a decoder or sensor. This IP is similar to the CCDC IP on DM355 * and DM6446, but with enhanced or additional ip blocks. The driver * configures the ISIF upon commands from the vpfe bridge driver through * ccdc_hw_device interface. 
* * TODO: 1) Raw bayer parameter settings and bayer capture * 2) Add support for control ioctl */ #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/videodev2.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/module.h> #include <mach/mux.h> #include <media/davinci/isif.h> #include <media/davinci/vpss.h> #include "isif_regs.h" #include "ccdc_hw_device.h" /* Defaults for module configuration parameters */ static struct isif_config_params_raw isif_config_defaults = { .linearize = { .en = 0, .corr_shft = ISIF_NO_SHIFT, .scale_fact = {1, 0}, }, .df_csc = { .df_or_csc = 0, .csc = { .en = 0, }, }, .dfc = { .en = 0, }, .bclamp = { .en = 0, }, .gain_offset = { .gain = { .r_ye = {1, 0}, .gr_cy = {1, 0}, .gb_g = {1, 0}, .b_mg = {1, 0}, }, }, .culling = { .hcpat_odd = 0xff, .hcpat_even = 0xff, .vcpat = 0xff, }, .compress = { .alg = ISIF_ALAW, }, }; /* ISIF operation configuration */ static struct isif_oper_config { struct device *dev; enum vpfe_hw_if_type if_type; struct isif_ycbcr_config ycbcr; struct isif_params_raw bayer; enum isif_data_pack data_pack; /* Master clock */ struct clk *mclk; /* ISIF base address */ void __iomem *base_addr; /* ISIF Linear Table 0 */ void __iomem *linear_tbl0_addr; /* ISIF Linear Table 1 */ void __iomem *linear_tbl1_addr; } isif_cfg = { .ycbcr = { .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT, .frm_fmt = CCDC_FRMFMT_INTERLACED, .win = ISIF_WIN_NTSC, .fid_pol = VPFE_PINPOL_POSITIVE, .vd_pol = VPFE_PINPOL_POSITIVE, .hd_pol = VPFE_PINPOL_POSITIVE, .pix_order = CCDC_PIXORDER_CBYCRY, .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED, }, .bayer = { .pix_fmt = CCDC_PIXFMT_RAW, .frm_fmt = CCDC_FRMFMT_PROGRESSIVE, .win = ISIF_WIN_VGA, .fid_pol = VPFE_PINPOL_POSITIVE, .vd_pol = VPFE_PINPOL_POSITIVE, .hd_pol = VPFE_PINPOL_POSITIVE, .gain = { .r_ye = {1, 0}, .gr_cy = {1, 0}, .gb_g = {1, 0}, .b_mg = {1, 0}, }, .cfa_pat = ISIF_CFA_PAT_MOSAIC, .data_msb = ISIF_BIT_MSB_11, .config_params = { 
.data_shift = ISIF_NO_SHIFT, .col_pat_field0 = { .olop = ISIF_GREEN_BLUE, .olep = ISIF_BLUE, .elop = ISIF_RED, .elep = ISIF_GREEN_RED, }, .col_pat_field1 = { .olop = ISIF_GREEN_BLUE, .olep = ISIF_BLUE, .elop = ISIF_RED, .elep = ISIF_GREEN_RED, }, .test_pat_gen = 0, }, }, .data_pack = ISIF_DATA_PACK8, }; /* Raw Bayer formats */ static const u32 isif_raw_bayer_pix_formats[] = { V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16}; /* Raw YUV formats */ static const u32 isif_raw_yuv_pix_formats[] = { V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV}; /* register access routines */ static inline u32 regr(u32 offset) { return __raw_readl(isif_cfg.base_addr + offset); } static inline void regw(u32 val, u32 offset) { __raw_writel(val, isif_cfg.base_addr + offset); } /* reg_modify() - read, modify and write register */ static inline u32 reg_modify(u32 mask, u32 val, u32 offset) { u32 new_val = (regr(offset) & ~mask) | (val & mask); regw(new_val, offset); return new_val; } static inline void regw_lin_tbl(u32 val, u32 offset, int i) { if (!i) __raw_writel(val, isif_cfg.linear_tbl0_addr + offset); else __raw_writel(val, isif_cfg.linear_tbl1_addr + offset); } static void isif_disable_all_modules(void) { /* disable BC */ regw(0, CLAMPCFG); /* disable vdfc */ regw(0, DFCCTL); /* disable CSC */ regw(0, CSCCTL); /* disable linearization */ regw(0, LINCFG0); /* disable other modules here as they are supported */ } static void isif_enable(int en) { if (!en) { /* Before disable isif, disable all ISIF modules */ isif_disable_all_modules(); /* * wait for next VD. Assume lowest scan rate is 12 Hz. 
So * 100 msec delay is good enough */ msleep(100); } reg_modify(ISIF_SYNCEN_VDHDEN_MASK, en, SYNCEN); } static void isif_enable_output_to_sdram(int en) { reg_modify(ISIF_SYNCEN_WEN_MASK, en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN); } static void isif_config_culling(struct isif_cul *cul) { u32 val; /* Horizontal pattern */ val = (cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT) | cul->hcpat_odd; regw(val, CULH); /* vertical pattern */ regw(cul->vcpat, CULV); /* LPF */ reg_modify(ISIF_LPF_MASK << ISIF_LPF_SHIFT, cul->en_lpf << ISIF_LPF_SHIFT, MODESET); } static void isif_config_gain_offset(void) { struct isif_gain_offsets_adj *gain_off_p = &isif_cfg.bayer.config_params.gain_offset; u32 val; val = (!!gain_off_p->gain_sdram_en << GAIN_SDRAM_EN_SHIFT) | (!!gain_off_p->gain_ipipe_en << GAIN_IPIPE_EN_SHIFT) | (!!gain_off_p->gain_h3a_en << GAIN_H3A_EN_SHIFT) | (!!gain_off_p->offset_sdram_en << OFST_SDRAM_EN_SHIFT) | (!!gain_off_p->offset_ipipe_en << OFST_IPIPE_EN_SHIFT) | (!!gain_off_p->offset_h3a_en << OFST_H3A_EN_SHIFT); reg_modify(GAIN_OFFSET_EN_MASK, val, CGAMMAWD); val = (gain_off_p->gain.r_ye.integer << GAIN_INTEGER_SHIFT) | gain_off_p->gain.r_ye.decimal; regw(val, CRGAIN); val = (gain_off_p->gain.gr_cy.integer << GAIN_INTEGER_SHIFT) | gain_off_p->gain.gr_cy.decimal; regw(val, CGRGAIN); val = (gain_off_p->gain.gb_g.integer << GAIN_INTEGER_SHIFT) | gain_off_p->gain.gb_g.decimal; regw(val, CGBGAIN); val = (gain_off_p->gain.b_mg.integer << GAIN_INTEGER_SHIFT) | gain_off_p->gain.b_mg.decimal; regw(val, CBGAIN); regw(gain_off_p->offset, COFSTA); } static void isif_restore_defaults(void) { enum vpss_ccdc_source_sel source = VPSS_CCDCIN; dev_dbg(isif_cfg.dev, "\nstarting isif_restore_defaults..."); isif_cfg.bayer.config_params = isif_config_defaults; /* Enable clock to ISIF, IPIPEIF and BL */ vpss_enable_clock(VPSS_CCDC_CLOCK, 1); vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1); vpss_enable_clock(VPSS_BL_CLOCK, 1); /* Set default offset and gain */ isif_config_gain_offset(); 
vpss_select_ccdc_source(source); dev_dbg(isif_cfg.dev, "\nEnd of isif_restore_defaults..."); } static int isif_open(struct device *device) { isif_restore_defaults(); return 0; } /* This function will configure the window size to be capture in ISIF reg */ static void isif_setwin(struct v4l2_rect *image_win, enum ccdc_frmfmt frm_fmt, int ppc) { int horz_start, horz_nr_pixels; int vert_start, vert_nr_lines; int mid_img = 0; dev_dbg(isif_cfg.dev, "\nStarting isif_setwin..."); /* * ppc - per pixel count. indicates how many pixels per cell * output to SDRAM. example, for ycbcr, it is one y and one c, so 2. * raw capture this is 1 */ horz_start = image_win->left << (ppc - 1); horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1; /* Writing the horizontal info into the registers */ regw(horz_start & START_PX_HOR_MASK, SPH); regw(horz_nr_pixels & NUM_PX_HOR_MASK, LNH); vert_start = image_win->top; if (frm_fmt == CCDC_FRMFMT_INTERLACED) { vert_nr_lines = (image_win->height >> 1) - 1; vert_start >>= 1; /* To account for VD since line 0 doesn't have any data */ vert_start += 1; } else { /* To account for VD since line 0 doesn't have any data */ vert_start += 1; vert_nr_lines = image_win->height - 1; /* configure VDINT0 and VDINT1 */ mid_img = vert_start + (image_win->height / 2); regw(mid_img, VDINT1); } regw(0, VDINT0); regw(vert_start & START_VER_ONE_MASK, SLV0); regw(vert_start & START_VER_TWO_MASK, SLV1); regw(vert_nr_lines & NUM_LINES_VER, LNV); } static void isif_config_bclamp(struct isif_black_clamp *bc) { u32 val; /* * DC Offset is always added to image data irrespective of bc enable * status */ regw(bc->dc_offset, CLDCOFST); if (bc->en) { val = bc->bc_mode_color << ISIF_BC_MODE_COLOR_SHIFT; /* Enable BC and horizontal clamp caculation paramaters */ val = val | 1 | (bc->horz.mode << ISIF_HORZ_BC_MODE_SHIFT); regw(val, CLAMPCFG); if (bc->horz.mode != ISIF_HORZ_BC_DISABLE) { /* * Window count for calculation * Base window selection * pixel limit * Horizontal size of 
window * vertical size of the window * Horizontal start position of the window * Vertical start position of the window */ val = bc->horz.win_count_calc | ((!!bc->horz.base_win_sel_calc) << ISIF_HORZ_BC_WIN_SEL_SHIFT) | ((!!bc->horz.clamp_pix_limit) << ISIF_HORZ_BC_PIX_LIMIT_SHIFT) | (bc->horz.win_h_sz_calc << ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) | (bc->horz.win_v_sz_calc << ISIF_HORZ_BC_WIN_V_SIZE_SHIFT); regw(val, CLHWIN0); regw(bc->horz.win_start_h_calc, CLHWIN1); regw(bc->horz.win_start_v_calc, CLHWIN2); } /* vertical clamp caculation paramaters */ /* Reset clamp value sel for previous line */ val |= (bc->vert.reset_val_sel << ISIF_VERT_BC_RST_VAL_SEL_SHIFT) | (bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT); regw(val, CLVWIN0); /* Optical Black horizontal start position */ regw(bc->vert.ob_start_h, CLVWIN1); /* Optical Black vertical start position */ regw(bc->vert.ob_start_v, CLVWIN2); /* Optical Black vertical size for calculation */ regw(bc->vert.ob_v_sz_calc, CLVWIN3); /* Vertical start position for BC subtraction */ regw(bc->vert_start_sub, CLSV); } } static void isif_config_linearization(struct isif_linearize *linearize) { u32 val, i; if (!linearize->en) { regw(0, LINCFG0); return; } /* shift value for correction & enable linearization (set lsb) */ val = (linearize->corr_shft << ISIF_LIN_CORRSFT_SHIFT) | 1; regw(val, LINCFG0); /* Scale factor */ val = ((!!linearize->scale_fact.integer) << ISIF_LIN_SCALE_FACT_INTEG_SHIFT) | linearize->scale_fact.decimal; regw(val, LINCFG1); for (i = 0; i < ISIF_LINEAR_TAB_SIZE; i++) { if (i % 2) regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 1); else regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 0); } } static int isif_config_dfc(struct isif_dfc *vdfc) { /* initialize retries to loop for max ~ 250 usec */ u32 val, count, retries = loops_per_jiffy / (4000/HZ); int i; if (!vdfc->en) return 0; /* Correction mode */ val = (vdfc->corr_mode << ISIF_VDFC_CORR_MOD_SHIFT); /* Correct whole line or partial */ if 
(vdfc->corr_whole_line) val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT; /* level shift value */ val |= vdfc->def_level_shift << ISIF_VDFC_LEVEL_SHFT_SHIFT; regw(val, DFCCTL); /* Defect saturation level */ regw(vdfc->def_sat_level, VDFSATLV); regw(vdfc->table[0].pos_vert, DFCMEM0); regw(vdfc->table[0].pos_horz, DFCMEM1); if (vdfc->corr_mode == ISIF_VDFC_NORMAL || vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) { regw(vdfc->table[0].level_at_pos, DFCMEM2); regw(vdfc->table[0].level_up_pixels, DFCMEM3); regw(vdfc->table[0].level_low_pixels, DFCMEM4); } /* set DFCMARST and set DFCMWR */ val = regr(DFCMEMCTL) | (1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT) | 1; regw(val, DFCMEMCTL); count = retries; while (count && (regr(DFCMEMCTL) & 0x1)) count--; if (!count) { dev_dbg(isif_cfg.dev, "defect table write timeout !!!\n"); return -1; } for (i = 1; i < vdfc->num_vdefects; i++) { regw(vdfc->table[i].pos_vert, DFCMEM0); regw(vdfc->table[i].pos_horz, DFCMEM1); if (vdfc->corr_mode == ISIF_VDFC_NORMAL || vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) { regw(vdfc->table[i].level_at_pos, DFCMEM2); regw(vdfc->table[i].level_up_pixels, DFCMEM3); regw(vdfc->table[i].level_low_pixels, DFCMEM4); } val = regr(DFCMEMCTL); /* clear DFCMARST and set DFCMWR */ val &= ~BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT); val |= 1; regw(val, DFCMEMCTL); count = retries; while (count && (regr(DFCMEMCTL) & 0x1)) count--; if (!count) { dev_err(isif_cfg.dev, "defect table write timeout !!!\n"); return -1; } } if (vdfc->num_vdefects < ISIF_VDFC_TABLE_SIZE) { /* Extra cycle needed */ regw(0, DFCMEM0); regw(0x1FFF, DFCMEM1); regw(1, DFCMEMCTL); } /* enable VDFC */ reg_modify((1 << ISIF_VDFC_EN_SHIFT), (1 << ISIF_VDFC_EN_SHIFT), DFCCTL); return 0; } static void isif_config_csc(struct isif_df_csc *df_csc) { u32 val1 = 0, val2 = 0, i; if (!df_csc->csc.en) { regw(0, CSCCTL); return; } for (i = 0; i < ISIF_CSC_NUM_COEFF; i++) { if ((i % 2) == 0) { /* CSCM - LSB */ val1 = (df_csc->csc.coeff[i].integer << 
ISIF_CSC_COEF_INTEG_SHIFT) | df_csc->csc.coeff[i].decimal; } else { /* CSCM - MSB */ val2 = (df_csc->csc.coeff[i].integer << ISIF_CSC_COEF_INTEG_SHIFT) | df_csc->csc.coeff[i].decimal; val2 <<= ISIF_CSCM_MSB_SHIFT; val2 |= val1; regw(val2, (CSCM0 + ((i - 1) << 1))); } } /* program the active area */ regw(df_csc->start_pix, FMTSPH); /* * one extra pixel as required for CSC. Actually number of * pixel - 1 should be configured in this register. So we * need to subtract 1 before writing to FMTSPH, but we will * not do this since csc requires one extra pixel */ regw(df_csc->num_pixels, FMTLNH); regw(df_csc->start_line, FMTSLV); /* * one extra line as required for CSC. See reason documented for * num_pixels */ regw(df_csc->num_lines, FMTLNV); /* Enable CSC */ regw(1, CSCCTL); } static int isif_config_raw(void) { struct isif_params_raw *params = &isif_cfg.bayer; struct isif_config_params_raw *module_params = &isif_cfg.bayer.config_params; struct vpss_pg_frame_size frame_size; struct vpss_sync_pol sync; u32 val; dev_dbg(isif_cfg.dev, "\nStarting isif_config_raw..\n"); /* * Configure CCDCFG register:- * Set CCD Not to swap input since input is RAW data * Set FID detection function to Latch at V-Sync * Set WENLOG - isif valid area * Set TRGSEL * Set EXTRG * Packed to 8 or 16 bits */ val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC | ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN | ISIF_CCDCFG_EXTRG_DISABLE | isif_cfg.data_pack; dev_dbg(isif_cfg.dev, "Writing 0x%x to ...CCDCFG \n", val); regw(val, CCDCFG); /* * Configure the vertical sync polarity(MODESET.VDPOL) * Configure the horizontal sync polarity (MODESET.HDPOL) * Configure frame id polarity (MODESET.FLDPOL) * Configure data polarity * Configure External WEN Selection * Configure frame format(progressive or interlace) * Configure pixel format (Input mode) * Configure the data shift */ val = ISIF_VDHDOUT_INPUT | (params->vd_pol << ISIF_VD_POL_SHIFT) | (params->hd_pol << ISIF_HD_POL_SHIFT) | (params->fid_pol << 
ISIF_FID_POL_SHIFT) | (ISIF_DATAPOL_NORMAL << ISIF_DATAPOL_SHIFT) | (ISIF_EXWEN_DISABLE << ISIF_EXWEN_SHIFT) | (params->frm_fmt << ISIF_FRM_FMT_SHIFT) | (params->pix_fmt << ISIF_INPUT_SHIFT) | (params->config_params.data_shift << ISIF_DATASFT_SHIFT); regw(val, MODESET); dev_dbg(isif_cfg.dev, "Writing 0x%x to MODESET...\n", val); /* * Configure GAMMAWD register * CFA pattern setting */ val = params->cfa_pat << ISIF_GAMMAWD_CFA_SHIFT; /* Gamma msb */ if (module_params->compress.alg == ISIF_ALAW) val |= ISIF_ALAW_ENABLE; val |= (params->data_msb << ISIF_ALAW_GAMA_WD_SHIFT); regw(val, CGAMMAWD); /* Configure DPCM compression settings */ if (module_params->compress.alg == ISIF_DPCM) { val = BIT(ISIF_DPCM_EN_SHIFT) | (module_params->compress.pred << ISIF_DPCM_PREDICTOR_SHIFT); } regw(val, MISC); /* Configure Gain & Offset */ isif_config_gain_offset(); /* Configure Color pattern */ val = (params->config_params.col_pat_field0.olop) | (params->config_params.col_pat_field0.olep << 2) | (params->config_params.col_pat_field0.elop << 4) | (params->config_params.col_pat_field0.elep << 6) | (params->config_params.col_pat_field1.olop << 8) | (params->config_params.col_pat_field1.olep << 10) | (params->config_params.col_pat_field1.elop << 12) | (params->config_params.col_pat_field1.elep << 14); regw(val, CCOLP); dev_dbg(isif_cfg.dev, "Writing %x to CCOLP ...\n", val); /* Configure HSIZE register */ val = (!!params->horz_flip_en) << ISIF_HSIZE_FLIP_SHIFT; /* calculate line offset in 32 bytes based on pack value */ if (isif_cfg.data_pack == ISIF_PACK_8BIT) val |= ((params->win.width + 31) >> 5); else if (isif_cfg.data_pack == ISIF_PACK_12BIT) val |= (((params->win.width + (params->win.width >> 2)) + 31) >> 5); else val |= (((params->win.width * 2) + 31) >> 5); regw(val, HSIZE); /* Configure SDOFST register */ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) { if (params->image_invert_en) { /* For interlace inverse mode */ regw(0x4B6D, SDOFST); dev_dbg(isif_cfg.dev, "Writing 0x4B6D to 
SDOFST...\n"); } else { /* For interlace non inverse mode */ regw(0x0B6D, SDOFST); dev_dbg(isif_cfg.dev, "Writing 0x0B6D to SDOFST...\n"); } } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) { if (params->image_invert_en) { /* For progressive inverse mode */ regw(0x4000, SDOFST); dev_dbg(isif_cfg.dev, "Writing 0x4000 to SDOFST...\n"); } else { /* For progressive non inverse mode */ regw(0x0000, SDOFST); dev_dbg(isif_cfg.dev, "Writing 0x0000 to SDOFST...\n"); } } /* Configure video window */ isif_setwin(&params->win, params->frm_fmt, 1); /* Configure Black Clamp */ isif_config_bclamp(&module_params->bclamp); /* Configure Vertical Defection Pixel Correction */ if (isif_config_dfc(&module_params->dfc) < 0) return -EFAULT; if (!module_params->df_csc.df_or_csc) /* Configure Color Space Conversion */ isif_config_csc(&module_params->df_csc); isif_config_linearization(&module_params->linearize); /* Configure Culling */ isif_config_culling(&module_params->culling); /* Configure horizontal and vertical offsets(DFC,LSC,Gain) */ regw(module_params->horz_offset, DATAHOFST); regw(module_params->vert_offset, DATAVOFST); /* Setup test pattern if enabled */ if (params->config_params.test_pat_gen) { /* Use the HD/VD pol settings from user */ sync.ccdpg_hdpol = params->hd_pol; sync.ccdpg_vdpol = params->vd_pol; dm365_vpss_set_sync_pol(sync); frame_size.hlpfr = isif_cfg.bayer.win.width; frame_size.pplen = isif_cfg.bayer.win.height; dm365_vpss_set_pg_frame_size(frame_size); vpss_select_ccdc_source(VPSS_PGLPBK); } dev_dbg(isif_cfg.dev, "\nEnd of isif_config_ycbcr...\n"); return 0; } static int isif_set_buftype(enum ccdc_buftype buf_type) { if (isif_cfg.if_type == VPFE_RAW_BAYER) isif_cfg.bayer.buf_type = buf_type; else isif_cfg.ycbcr.buf_type = buf_type; return 0; } static enum ccdc_buftype isif_get_buftype(void) { if (isif_cfg.if_type == VPFE_RAW_BAYER) return isif_cfg.bayer.buf_type; return isif_cfg.ycbcr.buf_type; } static int isif_enum_pix(u32 *pix, int i) { int ret = -EINVAL; 
if (isif_cfg.if_type == VPFE_RAW_BAYER) { if (i < ARRAY_SIZE(isif_raw_bayer_pix_formats)) { *pix = isif_raw_bayer_pix_formats[i]; ret = 0; } } else { if (i < ARRAY_SIZE(isif_raw_yuv_pix_formats)) { *pix = isif_raw_yuv_pix_formats[i]; ret = 0; } } return ret; } static int isif_set_pixel_format(unsigned int pixfmt) { if (isif_cfg.if_type == VPFE_RAW_BAYER) { if (pixfmt == V4L2_PIX_FMT_SBGGR8) { if ((isif_cfg.bayer.config_params.compress.alg != ISIF_ALAW) && (isif_cfg.bayer.config_params.compress.alg != ISIF_DPCM)) { dev_dbg(isif_cfg.dev, "Either configure A-Law or DPCM\n"); return -EINVAL; } isif_cfg.data_pack = ISIF_PACK_8BIT; } else if (pixfmt == V4L2_PIX_FMT_SBGGR16) { isif_cfg.bayer.config_params.compress.alg = ISIF_NO_COMPRESSION; isif_cfg.data_pack = ISIF_PACK_16BIT; } else return -EINVAL; isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW; } else { if (pixfmt == V4L2_PIX_FMT_YUYV) isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR; else if (pixfmt == V4L2_PIX_FMT_UYVY) isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; else return -EINVAL; isif_cfg.data_pack = ISIF_PACK_8BIT; } return 0; } static u32 isif_get_pixel_format(void) { u32 pixfmt; if (isif_cfg.if_type == VPFE_RAW_BAYER) if (isif_cfg.bayer.config_params.compress.alg == ISIF_ALAW || isif_cfg.bayer.config_params.compress.alg == ISIF_DPCM) pixfmt = V4L2_PIX_FMT_SBGGR8; else pixfmt = V4L2_PIX_FMT_SBGGR16; else { if (isif_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR) pixfmt = V4L2_PIX_FMT_YUYV; else pixfmt = V4L2_PIX_FMT_UYVY; } return pixfmt; } static int isif_set_image_window(struct v4l2_rect *win) { if (isif_cfg.if_type == VPFE_RAW_BAYER) { isif_cfg.bayer.win.top = win->top; isif_cfg.bayer.win.left = win->left; isif_cfg.bayer.win.width = win->width; isif_cfg.bayer.win.height = win->height; } else { isif_cfg.ycbcr.win.top = win->top; isif_cfg.ycbcr.win.left = win->left; isif_cfg.ycbcr.win.width = win->width; isif_cfg.ycbcr.win.height = win->height; } return 0; } static void isif_get_image_window(struct v4l2_rect *win) 
{ if (isif_cfg.if_type == VPFE_RAW_BAYER) *win = isif_cfg.bayer.win; else *win = isif_cfg.ycbcr.win; } static unsigned int isif_get_line_length(void) { unsigned int len; if (isif_cfg.if_type == VPFE_RAW_BAYER) { if (isif_cfg.data_pack == ISIF_PACK_8BIT) len = ((isif_cfg.bayer.win.width)); else if (isif_cfg.data_pack == ISIF_PACK_12BIT) len = (((isif_cfg.bayer.win.width * 2) + (isif_cfg.bayer.win.width >> 2))); else len = (((isif_cfg.bayer.win.width * 2))); } else len = (((isif_cfg.ycbcr.win.width * 2))); return ALIGN(len, 32); } static int isif_set_frame_format(enum ccdc_frmfmt frm_fmt) { if (isif_cfg.if_type == VPFE_RAW_BAYER) isif_cfg.bayer.frm_fmt = frm_fmt; else isif_cfg.ycbcr.frm_fmt = frm_fmt; return 0; } static enum ccdc_frmfmt isif_get_frame_format(void) { if (isif_cfg.if_type == VPFE_RAW_BAYER) return isif_cfg.bayer.frm_fmt; return isif_cfg.ycbcr.frm_fmt; } static int isif_getfid(void) { return (regr(MODESET) >> 15) & 0x1; } /* misc operations */ static void isif_setfbaddr(unsigned long addr) { regw((addr >> 21) & 0x07ff, CADU); regw((addr >> 5) & 0x0ffff, CADL); } static int isif_set_hw_if_params(struct vpfe_hw_if_param *params) { isif_cfg.if_type = params->if_type; switch (params->if_type) { case VPFE_BT656: case VPFE_BT656_10BIT: case VPFE_YCBCR_SYNC_8: isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT; isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; break; case VPFE_BT1120: case VPFE_YCBCR_SYNC_16: isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_16BIT; isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; break; case VPFE_RAW_BAYER: isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW; break; default: dev_dbg(isif_cfg.dev, "Invalid interface type\n"); return -EINVAL; } return 0; } /* This function will configure ISIF for YCbCr parameters. 
*/ static int isif_config_ycbcr(void) { struct isif_ycbcr_config *params = &isif_cfg.ycbcr; struct vpss_pg_frame_size frame_size; u32 modeset = 0, ccdcfg = 0; struct vpss_sync_pol sync; dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr..."); /* configure pixel format or input mode */ modeset = modeset | (params->pix_fmt << ISIF_INPUT_SHIFT) | (params->frm_fmt << ISIF_FRM_FMT_SHIFT) | (params->fid_pol << ISIF_FID_POL_SHIFT) | (params->hd_pol << ISIF_HD_POL_SHIFT) | (params->vd_pol << ISIF_VD_POL_SHIFT); /* pack the data to 8-bit ISIFCFG */ switch (isif_cfg.if_type) { case VPFE_BT656: if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) { dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n"); return -EINVAL; } modeset |= (VPFE_PINPOL_NEGATIVE << ISIF_VD_POL_SHIFT); regw(3, REC656IF); ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR; break; case VPFE_BT656_10BIT: if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) { dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n"); return -EINVAL; } /* setup BT.656, embedded sync */ regw(3, REC656IF); /* enable 10 bit mode in ccdcfg */ ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR | ISIF_BW656_ENABLE; break; case VPFE_BT1120: if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) { dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n"); return -EINVAL; } regw(3, REC656IF); break; case VPFE_YCBCR_SYNC_8: ccdcfg |= ISIF_DATA_PACK8; ccdcfg |= ISIF_YCINSWP_YCBCR; if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) { dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n"); return -EINVAL; } break; case VPFE_YCBCR_SYNC_16: if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) { dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n"); return -EINVAL; } break; default: /* should never come here */ dev_dbg(isif_cfg.dev, "Invalid interface type\n"); return -EINVAL; } regw(modeset, MODESET); /* Set up pix order */ ccdcfg |= params->pix_order << ISIF_PIX_ORDER_SHIFT; regw(ccdcfg, CCDCFG); /* configure video window */ if ((isif_cfg.if_type == 
VPFE_BT1120) || (isif_cfg.if_type == VPFE_YCBCR_SYNC_16)) isif_setwin(&params->win, params->frm_fmt, 1); else isif_setwin(&params->win, params->frm_fmt, 2); /* * configure the horizontal line offset * this is done by rounding up width to a multiple of 16 pixels * and multiply by two to account for y:cb:cr 4:2:2 data */ regw(((((params->win.width * 2) + 31) & 0xffffffe0) >> 5), HSIZE); /* configure the memory line offset */ if ((params->frm_fmt == CCDC_FRMFMT_INTERLACED) && (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)) /* two fields are interleaved in memory */ regw(0x00000249, SDOFST); /* Setup test pattern if enabled */ if (isif_cfg.bayer.config_params.test_pat_gen) { sync.ccdpg_hdpol = params->hd_pol; sync.ccdpg_vdpol = params->vd_pol; dm365_vpss_set_sync_pol(sync); dm365_vpss_set_pg_frame_size(frame_size); } return 0; } static int isif_configure(void) { if (isif_cfg.if_type == VPFE_RAW_BAYER) return isif_config_raw(); return isif_config_ycbcr(); } static int isif_close(struct device *device) { /* copy defaults to module params */ isif_cfg.bayer.config_params = isif_config_defaults; return 0; } static struct ccdc_hw_device isif_hw_dev = { .name = "ISIF", .owner = THIS_MODULE, .hw_ops = { .open = isif_open, .close = isif_close, .enable = isif_enable, .enable_out_to_sdram = isif_enable_output_to_sdram, .set_hw_if_params = isif_set_hw_if_params, .configure = isif_configure, .set_buftype = isif_set_buftype, .get_buftype = isif_get_buftype, .enum_pix = isif_enum_pix, .set_pixel_format = isif_set_pixel_format, .get_pixel_format = isif_get_pixel_format, .set_frame_format = isif_set_frame_format, .get_frame_format = isif_get_frame_format, .set_image_window = isif_set_image_window, .get_image_window = isif_get_image_window, .get_line_length = isif_get_line_length, .setfbaddr = isif_setfbaddr, .getfid = isif_getfid, }, }; static int __init isif_probe(struct platform_device *pdev) { void (*setup_pinmux)(void); struct resource *res; void *__iomem addr; int status = 0, 
i; /* * first try to register with vpfe. If not correct platform, then we * don't have to iomap */ status = vpfe_register_ccdc_device(&isif_hw_dev); if (status < 0) return status; /* Get and enable Master clock */ isif_cfg.mclk = clk_get(&pdev->dev, "master"); if (IS_ERR(isif_cfg.mclk)) { status = PTR_ERR(isif_cfg.mclk); goto fail_mclk; } if (clk_enable(isif_cfg.mclk)) { status = -ENODEV; goto fail_mclk; } /* Platform data holds setup_pinmux function ptr */ if (NULL == pdev->dev.platform_data) { status = -ENODEV; goto fail_mclk; } setup_pinmux = pdev->dev.platform_data; /* * setup Mux configuration for ccdc which may be different for * different SoCs using this CCDC */ setup_pinmux(); i = 0; /* Get the ISIF base address, linearization table0 and table1 addr. */ while (i < 3) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) { status = -ENODEV; goto fail_nobase_res; } res = request_mem_region(res->start, resource_size(res), res->name); if (!res) { status = -EBUSY; goto fail_nobase_res; } addr = ioremap_nocache(res->start, resource_size(res)); if (!addr) { status = -ENOMEM; goto fail_base_iomap; } switch (i) { case 0: /* ISIF base address */ isif_cfg.base_addr = addr; break; case 1: /* ISIF linear tbl0 address */ isif_cfg.linear_tbl0_addr = addr; break; default: /* ISIF linear tbl0 address */ isif_cfg.linear_tbl1_addr = addr; break; } i++; } isif_cfg.dev = &pdev->dev; printk(KERN_NOTICE "%s is registered with vpfe.\n", isif_hw_dev.name); return 0; fail_base_iomap: release_mem_region(res->start, resource_size(res)); i--; fail_nobase_res: if (isif_cfg.base_addr) iounmap(isif_cfg.base_addr); if (isif_cfg.linear_tbl0_addr) iounmap(isif_cfg.linear_tbl0_addr); while (i >= 0) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); release_mem_region(res->start, resource_size(res)); i--; } fail_mclk: clk_put(isif_cfg.mclk); vpfe_unregister_ccdc_device(&isif_hw_dev); return status; } static int isif_remove(struct platform_device *pdev) { struct resource *res; 
int i = 0; iounmap(isif_cfg.base_addr); iounmap(isif_cfg.linear_tbl0_addr); iounmap(isif_cfg.linear_tbl1_addr); while (i < 3) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (res) release_mem_region(res->start, resource_size(res)); i++; } vpfe_unregister_ccdc_device(&isif_hw_dev); return 0; } static struct platform_driver isif_driver = { .driver = { .name = "isif", .owner = THIS_MODULE, }, .remove = __devexit_p(isif_remove), .probe = isif_probe, }; module_platform_driver(isif_driver); MODULE_LICENSE("GPL");
gpl-2.0
brymaster5000/Lunar_Max
drivers/mtd/maps/pxa2xx-flash.c
4975
3716
/* * Map driver for Intel XScale PXA2xx platforms. * * Author: Nicolas Pitre * Copyright: (C) 2001 MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #include <mach/hardware.h> #include <asm/mach/flash.h> #define CACHELINESIZE 32 static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from, ssize_t len) { unsigned long start = (unsigned long)map->cached + from; unsigned long end = start + len; start &= ~(CACHELINESIZE - 1); while (start < end) { /* invalidate D cache line */ asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)); start += CACHELINESIZE; } } struct pxa2xx_flash_info { struct mtd_info *mtd; struct map_info map; }; static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; static int __devinit pxa2xx_flash_probe(struct platform_device *pdev) { struct flash_platform_data *flash = pdev->dev.platform_data; struct pxa2xx_flash_info *info; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL); if (!info) return -ENOMEM; info->map.name = (char *) flash->name; info->map.bankwidth = flash->width; info->map.phys = res->start; info->map.size = resource_size(res); info->map.virt = ioremap(info->map.phys, info->map.size); if (!info->map.virt) { printk(KERN_WARNING "Failed to ioremap %s\n", info->map.name); return -ENOMEM; } info->map.cached = ioremap_cached(info->map.phys, info->map.size); if (!info->map.cached) printk(KERN_WARNING "Failed to ioremap cached %s\n", info->map.name); info->map.inval_cache = 
pxa2xx_map_inval_cache; simple_map_init(&info->map); printk(KERN_NOTICE "Probing %s at physical address 0x%08lx" " (%d-bit bankwidth)\n", info->map.name, (unsigned long)info->map.phys, info->map.bankwidth * 8); info->mtd = do_map_probe(flash->map_name, &info->map); if (!info->mtd) { iounmap((void *)info->map.virt); if (info->map.cached) iounmap(info->map.cached); return -EIO; } info->mtd->owner = THIS_MODULE; mtd_device_parse_register(info->mtd, probes, NULL, flash->parts, flash->nr_parts); platform_set_drvdata(pdev, info); return 0; } static int __devexit pxa2xx_flash_remove(struct platform_device *dev) { struct pxa2xx_flash_info *info = platform_get_drvdata(dev); platform_set_drvdata(dev, NULL); mtd_device_unregister(info->mtd); map_destroy(info->mtd); iounmap(info->map.virt); if (info->map.cached) iounmap(info->map.cached); kfree(info); return 0; } #ifdef CONFIG_PM static void pxa2xx_flash_shutdown(struct platform_device *dev) { struct pxa2xx_flash_info *info = platform_get_drvdata(dev); if (info && mtd_suspend(info->mtd) == 0) mtd_resume(info->mtd); } #else #define pxa2xx_flash_shutdown NULL #endif static struct platform_driver pxa2xx_flash_driver = { .driver = { .name = "pxa2xx-flash", .owner = THIS_MODULE, }, .probe = pxa2xx_flash_probe, .remove = __devexit_p(pxa2xx_flash_remove), .shutdown = pxa2xx_flash_shutdown, }; module_platform_driver(pxa2xx_flash_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>"); MODULE_DESCRIPTION("MTD map driver for Intel XScale PXA2xx");
gpl-2.0
jasonpritchard/linux-imx
arch/score/kernel/sys_score.c
7791
3704
/*
 * arch/score/kernel/syscall.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/unistd.h>
#include <linux/syscalls.h>
#include <asm/syscalls.h>

/*
 * mmap2(2): 'pgoff' is already expressed in pages, so it is forwarded
 * to the generic helper unchanged.
 */
asmlinkage long
sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
	  unsigned long flags, unsigned long fd, unsigned long pgoff)
{
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

/*
 * mmap(2): 'offset' is a byte offset; reject anything that is not
 * page aligned, then convert it to a page offset for the helper.
 */
asmlinkage long
sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
	unsigned long flags, unsigned long fd, off_t offset)
{
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			offset >> PAGE_SHIFT);
}

/*
 * fork(2): the child reuses the parent's current stack pointer, which
 * on Score lives in regs->regs[0].
 */
asmlinkage long
score_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->regs[0], regs, 0, NULL, NULL);
}

/*
 * Clone a task - this clones the calling program thread.
 * This is called indirectly via a small wrapper
 */
asmlinkage long
score_clone(struct pt_regs *regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	/* syscall arguments arrive in r4..r8 per the Score calling convention */
	clone_flags = regs->regs[4];
	newsp = regs->regs[5];
	/* a NULL child stack means "share the caller's stack pointer" */
	if (!newsp)
		newsp = regs->regs[0];
	parent_tidptr = (int __user *)regs->regs[6];
	child_tidptr = (int __user *)regs->regs[8];

	return do_fork(clone_flags, newsp, regs, 0,
			parent_tidptr, child_tidptr);
}

/*
 * vfork(2): like fork but the parent blocks (CLONE_VFORK) and shares
 * its address space (CLONE_VM) until the child execs or exits.
 */
asmlinkage long
score_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
			regs->regs[0], regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 * This is called indirectly via a small wrapper
 */
asmlinkage long
score_execve(struct pt_regs *regs)
{
	int error;
	char *filename;

	/* pathname pointer is the first syscall argument (r4) */
	filename = getname((char __user*)regs->regs[4]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;

	/* argv in r5, envp in r6; regs are passed so exec can reset them */
	error = do_execve(filename,
			  (const char __user *const __user *)regs->regs[5],
			  (const char __user *const __user *)regs->regs[6],
			  regs);
	putname(filename);
	return error;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	/* arguments go in r4..r6, exactly as a user-space syscall would */
	register unsigned long __r4 asm("r4") = (unsigned long) filename;
	register unsigned long __r5 asm("r5") = (unsigned long) argv;
	register unsigned long __r6 asm("r6") = (unsigned long) envp;
	register unsigned long __r7 asm("r7");

	/*
	 * Load the execve syscall number into r27 and trap.  On return,
	 * r4 holds the result and r7 flags whether it is an error value
	 * (see the sign fix-up below).
	 */
	__asm__ __volatile__ (" \n"
		"ldi r27, %5 \n"
		"syscall \n"
		"mv %0, r4 \n"
		"mv %1, r7 \n"
		: "=&r" (__r4), "=r" (__r7)
		: "r" (__r4), "r" (__r5), "r" (__r6), "i" (__NR_execve)
		: "r8", "r9", "r10", "r11", "r22", "r23", "r24", "r25",
		  "r26", "r27", "memory");

	/* r7 == 0 means success: return r4 as-is; otherwise negate to errno */
	if (__r7 == 0)
		return __r4;

	return -__r4;
}
gpl-2.0
Sparkey67/android_kernel_lge_g3
drivers/oprofile/oprofile_stats.c
11631
2371
/**
 * @file oprofile_stats.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/threads.h>

#include "oprofile_stats.h"
#include "cpu_buffer.h"

struct oprofile_stat_struct oprofile_stats;

/*
 * Reset every per-CPU buffer counter and every global atomic
 * statistic back to zero.
 */
void oprofile_reset_stats(void)
{
	struct oprofile_cpu_buffer *cpu_buf;
	int i;

	for_each_possible_cpu(i) {
		cpu_buf = &per_cpu(op_cpu_buffer, i);
		cpu_buf->sample_received = 0;
		cpu_buf->sample_lost_overflow = 0;
		cpu_buf->backtrace_aborted = 0;
		cpu_buf->sample_invalid_eip = 0;
	}

	atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
	atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
	atomic_set(&oprofile_stats.event_lost_overflow, 0);
	atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
	/* always cleared, even when the multiplex file below is compiled out */
	atomic_set(&oprofile_stats.multiplex_counter, 0);
}

/*
 * Populate the "stats" directory in oprofilefs: one subdirectory per
 * possible CPU with its buffer counters, plus the global atomics.
 */
void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
{
	struct oprofile_cpu_buffer *cpu_buf;
	struct dentry *cpudir;
	struct dentry *dir;
	char buf[10];
	int i;

	dir = oprofilefs_mkdir(sb, root, "stats");
	if (!dir)
		return;

	for_each_possible_cpu(i) {
		cpu_buf = &per_cpu(op_cpu_buffer, i);
		/* FIX: use sizeof(buf) instead of a hard-coded 10 so the
		 * bound tracks the declaration if the buffer is resized. */
		snprintf(buf, sizeof(buf), "cpu%d", i);
		cpudir = oprofilefs_mkdir(sb, dir, buf);

		/* Strictly speaking access to these ulongs is racy,
		 * but we can't simply lock them, and they are
		 * informational only.
		 */
		oprofilefs_create_ro_ulong(sb, cpudir, "sample_received",
			&cpu_buf->sample_received);
		oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow",
			&cpu_buf->sample_lost_overflow);
		oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
			&cpu_buf->backtrace_aborted);
		oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
			&cpu_buf->sample_invalid_eip);
	}

	oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
		&oprofile_stats.sample_lost_no_mm);
	oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
		&oprofile_stats.sample_lost_no_mapping);
	oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
		&oprofile_stats.event_lost_overflow);
	oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
		&oprofile_stats.bt_lost_no_mapping);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	oprofilefs_create_ro_atomic(sb, dir, "multiplex_counter",
		&oprofile_stats.multiplex_counter);
#endif
}
gpl-2.0
Sudokamikaze/XKernel-taoshan
arch/sh/kernel/cpu/clock.c
11887
1152
/* * arch/sh/kernel/cpu/clock.c - SuperH clock framework * * Copyright (C) 2005 - 2009 Paul Mundt * * This clock framework is derived from the OMAP version by: * * Copyright (C) 2004 - 2008 Nokia Corporation * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> * * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/clk.h> #include <asm/clock.h> #include <asm/machvec.h> int __init clk_init(void) { int ret; ret = arch_clk_init(); if (unlikely(ret)) { pr_err("%s: CPU clock registration failed.\n", __func__); return ret; } if (sh_mv.mv_clk_init) { ret = sh_mv.mv_clk_init(); if (unlikely(ret)) { pr_err("%s: machvec clock initialization failed.\n", __func__); return ret; } } /* Kick the child clocks.. */ recalculate_root_clocks(); /* Enable the necessary init clocks */ clk_enable_init_clocks(); return ret; }
gpl-2.0
OUDhs/android_kernel_samsung_amazing3gcri
net/irda/irnet/irnet_irda.c
12911
56923
/* * IrNET protocol module : Synchronous PPP over an IrDA socket. * * Jean II - HPL `00 - <jt@hpl.hp.com> * * This file implement the IRDA interface of IrNET. * Basically, we sit on top of IrTTP. We set up IrTTP, IrIAS properly, * and exchange frames with IrTTP. */ #include "irnet_irda.h" /* Private header */ #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <asm/unaligned.h> /* * PPP disconnect work: we need to make sure we're in * process context when calling ppp_unregister_channel(). */ static void irnet_ppp_disconnect(struct work_struct *work) { irnet_socket * self = container_of(work, irnet_socket, disconnect_work); if (self == NULL) return; /* * If we were connected, cleanup & close the PPP * channel, which will kill pppd (hangup) and the rest. */ if (self->ppp_open && !self->ttp_open && !self->ttp_connect) { ppp_unregister_channel(&self->chan); self->ppp_open = 0; } } /************************* CONTROL CHANNEL *************************/ /* * When ppp is not active, /dev/irnet act as a control channel. * Writing allow to set up the IrDA destination of the IrNET channel, * and any application may be read events happening on IrNET... */ /*------------------------------------------------------------------*/ /* * Post an event to the control channel... * Put the event in the log, and then wait all process blocked on read * so they can read the log... */ static void irnet_post_event(irnet_socket * ap, irnet_event event, __u32 saddr, __u32 daddr, char * name, __u16 hints) { int index; /* In the log */ DENTER(CTRL_TRACE, "(ap=0x%p, event=%d, daddr=%08x, name=``%s'')\n", ap, event, daddr, name); /* Protect this section via spinlock. * Note : as we are the only event producer, we only need to exclude * ourself when touching the log, which is nice and easy. 
*/ spin_lock_bh(&irnet_events.spinlock); /* Copy the event in the log */ index = irnet_events.index; irnet_events.log[index].event = event; irnet_events.log[index].daddr = daddr; irnet_events.log[index].saddr = saddr; /* Try to copy IrDA nickname */ if(name) strcpy(irnet_events.log[index].name, name); else irnet_events.log[index].name[0] = '\0'; /* Copy hints */ irnet_events.log[index].hints.word = hints; /* Try to get ppp unit number */ if((ap != (irnet_socket *) NULL) && (ap->ppp_open)) irnet_events.log[index].unit = ppp_unit_number(&ap->chan); else irnet_events.log[index].unit = -1; /* Increment the index * Note that we increment the index only after the event is written, * to make sure that the readers don't get garbage... */ irnet_events.index = (index + 1) % IRNET_MAX_EVENTS; DEBUG(CTRL_INFO, "New event index is %d\n", irnet_events.index); /* Spin lock end */ spin_unlock_bh(&irnet_events.spinlock); /* Now : wake up everybody waiting for events... */ wake_up_interruptible_all(&irnet_events.rwait); DEXIT(CTRL_TRACE, "\n"); } /************************* IRDA SUBROUTINES *************************/ /* * These are a bunch of subroutines called from other functions * down there, mostly common code or to improve readability... * * Note : we duplicate quite heavily some routines of af_irda.c, * because our input structure (self) is quite different * (struct irnet instead of struct irda_sock), which make sharing * the same code impossible (at least, without templates). */ /*------------------------------------------------------------------*/ /* * Function irda_open_tsap (self) * * Open local Transport Service Access Point (TSAP) * * Create a IrTTP instance for us and set all the IrTTP callbacks. 
*/ static inline int irnet_open_tsap(irnet_socket * self) { notify_t notify; /* Callback structure */ DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self); DABORT(self->tsap != NULL, -EBUSY, IRDA_SR_ERROR, "Already busy !\n"); /* Initialize IrTTP callbacks to be used by the IrDA stack */ irda_notify_init(&notify); notify.connect_confirm = irnet_connect_confirm; notify.connect_indication = irnet_connect_indication; notify.disconnect_indication = irnet_disconnect_indication; notify.data_indication = irnet_data_indication; /*notify.udata_indication = NULL;*/ notify.flow_indication = irnet_flow_indication; notify.status_indication = irnet_status_indication; notify.instance = self; strlcpy(notify.name, IRNET_NOTIFY_NAME, sizeof(notify.name)); /* Open an IrTTP instance */ self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); DABORT(self->tsap == NULL, -ENOMEM, IRDA_SR_ERROR, "Unable to allocate TSAP !\n"); /* Remember which TSAP selector we actually got */ self->stsap_sel = self->tsap->stsap_sel; DEXIT(IRDA_SR_TRACE, " - tsap=0x%p, sel=0x%X\n", self->tsap, self->stsap_sel); return 0; } /*------------------------------------------------------------------*/ /* * Function irnet_ias_to_tsap (self, result, value) * * Examine an IAS object and extract TSAP * * We do an IAP query to find the TSAP associated with the IrNET service. * When IrIAP pass us the result of the query, this function look at * the return values to check for failures and extract the TSAP if * possible. 
* Also deallocate value * The failure is in self->errno * Return TSAP or -1 */ static inline __u8 irnet_ias_to_tsap(irnet_socket * self, int result, struct ias_value * value) { __u8 dtsap_sel = 0; /* TSAP we are looking for */ DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self); /* By default, no error */ self->errno = 0; /* Check if request succeeded */ switch(result) { /* Standard errors : service not available */ case IAS_CLASS_UNKNOWN: case IAS_ATTRIB_UNKNOWN: DEBUG(IRDA_SR_INFO, "IAS object doesn't exist ! (%d)\n", result); self->errno = -EADDRNOTAVAIL; break; /* Other errors, most likely IrDA stack failure */ default : DEBUG(IRDA_SR_INFO, "IAS query failed ! (%d)\n", result); self->errno = -EHOSTUNREACH; break; /* Success : we got what we wanted */ case IAS_SUCCESS: break; } /* Check what was returned to us */ if(value != NULL) { /* What type of argument have we got ? */ switch(value->type) { case IAS_INTEGER: DEBUG(IRDA_SR_INFO, "result=%d\n", value->t.integer); if(value->t.integer != -1) /* Get the remote TSAP selector */ dtsap_sel = value->t.integer; else self->errno = -EADDRNOTAVAIL; break; default: self->errno = -EADDRNOTAVAIL; DERROR(IRDA_SR_ERROR, "bad type ! (0x%X)\n", value->type); break; } /* Cleanup */ irias_delete_value(value); } else /* value == NULL */ { /* Nothing returned to us - usually result != SUCCESS */ if(!(self->errno)) { DERROR(IRDA_SR_ERROR, "IrDA bug : result == SUCCESS && value == NULL\n"); self->errno = -EHOSTUNREACH; } } DEXIT(IRDA_SR_TRACE, "\n"); /* Return the TSAP */ return dtsap_sel; } /*------------------------------------------------------------------*/ /* * Function irnet_find_lsap_sel (self) * * Try to lookup LSAP selector in remote LM-IAS * * Basically, we start a IAP query, and then go to sleep. When the query * return, irnet_getvalue_confirm will wake us up, and we can examine the * result of the query... * Note that in some case, the query fail even before we go to sleep, * creating some races... 
*/ static inline int irnet_find_lsap_sel(irnet_socket * self) { DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self); /* This should not happen */ DABORT(self->iriap, -EBUSY, IRDA_SR_ERROR, "busy with a previous query.\n"); /* Create an IAP instance, will be closed in irnet_getvalue_confirm() */ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, irnet_getvalue_confirm); /* Treat unexpected signals as disconnect */ self->errno = -EHOSTUNREACH; /* Query remote LM-IAS */ iriap_getvaluebyclass_request(self->iriap, self->rsaddr, self->daddr, IRNET_SERVICE_NAME, IRNET_IAS_VALUE); /* The above request is non-blocking. * After a while, IrDA will call us back in irnet_getvalue_confirm() * We will then call irnet_ias_to_tsap() and finish the * connection procedure */ DEXIT(IRDA_SR_TRACE, "\n"); return 0; } /*------------------------------------------------------------------*/ /* * Function irnet_connect_tsap (self) * * Initialise the TTP socket and initiate TTP connection * */ static inline int irnet_connect_tsap(irnet_socket * self) { int err; DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self); /* Open a local TSAP (an IrTTP instance) */ err = irnet_open_tsap(self); if(err != 0) { clear_bit(0, &self->ttp_connect); DERROR(IRDA_SR_ERROR, "connect aborted!\n"); return err; } /* Connect to remote device */ err = irttp_connect_request(self->tsap, self->dtsap_sel, self->rsaddr, self->daddr, NULL, self->max_sdu_size_rx, NULL); if(err != 0) { clear_bit(0, &self->ttp_connect); DERROR(IRDA_SR_ERROR, "connect aborted!\n"); return err; } /* The above call is non-blocking. * After a while, the IrDA stack will either call us back in * irnet_connect_confirm() or irnet_disconnect_indication() * See you there ;-) */ DEXIT(IRDA_SR_TRACE, "\n"); return err; } /*------------------------------------------------------------------*/ /* * Function irnet_discover_next_daddr (self) * * Query the IrNET TSAP of the next device in the log. * * Used in the TSAP discovery procedure. 
*/ static inline int irnet_discover_next_daddr(irnet_socket * self) { /* Close the last instance of IrIAP, and open a new one. * We can't reuse the IrIAP instance in the IrIAP callback */ if(self->iriap) { iriap_close(self->iriap); self->iriap = NULL; } /* Create a new IAP instance */ self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, irnet_discovervalue_confirm); if(self->iriap == NULL) return -ENOMEM; /* Next discovery - before the call to avoid races */ self->disco_index++; /* Check if we have one more address to try */ if(self->disco_index < self->disco_number) { /* Query remote LM-IAS */ iriap_getvaluebyclass_request(self->iriap, self->discoveries[self->disco_index].saddr, self->discoveries[self->disco_index].daddr, IRNET_SERVICE_NAME, IRNET_IAS_VALUE); /* The above request is non-blocking. * After a while, IrDA will call us back in irnet_discovervalue_confirm() * We will then call irnet_ias_to_tsap() and come back here again... */ return 0; } else return 1; } /*------------------------------------------------------------------*/ /* * Function irnet_discover_daddr_and_lsap_sel (self) * * This try to find a device with the requested service. * * Initiate a TSAP discovery procedure. * It basically look into the discovery log. For each address in the list, * it queries the LM-IAS of the device to find if this device offer * the requested service. * If there is more than one node supporting the service, we complain * to the user (it should move devices around). * If we find one node which have the requested TSAP, we connect to it. * * This function just start the whole procedure. It request the discovery * log and submit the first IAS query. * The bulk of the job is handled in irnet_discovervalue_confirm() * * Note : this procedure fails if there is more than one device in range * on the same dongle, because IrLMP doesn't disconnect the LAP when the * last LSAP is closed. Moreover, we would need to wait the LAP * disconnection... 
*/ static inline int irnet_discover_daddr_and_lsap_sel(irnet_socket * self) { int ret; DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self); /* Ask lmp for the current discovery log */ self->discoveries = irlmp_get_discoveries(&self->disco_number, self->mask, DISCOVERY_DEFAULT_SLOTS); /* Check if the we got some results */ if(self->discoveries == NULL) { self->disco_number = -1; clear_bit(0, &self->ttp_connect); DRETURN(-ENETUNREACH, IRDA_SR_INFO, "No Cachelog...\n"); } DEBUG(IRDA_SR_INFO, "Got the log (0x%p), size is %d\n", self->discoveries, self->disco_number); /* Start with the first discovery */ self->disco_index = -1; self->daddr = DEV_ADDR_ANY; /* This will fail if the log is empty - this is non-blocking */ ret = irnet_discover_next_daddr(self); if(ret) { /* Close IAP */ if(self->iriap) iriap_close(self->iriap); self->iriap = NULL; /* Cleanup our copy of the discovery log */ kfree(self->discoveries); self->discoveries = NULL; clear_bit(0, &self->ttp_connect); DRETURN(-ENETUNREACH, IRDA_SR_INFO, "Cachelog empty...\n"); } /* Follow me in irnet_discovervalue_confirm() */ DEXIT(IRDA_SR_TRACE, "\n"); return 0; } /*------------------------------------------------------------------*/ /* * Function irnet_dname_to_daddr (self) * * Convert an IrDA nickname to a valid IrDA address * * It basically look into the discovery log until there is a match. */ static inline int irnet_dname_to_daddr(irnet_socket * self) { struct irda_device_info *discoveries; /* Copy of the discovery log */ int number; /* Number of nodes in the log */ int i; DENTER(IRDA_SR_TRACE, "(self=0x%p)\n", self); /* Ask lmp for the current discovery log */ discoveries = irlmp_get_discoveries(&number, 0xffff, DISCOVERY_DEFAULT_SLOTS); /* Check if the we got some results */ if(discoveries == NULL) DRETURN(-ENETUNREACH, IRDA_SR_INFO, "Cachelog empty...\n"); /* * Now, check all discovered devices (if any), and connect * client only about the services that the client is * interested in... 
*/ for(i = 0; i < number; i++) { /* Does the name match ? */ if(!strncmp(discoveries[i].info, self->rname, NICKNAME_MAX_LEN)) { /* Yes !!! Get it.. */ self->daddr = discoveries[i].daddr; DEBUG(IRDA_SR_INFO, "discovered device ``%s'' at address 0x%08x.\n", self->rname, self->daddr); kfree(discoveries); DEXIT(IRDA_SR_TRACE, "\n"); return 0; } } /* No luck ! */ DEBUG(IRDA_SR_INFO, "cannot discover device ``%s'' !!!\n", self->rname); kfree(discoveries); return -EADDRNOTAVAIL; } /************************* SOCKET ROUTINES *************************/ /* * This are the main operations on IrNET sockets, basically to create * and destroy IrNET sockets. These are called from the PPP part... */ /*------------------------------------------------------------------*/ /* * Create a IrNET instance : just initialise some parameters... */ int irda_irnet_create(irnet_socket * self) { DENTER(IRDA_SOCK_TRACE, "(self=0x%p)\n", self); self->magic = IRNET_MAGIC; /* Paranoia */ self->ttp_open = 0; /* Prevent higher layer from accessing IrTTP */ self->ttp_connect = 0; /* Not connecting yet */ self->rname[0] = '\0'; /* May be set via control channel */ self->rdaddr = DEV_ADDR_ANY; /* May be set via control channel */ self->rsaddr = DEV_ADDR_ANY; /* May be set via control channel */ self->daddr = DEV_ADDR_ANY; /* Until we get connected */ self->saddr = DEV_ADDR_ANY; /* Until we get connected */ self->max_sdu_size_rx = TTP_SAR_UNBOUND; /* Register as a client with IrLMP */ self->ckey = irlmp_register_client(0, NULL, NULL, NULL); #ifdef DISCOVERY_NOMASK self->mask = 0xffff; /* For W2k compatibility */ #else /* DISCOVERY_NOMASK */ self->mask = irlmp_service_to_hint(S_LAN); #endif /* DISCOVERY_NOMASK */ self->tx_flow = FLOW_START; /* Flow control from IrTTP */ INIT_WORK(&self->disconnect_work, irnet_ppp_disconnect); DEXIT(IRDA_SOCK_TRACE, "\n"); return 0; } /*------------------------------------------------------------------*/ /* * Connect to the other side : * o convert device name to an address * 
o find the socket number (dlsap) * o Establish the connection * * Note : We no longer mimic af_irda. The IAS query for finding the TSAP * is done asynchronously, like the TTP connection. This allow us to * call this function from any context (not only process). * The downside is that following what's happening in there is tricky * because it involve various functions all over the place... */ int irda_irnet_connect(irnet_socket * self) { int err; DENTER(IRDA_SOCK_TRACE, "(self=0x%p)\n", self); /* Check if we are already trying to connect. * Because irda_irnet_connect() can be called directly by pppd plus * packet retries in ppp_generic and connect may take time, plus we may * race with irnet_connect_indication(), we need to be careful there... */ if(test_and_set_bit(0, &self->ttp_connect)) DRETURN(-EBUSY, IRDA_SOCK_INFO, "Already connecting...\n"); if((self->iriap != NULL) || (self->tsap != NULL)) DERROR(IRDA_SOCK_ERROR, "Socket not cleaned up...\n"); /* Insert ourselves in the hashbin so that the IrNET server can find us. * Notes : 4th arg is string of 32 char max and must be null terminated * When 4th arg is used (string), 3rd arg isn't (int) * Can't re-insert (MUST remove first) so check for that... 
*/ if((irnet_server.running) && (self->q.q_next == NULL)) { spin_lock_bh(&irnet_server.spinlock); hashbin_insert(irnet_server.list, (irda_queue_t *) self, 0, self->rname); spin_unlock_bh(&irnet_server.spinlock); DEBUG(IRDA_SOCK_INFO, "Inserted ``%s'' in hashbin...\n", self->rname); } /* If we don't have anything (no address, no name) */ if((self->rdaddr == DEV_ADDR_ANY) && (self->rname[0] == '\0')) { /* Try to find a suitable address */ if((err = irnet_discover_daddr_and_lsap_sel(self)) != 0) DRETURN(err, IRDA_SOCK_INFO, "auto-connect failed!\n"); /* In most cases, the call above is non-blocking */ } else { /* If we have only the name (no address), try to get an address */ if(self->rdaddr == DEV_ADDR_ANY) { if((err = irnet_dname_to_daddr(self)) != 0) DRETURN(err, IRDA_SOCK_INFO, "name connect failed!\n"); } else /* Use the requested destination address */ self->daddr = self->rdaddr; /* Query remote LM-IAS to find LSAP selector */ irnet_find_lsap_sel(self); /* The above call is non blocking */ } /* At this point, we are waiting for the IrDA stack to call us back, * or we have already failed. * We will finish the connection procedure in irnet_connect_tsap(). */ DEXIT(IRDA_SOCK_TRACE, "\n"); return 0; } /*------------------------------------------------------------------*/ /* * Function irda_irnet_destroy(self) * * Destroy irnet instance * * Note : this need to be called from a process context. 
*/ void irda_irnet_destroy(irnet_socket * self) { DENTER(IRDA_SOCK_TRACE, "(self=0x%p)\n", self); if(self == NULL) return; /* Remove ourselves from hashbin (if we are queued in hashbin) * Note : `irnet_server.running' protect us from calls in hashbin_delete() */ if((irnet_server.running) && (self->q.q_next != NULL)) { struct irnet_socket * entry; DEBUG(IRDA_SOCK_INFO, "Removing from hash..\n"); spin_lock_bh(&irnet_server.spinlock); entry = hashbin_remove_this(irnet_server.list, (irda_queue_t *) self); self->q.q_next = NULL; spin_unlock_bh(&irnet_server.spinlock); DASSERT(entry == self, , IRDA_SOCK_ERROR, "Can't remove from hash.\n"); } /* If we were connected, post a message */ if(test_bit(0, &self->ttp_open)) { /* Note : as the disconnect comes from ppp_generic, the unit number * doesn't exist anymore when we post the event, so we need to pass * NULL as the first arg... */ irnet_post_event(NULL, IRNET_DISCONNECT_TO, self->saddr, self->daddr, self->rname, 0); } /* Prevent various IrDA callbacks from messing up things * Need to be first */ clear_bit(0, &self->ttp_connect); /* Prevent higher layer from accessing IrTTP */ clear_bit(0, &self->ttp_open); /* Unregister with IrLMP */ irlmp_unregister_client(self->ckey); /* Unregister with LM-IAS */ if(self->iriap) { iriap_close(self->iriap); self->iriap = NULL; } /* Cleanup eventual discoveries from connection attempt or control channel */ if(self->discoveries != NULL) { /* Cleanup our copy of the discovery log */ kfree(self->discoveries); self->discoveries = NULL; } /* Close our IrTTP connection */ if(self->tsap) { DEBUG(IRDA_SOCK_INFO, "Closing our TTP connection.\n"); irttp_disconnect_request(self->tsap, NULL, P_NORMAL); irttp_close_tsap(self->tsap); self->tsap = NULL; } self->stsap_sel = 0; DEXIT(IRDA_SOCK_TRACE, "\n"); } /************************** SERVER SOCKET **************************/ /* * The IrNET service is composed of one server socket and a variable * number of regular IrNET sockets. 
The server socket is supposed to * handle incoming connections and redirect them to one IrNET sockets. * It's a superset of the regular IrNET socket, but has a very distinct * behaviour... */ /*------------------------------------------------------------------*/ /* * Function irnet_daddr_to_dname (self) * * Convert an IrDA address to a IrDA nickname * * It basically look into the discovery log until there is a match. */ static inline int irnet_daddr_to_dname(irnet_socket * self) { struct irda_device_info *discoveries; /* Copy of the discovery log */ int number; /* Number of nodes in the log */ int i; DENTER(IRDA_SERV_TRACE, "(self=0x%p)\n", self); /* Ask lmp for the current discovery log */ discoveries = irlmp_get_discoveries(&number, 0xffff, DISCOVERY_DEFAULT_SLOTS); /* Check if the we got some results */ if (discoveries == NULL) DRETURN(-ENETUNREACH, IRDA_SERV_INFO, "Cachelog empty...\n"); /* Now, check all discovered devices (if any) */ for(i = 0; i < number; i++) { /* Does the name match ? */ if(discoveries[i].daddr == self->daddr) { /* Yes !!! Get it.. */ strlcpy(self->rname, discoveries[i].info, sizeof(self->rname)); self->rname[sizeof(self->rname) - 1] = '\0'; DEBUG(IRDA_SERV_INFO, "Device 0x%08x is in fact ``%s''.\n", self->daddr, self->rname); kfree(discoveries); DEXIT(IRDA_SERV_TRACE, "\n"); return 0; } } /* No luck ! */ DEXIT(IRDA_SERV_INFO, ": cannot discover device 0x%08x !!!\n", self->daddr); kfree(discoveries); return -EADDRNOTAVAIL; } /*------------------------------------------------------------------*/ /* * Function irda_find_socket (self) * * Find the correct IrNET socket * * Look into the list of IrNET sockets and finds one with the right * properties... 
*/ static inline irnet_socket * irnet_find_socket(irnet_socket * self) { irnet_socket * new = (irnet_socket *) NULL; int err; DENTER(IRDA_SERV_TRACE, "(self=0x%p)\n", self); /* Get the addresses of the requester */ self->daddr = irttp_get_daddr(self->tsap); self->saddr = irttp_get_saddr(self->tsap); /* Try to get the IrDA nickname of the requester */ err = irnet_daddr_to_dname(self); /* Protect access to the instance list */ spin_lock_bh(&irnet_server.spinlock); /* So now, try to get an socket having specifically * requested that nickname */ if(err == 0) { new = (irnet_socket *) hashbin_find(irnet_server.list, 0, self->rname); if(new) DEBUG(IRDA_SERV_INFO, "Socket 0x%p matches rname ``%s''.\n", new, new->rname); } /* If no name matches, try to find an socket by the destination address */ /* It can be either the requested destination address (set via the * control channel), or the current destination address if the * socket is in the middle of a connection request */ if(new == (irnet_socket *) NULL) { new = (irnet_socket *) hashbin_get_first(irnet_server.list); while(new !=(irnet_socket *) NULL) { /* Does it have the same address ? */ if((new->rdaddr == self->daddr) || (new->daddr == self->daddr)) { /* Yes !!! Get it.. */ DEBUG(IRDA_SERV_INFO, "Socket 0x%p matches daddr %#08x.\n", new, self->daddr); break; } new = (irnet_socket *) hashbin_get_next(irnet_server.list); } } /* If we don't have any socket, get the first unconnected socket */ if(new == (irnet_socket *) NULL) { new = (irnet_socket *) hashbin_get_first(irnet_server.list); while(new !=(irnet_socket *) NULL) { /* Is it available ? */ if(!(test_bit(0, &new->ttp_open)) && (new->rdaddr == DEV_ADDR_ANY) && (new->rname[0] == '\0') && (new->ppp_open)) { /* Yes !!! Get it.. 
*/ DEBUG(IRDA_SERV_INFO, "Socket 0x%p is free.\n", new); break; } new = (irnet_socket *) hashbin_get_next(irnet_server.list); } } /* Spin lock end */ spin_unlock_bh(&irnet_server.spinlock); DEXIT(IRDA_SERV_TRACE, " - new = 0x%p\n", new); return new; } /*------------------------------------------------------------------*/ /* * Function irda_connect_socket (self) * * Connect an incoming connection to the socket * */ static inline int irnet_connect_socket(irnet_socket * server, irnet_socket * new, struct qos_info * qos, __u32 max_sdu_size, __u8 max_header_size) { DENTER(IRDA_SERV_TRACE, "(server=0x%p, new=0x%p)\n", server, new); /* Now attach up the new socket */ new->tsap = irttp_dup(server->tsap, new); DABORT(new->tsap == NULL, -1, IRDA_SERV_ERROR, "dup failed!\n"); /* Set up all the relevant parameters on the new socket */ new->stsap_sel = new->tsap->stsap_sel; new->dtsap_sel = new->tsap->dtsap_sel; new->saddr = irttp_get_saddr(new->tsap); new->daddr = irttp_get_daddr(new->tsap); new->max_header_size = max_header_size; new->max_sdu_size_tx = max_sdu_size; new->max_data_size = max_sdu_size; #ifdef STREAM_COMPAT /* If we want to receive "stream sockets" */ if(max_sdu_size == 0) new->max_data_size = irttp_get_max_seg_size(new->tsap); #endif /* STREAM_COMPAT */ /* Clean up the original one to keep it in listen state */ irttp_listen(server->tsap); /* Send a connection response on the new socket */ irttp_connect_response(new->tsap, new->max_sdu_size_rx, NULL); /* Allow PPP to send its junk over the new socket... */ set_bit(0, &new->ttp_open); /* Not connecting anymore, and clean up last possible remains * of connection attempts on the socket */ clear_bit(0, &new->ttp_connect); if(new->iriap) { iriap_close(new->iriap); new->iriap = NULL; } if(new->discoveries != NULL) { kfree(new->discoveries); new->discoveries = NULL; } #ifdef CONNECT_INDIC_KICK /* As currently we don't block packets in ppp_irnet_send() while passive, * this is not really needed... 
* Also, not doing it give IrDA a chance to finish the setup properly * before being swamped with packets... */ ppp_output_wakeup(&new->chan); #endif /* CONNECT_INDIC_KICK */ /* Notify the control channel */ irnet_post_event(new, IRNET_CONNECT_FROM, new->saddr, new->daddr, server->rname, 0); DEXIT(IRDA_SERV_TRACE, "\n"); return 0; } /*------------------------------------------------------------------*/ /* * Function irda_disconnect_server (self) * * Cleanup the server socket when the incoming connection abort * */ static inline void irnet_disconnect_server(irnet_socket * self, struct sk_buff *skb) { DENTER(IRDA_SERV_TRACE, "(self=0x%p)\n", self); /* Put the received packet in the black hole */ kfree_skb(skb); #ifdef FAIL_SEND_DISCONNECT /* Tell the other party we don't want to be connected */ /* Hum... Is it the right thing to do ? And do we need to send * a connect response before ? It looks ok without this... */ irttp_disconnect_request(self->tsap, NULL, P_NORMAL); #endif /* FAIL_SEND_DISCONNECT */ /* Notify the control channel (see irnet_find_socket()) */ irnet_post_event(NULL, IRNET_REQUEST_FROM, self->saddr, self->daddr, self->rname, 0); /* Clean up the server to keep it in listen state */ irttp_listen(self->tsap); DEXIT(IRDA_SERV_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Function irda_setup_server (self) * * Create a IrTTP server and set it up... * * Register the IrLAN hint bit, create a IrTTP instance for us, * set all the IrTTP callbacks and create an IrIAS entry... 
*/ static inline int irnet_setup_server(void) { __u16 hints; DENTER(IRDA_SERV_TRACE, "()\n"); /* Initialise the regular socket part of the server */ irda_irnet_create(&irnet_server.s); /* Open a local TSAP (an IrTTP instance) for the server */ irnet_open_tsap(&irnet_server.s); /* PPP part setup */ irnet_server.s.ppp_open = 0; irnet_server.s.chan.private = NULL; irnet_server.s.file = NULL; /* Get the hint bit corresponding to IrLAN */ /* Note : we overload the IrLAN hint bit. As it is only a "hint", and as * we provide roughly the same functionality as IrLAN, this is ok. * In fact, the situation is similar as JetSend overloading the Obex hint */ hints = irlmp_service_to_hint(S_LAN); #ifdef ADVERTISE_HINT /* Register with IrLMP as a service (advertise our hint bit) */ irnet_server.skey = irlmp_register_service(hints); #endif /* ADVERTISE_HINT */ /* Register with LM-IAS (so that people can connect to us) */ irnet_server.ias_obj = irias_new_object(IRNET_SERVICE_NAME, jiffies); irias_add_integer_attrib(irnet_server.ias_obj, IRNET_IAS_VALUE, irnet_server.s.stsap_sel, IAS_KERNEL_ATTR); irias_insert_object(irnet_server.ias_obj); #ifdef DISCOVERY_EVENTS /* Tell IrLMP we want to be notified of newly discovered nodes */ irlmp_update_client(irnet_server.s.ckey, hints, irnet_discovery_indication, irnet_expiry_indication, (void *) &irnet_server.s); #endif DEXIT(IRDA_SERV_TRACE, " - self=0x%p\n", &irnet_server.s); return 0; } /*------------------------------------------------------------------*/ /* * Function irda_destroy_server (self) * * Destroy the IrTTP server... * * Reverse of the previous function... 
*/ static inline void irnet_destroy_server(void) { DENTER(IRDA_SERV_TRACE, "()\n"); #ifdef ADVERTISE_HINT /* Unregister with IrLMP */ irlmp_unregister_service(irnet_server.skey); #endif /* ADVERTISE_HINT */ /* Unregister with LM-IAS */ if(irnet_server.ias_obj) irias_delete_object(irnet_server.ias_obj); /* Cleanup the socket part */ irda_irnet_destroy(&irnet_server.s); DEXIT(IRDA_SERV_TRACE, "\n"); } /************************ IRDA-TTP CALLBACKS ************************/ /* * When we create a IrTTP instance, we pass to it a set of callbacks * that IrTTP will call in case of various events. * We take care of those events here. */ /*------------------------------------------------------------------*/ /* * Function irnet_data_indication (instance, sap, skb) * * Received some data from TinyTP. Just queue it on the receive queue * */ static int irnet_data_indication(void * instance, void * sap, struct sk_buff *skb) { irnet_socket * ap = (irnet_socket *) instance; unsigned char * p; int code = 0; DENTER(IRDA_TCB_TRACE, "(self/ap=0x%p, skb=0x%p)\n", ap, skb); DASSERT(skb != NULL, 0, IRDA_CB_ERROR, "skb is NULL !!!\n"); /* Check is ppp is ready to receive our packet */ if(!ap->ppp_open) { DERROR(IRDA_CB_ERROR, "PPP not ready, dropping packet...\n"); /* When we return error, TTP will need to requeue the skb and * will stop the sender. IrTTP will stall until we send it a * flow control request... */ return -ENOMEM; } /* strip address/control field if present */ p = skb->data; if((p[0] == PPP_ALLSTATIONS) && (p[1] == PPP_UI)) { /* chop off address/control */ if(skb->len < 3) goto err_exit; p = skb_pull(skb, 2); } /* decompress protocol field if compressed */ if(p[0] & 1) { /* protocol is compressed */ skb_push(skb, 1)[0] = 0; } else if(skb->len < 2) goto err_exit; /* pass to generic ppp layer */ /* Note : how do I know if ppp can accept or not the packet ? This is * essential if I want to manage flow control smoothly... 
*/ ppp_input(&ap->chan, skb); DEXIT(IRDA_TCB_TRACE, "\n"); return 0; err_exit: DERROR(IRDA_CB_ERROR, "Packet too small, dropping...\n"); kfree_skb(skb); ppp_input_error(&ap->chan, code); return 0; /* Don't return an error code, only for flow control... */ } /*------------------------------------------------------------------*/ /* * Function irnet_disconnect_indication (instance, sap, reason, skb) * * Connection has been closed. Chech reason to find out why * * Note : there are many cases where we come here : * o attempted to connect, timeout * o connected, link is broken, LAP has timeout * o connected, other side close the link * o connection request on the server not handled */ static void irnet_disconnect_indication(void * instance, void * sap, LM_REASON reason, struct sk_buff *skb) { irnet_socket * self = (irnet_socket *) instance; int test_open; int test_connect; DENTER(IRDA_TCB_TRACE, "(self=0x%p)\n", self); DASSERT(self != NULL, , IRDA_CB_ERROR, "Self is NULL !!!\n"); /* Don't care about it, but let's not leak it */ if(skb) dev_kfree_skb(skb); /* Prevent higher layer from accessing IrTTP */ test_open = test_and_clear_bit(0, &self->ttp_open); /* Not connecting anymore... * (note : TSAP is open, so IAP callbacks are no longer pending...) */ test_connect = test_and_clear_bit(0, &self->ttp_connect); /* If both self->ttp_open and self->ttp_connect are NULL, it mean that we * have a race condition with irda_irnet_destroy() or * irnet_connect_indication(), so don't mess up tsap... 
*/ if(!(test_open || test_connect)) { DERROR(IRDA_CB_ERROR, "Race condition detected...\n"); return; } /* If we were active, notify the control channel */ if(test_open) irnet_post_event(self, IRNET_DISCONNECT_FROM, self->saddr, self->daddr, self->rname, 0); else /* If we were trying to connect, notify the control channel */ if((self->tsap) && (self != &irnet_server.s)) irnet_post_event(self, IRNET_NOANSWER_FROM, self->saddr, self->daddr, self->rname, 0); /* Close our IrTTP connection, cleanup tsap */ if((self->tsap) && (self != &irnet_server.s)) { DEBUG(IRDA_CB_INFO, "Closing our TTP connection.\n"); irttp_close_tsap(self->tsap); self->tsap = NULL; } /* Cleanup the socket in case we want to reconnect in ppp_output_wakeup() */ self->stsap_sel = 0; self->daddr = DEV_ADDR_ANY; self->tx_flow = FLOW_START; /* Deal with the ppp instance if it's still alive */ if(self->ppp_open) { if(test_open) { /* ppp_unregister_channel() wants a user context. */ schedule_work(&self->disconnect_work); } else { /* If we were trying to connect, flush (drain) ppp_generic * Tx queue (most often we have blocked it), which will * trigger an other attempt to connect. If we are passive, * this will empty the Tx queue after last try. */ ppp_output_wakeup(&self->chan); } } DEXIT(IRDA_TCB_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Function irnet_connect_confirm (instance, sap, qos, max_sdu_size, skb) * * Connections has been confirmed by the remote device * */ static void irnet_connect_confirm(void * instance, void * sap, struct qos_info *qos, __u32 max_sdu_size, __u8 max_header_size, struct sk_buff *skb) { irnet_socket * self = (irnet_socket *) instance; DENTER(IRDA_TCB_TRACE, "(self=0x%p)\n", self); /* Check if socket is closing down (via irda_irnet_destroy()) */ if(! test_bit(0, &self->ttp_connect)) { DERROR(IRDA_CB_ERROR, "Socket no longer connecting. 
Ouch !\n"); return; } /* How much header space do we need to reserve */ self->max_header_size = max_header_size; /* IrTTP max SDU size in transmit direction */ self->max_sdu_size_tx = max_sdu_size; self->max_data_size = max_sdu_size; #ifdef STREAM_COMPAT if(max_sdu_size == 0) self->max_data_size = irttp_get_max_seg_size(self->tsap); #endif /* STREAM_COMPAT */ /* At this point, IrLMP has assigned our source address */ self->saddr = irttp_get_saddr(self->tsap); /* Allow higher layer to access IrTTP */ set_bit(0, &self->ttp_open); clear_bit(0, &self->ttp_connect); /* Not racy, IrDA traffic is serial */ /* Give a kick in the ass of ppp_generic so that he sends us some data */ ppp_output_wakeup(&self->chan); /* Check size of received packet */ if(skb->len > 0) { #ifdef PASS_CONNECT_PACKETS DEBUG(IRDA_CB_INFO, "Passing connect packet to PPP.\n"); /* Try to pass it to PPP */ irnet_data_indication(instance, sap, skb); #else /* PASS_CONNECT_PACKETS */ DERROR(IRDA_CB_ERROR, "Dropping non empty packet.\n"); kfree_skb(skb); /* Note : will be optimised with other kfree... */ #endif /* PASS_CONNECT_PACKETS */ } else kfree_skb(skb); /* Notify the control channel */ irnet_post_event(self, IRNET_CONNECT_TO, self->saddr, self->daddr, self->rname, 0); DEXIT(IRDA_TCB_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Function irnet_flow_indication (instance, sap, flow) * * Used by TinyTP to tell us if it can accept more data or not * */ static void irnet_flow_indication(void * instance, void * sap, LOCAL_FLOW flow) { irnet_socket * self = (irnet_socket *) instance; LOCAL_FLOW oldflow = self->tx_flow; DENTER(IRDA_TCB_TRACE, "(self=0x%p, flow=%d)\n", self, flow); /* Update our state */ self->tx_flow = flow; /* Check what IrTTP want us to do... 
*/ switch(flow) { case FLOW_START: DEBUG(IRDA_CB_INFO, "IrTTP wants us to start again\n"); /* Check if we really need to wake up PPP */ if(oldflow == FLOW_STOP) ppp_output_wakeup(&self->chan); else DEBUG(IRDA_CB_INFO, "But we were already transmitting !!!\n"); break; case FLOW_STOP: DEBUG(IRDA_CB_INFO, "IrTTP wants us to slow down\n"); break; default: DEBUG(IRDA_CB_INFO, "Unknown flow command!\n"); break; } DEXIT(IRDA_TCB_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Function irnet_status_indication (instance, sap, reason, skb) * * Link (IrLAP) status report. * */ static void irnet_status_indication(void * instance, LINK_STATUS link, LOCK_STATUS lock) { irnet_socket * self = (irnet_socket *) instance; DENTER(IRDA_TCB_TRACE, "(self=0x%p)\n", self); DASSERT(self != NULL, , IRDA_CB_ERROR, "Self is NULL !!!\n"); /* We can only get this event if we are connected */ switch(link) { case STATUS_NO_ACTIVITY: irnet_post_event(self, IRNET_BLOCKED_LINK, self->saddr, self->daddr, self->rname, 0); break; default: DEBUG(IRDA_CB_INFO, "Unknown status...\n"); } DEXIT(IRDA_TCB_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Function irnet_connect_indication(instance, sap, qos, max_sdu_size, userdata) * * Incoming connection * * In theory, this function is called only on the server socket. * Some other node is attempting to connect to the IrNET service, and has * sent a connection request on our server socket. * We just redirect the connection to the relevant IrNET socket. * * Note : we also make sure that between 2 irnet nodes, there can * exist only one irnet connection. 
*/ static void irnet_connect_indication(void * instance, void * sap, struct qos_info *qos, __u32 max_sdu_size, __u8 max_header_size, struct sk_buff *skb) { irnet_socket * server = &irnet_server.s; irnet_socket * new = (irnet_socket *) NULL; DENTER(IRDA_TCB_TRACE, "(server=0x%p)\n", server); DASSERT(instance == &irnet_server, , IRDA_CB_ERROR, "Invalid instance (0x%p) !!!\n", instance); DASSERT(sap == irnet_server.s.tsap, , IRDA_CB_ERROR, "Invalid sap !!!\n"); /* Try to find the most appropriate IrNET socket */ new = irnet_find_socket(server); /* After all this hard work, do we have an socket ? */ if(new == (irnet_socket *) NULL) { DEXIT(IRDA_CB_INFO, ": No socket waiting for this connection.\n"); irnet_disconnect_server(server, skb); return; } /* Is the socket already busy ? */ if(test_bit(0, &new->ttp_open)) { DEXIT(IRDA_CB_INFO, ": Socket already connected.\n"); irnet_disconnect_server(server, skb); return; } /* The following code is a bit tricky, so need comments ;-) */ /* If ttp_connect is set, the socket is trying to connect to the other * end and may have sent a IrTTP connection request and is waiting for * a connection response (that may never come). * Now, the pain is that the socket may have opened a tsap and is * waiting on it, while the other end is trying to connect to it on * another tsap. * Because IrNET can be peer to peer, we need to workaround this. * Furthermore, the way the irnetd script is implemented, the * target will create a second IrNET connection back to the * originator and expect the originator to bind this new connection * to the original PPPD instance. * And of course, if we don't use irnetd, we can have a race when * both side try to connect simultaneously, which could leave both * connections half closed (yuck). 
* Conclusions : * 1) The "originator" must accept the new connection and get rid * of the old one so that irnetd works * 2) One side must deny the new connection to avoid races, * but both side must agree on which side it is... * Most often, the originator is primary at the LAP layer. * Jean II */ /* Now, let's look at the way I wrote the test... * We need to clear up the ttp_connect flag atomically to prevent * irnet_disconnect_indication() to mess up the tsap we are going to close. * We want to clear the ttp_connect flag only if we close the tsap, * otherwise we will never close it, so we need to check for primary * *before* doing the test on the flag. * And of course, ALLOW_SIMULT_CONNECT can disable this entirely... * Jean II */ /* Socket already connecting ? On primary ? */ if(0 #ifdef ALLOW_SIMULT_CONNECT || ((irttp_is_primary(server->tsap) == 1) && /* primary */ (test_and_clear_bit(0, &new->ttp_connect))) #endif /* ALLOW_SIMULT_CONNECT */ ) { DERROR(IRDA_CB_ERROR, "Socket already connecting, but going to reuse it !\n"); /* Cleanup the old TSAP if necessary - IrIAP will be cleaned up later */ if(new->tsap != NULL) { /* Close the old connection the new socket was attempting, * so that we can hook it up to the new connection. * It's now safe to do it... */ irttp_close_tsap(new->tsap); new->tsap = NULL; } } else { /* Three options : * 1) socket was not connecting or connected : ttp_connect should be 0. * 2) we don't want to connect the socket because we are secondary or * ALLOW_SIMULT_CONNECT is undefined. ttp_connect should be 1. * 3) we are half way in irnet_disconnect_indication(), and it's a * nice race condition... Fortunately, we can detect that by checking * if tsap is still alive. On the other hand, we can't be in * irda_irnet_destroy() otherwise we would not have found this * socket in the hashbin. * Jean II */ if((test_bit(0, &new->ttp_connect)) || (new->tsap != NULL)) { /* Don't mess this socket, somebody else in in charge... 
*/ DERROR(IRDA_CB_ERROR, "Race condition detected, socket in use, abort connect...\n"); irnet_disconnect_server(server, skb); return; } } /* So : at this point, we have a socket, and it is idle. Good ! */ irnet_connect_socket(server, new, qos, max_sdu_size, max_header_size); /* Check size of received packet */ if(skb->len > 0) { #ifdef PASS_CONNECT_PACKETS DEBUG(IRDA_CB_INFO, "Passing connect packet to PPP.\n"); /* Try to pass it to PPP */ irnet_data_indication(new, new->tsap, skb); #else /* PASS_CONNECT_PACKETS */ DERROR(IRDA_CB_ERROR, "Dropping non empty packet.\n"); kfree_skb(skb); /* Note : will be optimised with other kfree... */ #endif /* PASS_CONNECT_PACKETS */ } else kfree_skb(skb); DEXIT(IRDA_TCB_TRACE, "\n"); } /********************** IRDA-IAS/LMP CALLBACKS **********************/ /* * These are the callbacks called by other layers of the IrDA stack, * mainly LMP for discovery and IAS for name queries. */ /*------------------------------------------------------------------*/ /* * Function irnet_getvalue_confirm (result, obj_id, value, priv) * * Got answer from remote LM-IAS, just connect * * This is the reply to a IAS query we were doing to find the TSAP of * the device we want to connect to. * If we have found a valid TSAP, just initiate the TTP connection * on this TSAP. */ static void irnet_getvalue_confirm(int result, __u16 obj_id, struct ias_value *value, void * priv) { irnet_socket * self = (irnet_socket *) priv; DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self); DASSERT(self != NULL, , IRDA_OCB_ERROR, "Self is NULL !!!\n"); /* Check if already connected (via irnet_connect_socket()) * or socket is closing down (via irda_irnet_destroy()) */ if(! test_bit(0, &self->ttp_connect)) { DERROR(IRDA_OCB_ERROR, "Socket no longer connecting. 
Ouch !\n"); return; } /* We probably don't need to make any more queries */ iriap_close(self->iriap); self->iriap = NULL; /* Post process the IAS reply */ self->dtsap_sel = irnet_ias_to_tsap(self, result, value); /* If error, just go out */ if(self->errno) { clear_bit(0, &self->ttp_connect); DERROR(IRDA_OCB_ERROR, "IAS connect failed ! (0x%X)\n", self->errno); return; } DEBUG(IRDA_OCB_INFO, "daddr = %08x, lsap = %d, starting IrTTP connection\n", self->daddr, self->dtsap_sel); /* Start up TTP - non blocking */ irnet_connect_tsap(self); DEXIT(IRDA_OCB_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Function irnet_discovervalue_confirm (result, obj_id, value, priv) * * Handle the TSAP discovery procedure state machine. * Got answer from remote LM-IAS, try next device * * We are doing a TSAP discovery procedure, and we got an answer to * a IAS query we were doing to find the TSAP on one of the address * in the discovery log. * * If we have found a valid TSAP for the first time, save it. If it's * not the first time we found one, complain. * * If we have more addresses in the log, just initiate a new query. * Note that those query may fail (see irnet_discover_daddr_and_lsap_sel()) * * Otherwise, wrap up the procedure (cleanup), check if we have found * any device and connect to it. */ static void irnet_discovervalue_confirm(int result, __u16 obj_id, struct ias_value *value, void * priv) { irnet_socket * self = (irnet_socket *) priv; __u8 dtsap_sel; /* TSAP we are looking for */ DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self); DASSERT(self != NULL, , IRDA_OCB_ERROR, "Self is NULL !!!\n"); /* Check if already connected (via irnet_connect_socket()) * or socket is closing down (via irda_irnet_destroy()) */ if(! test_bit(0, &self->ttp_connect)) { DERROR(IRDA_OCB_ERROR, "Socket no longer connecting. Ouch !\n"); return; } /* Post process the IAS reply */ dtsap_sel = irnet_ias_to_tsap(self, result, value); /* Have we got something ? 
*/ if(self->errno == 0) { /* We found the requested service */ if(self->daddr != DEV_ADDR_ANY) { DERROR(IRDA_OCB_ERROR, "More than one device in range supports IrNET...\n"); } else { /* First time we found that one, save it ! */ self->daddr = self->discoveries[self->disco_index].daddr; self->dtsap_sel = dtsap_sel; } } /* If no failure */ if((self->errno == -EADDRNOTAVAIL) || (self->errno == 0)) { int ret; /* Search the next node */ ret = irnet_discover_next_daddr(self); if(!ret) { /* In this case, the above request was non-blocking. * We will return here after a while... */ return; } /* In this case, we have processed the last discovery item */ } /* No more queries to be done (failure or last one) */ /* We probably don't need to make any more queries */ iriap_close(self->iriap); self->iriap = NULL; /* No more items : remove the log and signal termination */ DEBUG(IRDA_OCB_INFO, "Cleaning up log (0x%p)\n", self->discoveries); if(self->discoveries != NULL) { /* Cleanup our copy of the discovery log */ kfree(self->discoveries); self->discoveries = NULL; } self->disco_number = -1; /* Check out what we found */ if(self->daddr == DEV_ADDR_ANY) { self->daddr = DEV_ADDR_ANY; clear_bit(0, &self->ttp_connect); DEXIT(IRDA_OCB_TRACE, ": cannot discover IrNET in any device !!!\n"); return; } /* We have a valid address - just connect */ DEBUG(IRDA_OCB_INFO, "daddr = %08x, lsap = %d, starting IrTTP connection\n", self->daddr, self->dtsap_sel); /* Start up TTP - non blocking */ irnet_connect_tsap(self); DEXIT(IRDA_OCB_TRACE, "\n"); } #ifdef DISCOVERY_EVENTS /*------------------------------------------------------------------*/ /* * Function irnet_discovery_indication (discovery) * * Got a discovery indication from IrLMP, post an event * * Note : IrLMP take care of matching the hint mask for us, and also * check if it is a "new" node for us... 
* * As IrLMP filter on the IrLAN hint bit, we get both IrLAN and IrNET * nodes, so it's only at connection time that we will know if the * node support IrNET, IrLAN or both. The other solution is to check * in IAS the PNP ids and service name. * Note : even if a node support IrNET (or IrLAN), it's no guarantee * that we will be able to connect to it, the node might already be * busy... * * One last thing : in some case, this function will trigger duplicate * discovery events. On the other hand, we should catch all * discoveries properly (i.e. not miss one). Filtering duplicate here * is to messy, so we leave that to user space... */ static void irnet_discovery_indication(discinfo_t * discovery, DISCOVERY_MODE mode, void * priv) { irnet_socket * self = &irnet_server.s; DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self); DASSERT(priv == &irnet_server, , IRDA_OCB_ERROR, "Invalid instance (0x%p) !!!\n", priv); DEBUG(IRDA_OCB_INFO, "Discovered new IrNET/IrLAN node %s...\n", discovery->info); /* Notify the control channel */ irnet_post_event(NULL, IRNET_DISCOVER, discovery->saddr, discovery->daddr, discovery->info, get_unaligned((__u16 *)discovery->hints)); DEXIT(IRDA_OCB_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Function irnet_expiry_indication (expiry) * * Got a expiry indication from IrLMP, post an event * * Note : IrLMP take care of matching the hint mask for us, we only * check if it is a "new" node... 
*/ static void irnet_expiry_indication(discinfo_t * expiry, DISCOVERY_MODE mode, void * priv) { irnet_socket * self = &irnet_server.s; DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self); DASSERT(priv == &irnet_server, , IRDA_OCB_ERROR, "Invalid instance (0x%p) !!!\n", priv); DEBUG(IRDA_OCB_INFO, "IrNET/IrLAN node %s expired...\n", expiry->info); /* Notify the control channel */ irnet_post_event(NULL, IRNET_EXPIRE, expiry->saddr, expiry->daddr, expiry->info, get_unaligned((__u16 *)expiry->hints)); DEXIT(IRDA_OCB_TRACE, "\n"); } #endif /* DISCOVERY_EVENTS */ /*********************** PROC ENTRY CALLBACKS ***********************/ /* * We create a instance in the /proc filesystem, and here we take care * of that... */ #ifdef CONFIG_PROC_FS static int irnet_proc_show(struct seq_file *m, void *v) { irnet_socket * self; char * state; int i = 0; /* Get the IrNET server information... */ seq_printf(m, "IrNET server - "); seq_printf(m, "IrDA state: %s, ", (irnet_server.running ? "running" : "dead")); seq_printf(m, "stsap_sel: %02x, ", irnet_server.s.stsap_sel); seq_printf(m, "dtsap_sel: %02x\n", irnet_server.s.dtsap_sel); /* Do we need to continue ? */ if(!irnet_server.running) return 0; /* Protect access to the instance list */ spin_lock_bh(&irnet_server.spinlock); /* Get the sockets one by one... */ self = (irnet_socket *) hashbin_get_first(irnet_server.list); while(self != NULL) { /* Start printing info about the socket. */ seq_printf(m, "\nIrNET socket %d - ", i++); /* First, get the requested configuration */ seq_printf(m, "Requested IrDA name: \"%s\", ", self->rname); seq_printf(m, "daddr: %08x, ", self->rdaddr); seq_printf(m, "saddr: %08x\n", self->rsaddr); /* Second, get all the PPP info */ seq_printf(m, " PPP state: %s", (self->ppp_open ? 
"registered" : "unregistered")); if(self->ppp_open) { seq_printf(m, ", unit: ppp%d", ppp_unit_number(&self->chan)); seq_printf(m, ", channel: %d", ppp_channel_index(&self->chan)); seq_printf(m, ", mru: %d", self->mru); /* Maybe add self->flags ? Later... */ } /* Then, get all the IrDA specific info... */ if(self->ttp_open) state = "connected"; else if(self->tsap != NULL) state = "connecting"; else if(self->iriap != NULL) state = "searching"; else if(self->ttp_connect) state = "weird"; else state = "idle"; seq_printf(m, "\n IrDA state: %s, ", state); seq_printf(m, "daddr: %08x, ", self->daddr); seq_printf(m, "stsap_sel: %02x, ", self->stsap_sel); seq_printf(m, "dtsap_sel: %02x\n", self->dtsap_sel); /* Next socket, please... */ self = (irnet_socket *) hashbin_get_next(irnet_server.list); } /* Spin lock end */ spin_unlock_bh(&irnet_server.spinlock); return 0; } static int irnet_proc_open(struct inode *inode, struct file *file) { return single_open(file, irnet_proc_show, NULL); } static const struct file_operations irnet_proc_fops = { .owner = THIS_MODULE, .open = irnet_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif /* PROC_FS */ /********************** CONFIGURATION/CLEANUP **********************/ /* * Initialisation and teardown of the IrDA part, called at module * insertion and removal... */ /*------------------------------------------------------------------*/ /* * Prepare the IrNET layer for operation... 
*/ int __init irda_irnet_init(void) { int err = 0; DENTER(MODULE_TRACE, "()\n"); /* Pure paranoia - should be redundant */ memset(&irnet_server, 0, sizeof(struct irnet_root)); /* Setup start of irnet instance list */ irnet_server.list = hashbin_new(HB_NOLOCK); DABORT(irnet_server.list == NULL, -ENOMEM, MODULE_ERROR, "Can't allocate hashbin!\n"); /* Init spinlock for instance list */ spin_lock_init(&irnet_server.spinlock); /* Initialise control channel */ init_waitqueue_head(&irnet_events.rwait); irnet_events.index = 0; /* Init spinlock for event logging */ spin_lock_init(&irnet_events.spinlock); #ifdef CONFIG_PROC_FS /* Add a /proc file for irnet infos */ proc_create("irnet", 0, proc_irda, &irnet_proc_fops); #endif /* CONFIG_PROC_FS */ /* Setup the IrNET server */ err = irnet_setup_server(); if(!err) /* We are no longer functional... */ irnet_server.running = 1; DEXIT(MODULE_TRACE, "\n"); return err; } /*------------------------------------------------------------------*/ /* * Cleanup at exit... */ void __exit irda_irnet_cleanup(void) { DENTER(MODULE_TRACE, "()\n"); /* We are no longer there... */ irnet_server.running = 0; #ifdef CONFIG_PROC_FS /* Remove our /proc file */ remove_proc_entry("irnet", proc_irda); #endif /* CONFIG_PROC_FS */ /* Remove our IrNET server from existence */ irnet_destroy_server(); /* Remove all instances of IrNET socket still present */ hashbin_delete(irnet_server.list, (FREE_FUNC) irda_irnet_destroy); DEXIT(MODULE_TRACE, "\n"); }
gpl-2.0
fedya/aircam-openwrt
build_dir/linux-gm812x/linux-2.6.28.fa2/drivers/input/joystick/iforce/iforce-serio.c
14703
4470
/* * Copyright (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz> * Copyright (c) 2001, 2007 Johann Deneux <johann.deneux@gmail.com> * * USB/RS232 I-Force joysticks and wheels. */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include "iforce.h" void iforce_serial_xmit(struct iforce *iforce) { unsigned char cs; int i; unsigned long flags; if (test_and_set_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags)) { set_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags); return; } spin_lock_irqsave(&iforce->xmit_lock, flags); again: if (iforce->xmit.head == iforce->xmit.tail) { clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); spin_unlock_irqrestore(&iforce->xmit_lock, flags); return; } cs = 0x2b; serio_write(iforce->serio, 0x2b); serio_write(iforce->serio, iforce->xmit.buf[iforce->xmit.tail]); cs ^= iforce->xmit.buf[iforce->xmit.tail]; XMIT_INC(iforce->xmit.tail, 1); for (i=iforce->xmit.buf[iforce->xmit.tail]; i >= 0; --i) { serio_write(iforce->serio, iforce->xmit.buf[iforce->xmit.tail]); cs ^= iforce->xmit.buf[iforce->xmit.tail]; XMIT_INC(iforce->xmit.tail, 1); } serio_write(iforce->serio, cs); if (test_and_clear_bit(IFORCE_XMIT_AGAIN, 
iforce->xmit_flags)) goto again; clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); spin_unlock_irqrestore(&iforce->xmit_lock, flags); } static void iforce_serio_write_wakeup(struct serio *serio) { struct iforce *iforce = serio_get_drvdata(serio); iforce_serial_xmit(iforce); } static irqreturn_t iforce_serio_irq(struct serio *serio, unsigned char data, unsigned int flags) { struct iforce *iforce = serio_get_drvdata(serio); if (!iforce->pkt) { if (data == 0x2b) iforce->pkt = 1; goto out; } if (!iforce->id) { if (data > 3 && data != 0xff) iforce->pkt = 0; else iforce->id = data; goto out; } if (!iforce->len) { if (data > IFORCE_MAX_LENGTH) { iforce->pkt = 0; iforce->id = 0; } else { iforce->len = data; } goto out; } if (iforce->idx < iforce->len) { iforce->csum += iforce->data[iforce->idx++] = data; goto out; } if (iforce->idx == iforce->len) { iforce_process_packet(iforce, (iforce->id << 8) | iforce->idx, iforce->data); iforce->pkt = 0; iforce->id = 0; iforce->len = 0; iforce->idx = 0; iforce->csum = 0; } out: return IRQ_HANDLED; } static int iforce_serio_connect(struct serio *serio, struct serio_driver *drv) { struct iforce *iforce; int err; iforce = kzalloc(sizeof(struct iforce), GFP_KERNEL); if (!iforce) return -ENOMEM; iforce->bus = IFORCE_232; iforce->serio = serio; serio_set_drvdata(serio, iforce); err = serio_open(serio, drv); if (err) goto fail1; err = iforce_init_device(iforce); if (err) goto fail2; return 0; fail2: serio_close(serio); fail1: serio_set_drvdata(serio, NULL); kfree(iforce); return err; } static void iforce_serio_disconnect(struct serio *serio) { struct iforce *iforce = serio_get_drvdata(serio); input_unregister_device(iforce->dev); serio_close(serio); serio_set_drvdata(serio, NULL); kfree(iforce); } static struct serio_device_id iforce_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_IFORCE, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, iforce_serio_ids); struct serio_driver iforce_serio_drv = { .driver = { 
.name = "iforce", }, .description = "RS232 I-Force joysticks and wheels driver", .id_table = iforce_serio_ids, .write_wakeup = iforce_serio_write_wakeup, .interrupt = iforce_serio_irq, .connect = iforce_serio_connect, .disconnect = iforce_serio_disconnect, };
gpl-2.0
halfline/linux
sound/pci/echoaudio/indigoio_dsp.c
624
3426
/**************************************************************************** Copyright Echo Digital Audio Corporation (c) 1998 - 2004 All rights reserved www.echoaudio.com This file is part of Echo Digital Audio's generic driver library. Echo Digital Audio's generic driver library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ************************************************************************* Translation from C++ and adaptation for use in ALSA-Driver were made by Giuliano Pochini <pochini@shiny.it> ****************************************************************************/ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe, int gain); static int update_vmixer_level(struct echoaudio *chip); static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id) { int err; if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_IO)) return -ENODEV; if ((err = init_dsp_comm_page(chip))) { dev_err(chip->card->dev, "init_hw - could not initialize DSP comm page\n"); return err; } chip->device_id = device_id; chip->subdevice_id = subdevice_id; chip->bad_board = TRUE; chip->dsp_code_to_load = FW_INDIGO_IO_DSP; /* Since this card has no ASIC, mark it as loaded so everything works OK */ chip->asic_loaded = TRUE; chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL; if ((err = load_firmware(chip)) < 0) return err; chip->bad_board = FALSE; return err; } static int set_mixer_defaults(struct echoaudio *chip) { return 
init_line_levels(chip); } static u32 detect_input_clocks(const struct echoaudio *chip) { return ECHO_CLOCK_BIT_INTERNAL; } /* The IndigoIO has no ASIC. Just do nothing */ static int load_asic(struct echoaudio *chip) { return 0; } static int set_sample_rate(struct echoaudio *chip, u32 rate) { if (wait_handshake(chip)) return -EIO; chip->sample_rate = rate; chip->comm_page->sample_rate = cpu_to_le32(rate); clear_handshake(chip); return send_vector(chip, DSP_VC_UPDATE_CLOCKS); } /* This function routes the sound from a virtual channel to a real output */ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe, int gain) { int index; if (snd_BUG_ON(pipe >= num_pipes_out(chip) || output >= num_busses_out(chip))) return -EINVAL; if (wait_handshake(chip)) return -EIO; chip->vmixer_gain[output][pipe] = gain; index = output * num_pipes_out(chip) + pipe; chip->comm_page->vmixer[index] = gain; dev_dbg(chip->card->dev, "set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain); return 0; } /* Tell the DSP to read and update virtual mixer levels in comm page. */ static int update_vmixer_level(struct echoaudio *chip) { if (wait_handshake(chip)) return -EIO; clear_handshake(chip); return send_vector(chip, DSP_VC_SET_VMIXER_GAIN); }
gpl-2.0
fenggangwu/sffs
sound/soc/samsung/snow.c
624
3153
/* * ASoC machine driver for Snow boards * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_device.h> #include <sound/soc.h> #include "i2s.h" #define FIN_PLL_RATE 24000000 static struct snd_soc_dai_link snow_dai[] = { { .name = "Primary", .stream_name = "Primary", .codec_dai_name = "HiFi", .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, }, }; static int snow_late_probe(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; struct snd_soc_dai *cpu_dai = card->rtd[0].cpu_dai; int ret; /* Set the MCLK rate for the codec */ ret = snd_soc_dai_set_sysclk(codec_dai, 0, FIN_PLL_RATE, SND_SOC_CLOCK_IN); if (ret < 0) return ret; /* Select I2S Bus clock to set RCLK and BCLK */ ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0, 0, SND_SOC_CLOCK_IN); if (ret < 0) return ret; return 0; } static struct snd_soc_card snow_snd = { .name = "Snow-I2S", .dai_link = snow_dai, .num_links = ARRAY_SIZE(snow_dai), .late_probe = snow_late_probe, }; static int snow_probe(struct platform_device *pdev) { struct snd_soc_card *card = &snow_snd; struct device_node *i2s_node, *codec_node; int i, ret; i2s_node = of_parse_phandle(pdev->dev.of_node, "samsung,i2s-controller", 0); if (!i2s_node) { dev_err(&pdev->dev, "Property 'i2s-controller' missing or invalid\n"); return -EINVAL; } codec_node = of_parse_phandle(pdev->dev.of_node, "samsung,audio-codec", 0); if (!codec_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); return -EINVAL; 
} for (i = 0; i < ARRAY_SIZE(snow_dai); i++) { snow_dai[i].codec_of_node = codec_node; snow_dai[i].cpu_of_node = i2s_node; snow_dai[i].platform_of_node = i2s_node; } card->dev = &pdev->dev; /* Update card-name if provided through DT, else use default name */ snd_soc_of_parse_card_name(card, "samsung,model"); ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); return ret; } return ret; } static const struct of_device_id snow_of_match[] = { { .compatible = "google,snow-audio-max98090", }, { .compatible = "google,snow-audio-max98091", }, { .compatible = "google,snow-audio-max98095", }, {}, }; MODULE_DEVICE_TABLE(of, snow_of_match); static struct platform_driver snow_driver = { .driver = { .name = "snow-audio", .pm = &snd_soc_pm_ops, .of_match_table = snow_of_match, }, .probe = snow_probe, }; module_platform_driver(snow_driver); MODULE_DESCRIPTION("ALSA SoC Audio machine driver for Snow"); MODULE_LICENSE("GPL");
gpl-2.0
szezso/android_kernel_motorola_msm8916
drivers/staging/prima/CORE/BAP/src/bapApiLinkCntl.c
624
83946
/* * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /*=========================================================================== b a p A p i L i n k C n t l . C OVERVIEW: This software unit holds the implementation of the WLAN BAP modules Link Control functions. The functions externalized by this module are to be called ONLY by other WLAN modules (HDD) that properly register with the BAP Layer initially. DEPENDENCIES: Are listed for each API below. Copyright (c) 2008 QUALCOMM Incorporated. All Rights Reserved. Qualcomm Confidential and Proprietary ===========================================================================*/ /*=========================================================================== EDIT HISTORY FOR FILE This section contains comments describing changes made to the module. Notice that changes are listed in reverse chronological order. 
$Header: /home/labuser/ampBlueZ_2/CORE/BAP/src/bapApiLinkCntl.c,v 1.1 2010/10/23 23:40:28 labuser Exp labuser $$DateTime$$Author: labuser $ when who what, where, why ---------- --- -------------------------------------------------------- 2008-09-15 jez Created module ===========================================================================*/ /*---------------------------------------------------------------------------- * Include Files * -------------------------------------------------------------------------*/ //#include "wlan_qct_tl.h" #include "vos_trace.h" // Pick up the CSR callback definition #include "csrApi.h" /* BT-AMP PAL API header file */ #include "bapApi.h" #include "bapInternal.h" #include "btampFsm.h" //#define BAP_DEBUG /*---------------------------------------------------------------------------- * Preprocessor Definitions and Constants * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Type Declarations * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Global Data Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Variable Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Function Declarations and Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Externalized Function Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Function Declarations and 
Documentation * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- FUNCTION WLANBAP_RoamCallback() DESCRIPTION Callback for Roam (connection status) Events DEPENDENCIES NA. PARAMETERS IN pContext: is the pContext passed in with the roam request pCsrRoamInfo: is a pointer to a tCsrRoamInfo, see definition of eRoamCmdStatus and eRoamCmdResult: for detail valid members. It may be NULL roamId: is to identify the callback related roam request. 0 means unsolicited roamStatus: is a flag indicating the status of the callback roamResult: is the result RETURN VALUE The eHalStatus code associated with performing the operation eHAL_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ #if 0 eCSR_ROAM_RESULT_WDS_STARTED #define eWLAN_BAP_MAC_START_BSS_SUCCESS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STARTED */ eCSR_ROAM_RESULT_FAILURE eCSR_ROAM_RESULT_NOT_ASSOCIATED #define eWLAN_BAP_MAC_START_FAILS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_FAILURE or eCSR_ROAM_RESULT_NOT_ASSOCIATED */ eCSR_ROAM_RESULT_WDS_ASSOCIATED #define eWLAN_BAP_MAC_CONNECT_COMPLETED /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_ASSOCIATED */ eCSR_ROAM_RESULT_FAILURE eCSR_ROAM_RESULT_NOT_ASSOCIATED #define eWLAN_BAP_MAC_CONNECT_FAILED /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_FAILURE or eCSR_ROAM_RESULT_NOT_ASSOCIATED */ eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND #define eWLAN_BAP_MAC_CONNECT_INDICATION /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND */ eCSR_ROAM_RESULT_KEY_SET #define eWLAN_BAP_MAC_KEY_SET_SUCCESS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_KEY_SET */ eCSR_ROAM_RESULT_WDS_DISASSOC_IND #define eWLAN_BAP_MAC_INDICATES_MEDIA_DISCONNECTION /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_DISASSOC_IND */ eCSR_ROAM_RESULT_WDS_STOPPED #define 
eWLAN_BAP_MAC_READY_FOR_CONNECTIONS /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STOPPED */ #endif //0 eHalStatus WLANBAP_RoamCallback ( void *pContext, tCsrRoamInfo *pCsrRoamInfo, tANI_U32 roamId, eRoamCmdStatus roamStatus, eCsrRoamResult roamResult ) { eHalStatus halStatus = eHAL_STATUS_SUCCESS; /* btampContext value */ ptBtampContext btampContext = (ptBtampContext) pContext; tWLAN_BAPEvent bapEvent; /* State machine event */ VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; v_U8_t status; /* return the BT-AMP status here */ /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, before switch on roamStatus = %d", __func__, roamStatus); switch (roamStatus) { //JEZ081110: For testing purposes, with Infra STA as BT STA, this //actually takes care of the "eCSR_ROAM_RESULT_WDS_STARTED" case, //below, better than "eCSR_ROAM_RESULT_IBSS_STARTED". //case eCSR_ROAM_ROAMING_START: case eCSR_ROAM_ASSOCIATION_START: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STARTED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_ROAMING_START", roamResult); // This only gets called when CSR decides to roam on its own - due to lostlink. 
#if 0 if ((pCsrRoamInfo) && (pCsrRoamInfo->pConnectedProfile) && (pCsrRoamInfo->pConnectedProfile->pBssDesc)) { memcpy(bssid.ether_addr_octet, pCsrRoamInfo->pConnectedProfile->pBssDesc->bssId, sizeof(tSirMacAddr)); apple80211Interface->willRoam(&bssid); // Return result isn't significant VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: willRoam returns\n", __func__); } #endif //0 /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_START_BSS_SUCCESS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; case eCSR_ROAM_SET_KEY_COMPLETE: /* bapRoamCompleteCallback with eCSR_ROAM_SET_KEY_COMPLETE */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamStatus = %s (%d)", __func__, "eCSR_ROAM_SET_KEY_COMPLETE", roamStatus); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_KEY_SET_SUCCESS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; case eCSR_ROAM_DISASSOCIATED: /* bapRoamCompleteCallback with eCSR_ROAM_DISASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamStatus = %s (%d)", __func__, "eCSR_ROAM_DISASSOCIATED", roamStatus); case eCSR_ROAM_LOSTLINK: /* bapRoamCompleteCallback with eCSR_ROAM_LOSTLINK */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamStatus = %s (%d)", __func__, "eCSR_ROAM_LOSTLINK", roamStatus); if (roamResult != eCSR_ROAM_RESULT_NONE) { /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_READY_FOR_CONNECTIONS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); } break; default: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, unsupported CSR roamStatus = %d", __func__, 
roamStatus); break; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, before switch on roamResult = %d", __func__, roamResult); switch (roamResult) { //JEZ081110: Commented out for testing. Test relies upon IBSS. case eCSR_ROAM_RESULT_IBSS_STARTED: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_IBSS_STARTED", roamResult); case eCSR_ROAM_RESULT_WDS_STARTED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STARTED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_WDS_STARTED", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_START_BSS_SUCCESS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; //JEZ081110: Commented out for testing. Test relies upon IBSS. //JEZ081110: But I cannot rely upon IBSS for the initial testing. 
case eCSR_ROAM_RESULT_FAILURE: //case eCSR_ROAM_RESULT_NOT_ASSOCIATED: //case eCSR_ROAM_RESULT_IBSS_START_FAILED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_FAILURE or eCSR_ROAM_RESULT_NOT_ASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_FAILURE", roamResult); #ifdef FEATURE_WLAN_BTAMP_UT_RF break; #endif case eCSR_ROAM_RESULT_WDS_START_FAILED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_START_FAILED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_WDS_START_FAILED", roamResult); /* Fill in the event structure */ /* I don't think I should signal a eCSR_ROAM_RESULT_FAILURE * as a eWLAN_BAP_MAC_START_FAILS */ bapEvent.event = eWLAN_BAP_MAC_START_FAILS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; //JEZ081110: Commented out for testing. This handles both Infra STA and IBSS STA. case eCSR_ROAM_RESULT_IBSS_CONNECT: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_IBSS_CONNECT", roamResult); case eCSR_ROAM_RESULT_ASSOCIATED: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_ASSOCIATED", roamResult); case eCSR_ROAM_RESULT_WDS_ASSOCIATED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_ASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_WDS_ASSOCIATED", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_CONNECT_COMPLETED; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; //JEZ081110: Commented out for testing. 
Test relies upon IBSS. //JEZ081110: But I cannot rely upon IBSS for the initial testing. //case eCSR_ROAM_RESULT_FAILURE: case eCSR_ROAM_RESULT_IBSS_START_FAILED: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_IBSS_START_FAILED", roamResult); case eCSR_ROAM_RESULT_NOT_ASSOCIATED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_FAILURE or eCSR_ROAM_RESULT_NOT_ASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_NOT_ASSOCIATED", roamResult); #ifdef FEATURE_WLAN_BTAMP_UT_RF break; #endif case eCSR_ROAM_RESULT_WDS_NOT_ASSOCIATED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_NOT_ASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_WDS_NOT_ASSOCIATED", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_CONNECT_FAILED; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; //JEZ081110: I think I have to check for the bssType to //differentiate between IBSS Start and IBSS Join success. //case eCSR_ROAM_RESULT_IBSS_CONNECT: //VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_IBSS_CONNECT", roamResult); //JEZ081110: Commented out for testing. Test relies upon IBSS. // No longer commented out. 
case eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_WDS_ASSOCIATION_IND", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_CONNECT_INDICATION; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); /* If BAP doesn't like the incoming association, signal SME/CSR */ if ( status != WLANBAP_STATUS_SUCCESS) halStatus = eHAL_STATUS_FAILURE; break; //JEZ081110: Not supported in SME and CSR, yet. #if 0 case eCSR_ROAM_RESULT_KEY_SET: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_KEY_SET */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_KEY_SET", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_KEY_SET_SUCCESS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; #endif //0 case eCSR_ROAM_RESULT_DISASSOC_IND: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_DISASSOC_IND", roamResult); case eCSR_ROAM_RESULT_WDS_DISASSOCIATED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_DISASSOCIATED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_WDS_DISASSOCIATED", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_INDICATES_MEDIA_DISCONNECTION; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_READY_FOR_CONNECTIONS; 
bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; //JEZ081110: Commented out for testing. Test relies upon IBSS. case eCSR_ROAM_RESULT_IBSS_INACTIVE: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_IBSS_INACTIVE", roamResult); case eCSR_ROAM_RESULT_WDS_STOPPED: /* bapRoamCompleteCallback with eCSR_ROAM_RESULT_WDS_STOPPED */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, CSR roamResult = %s (%d)", __func__, "eCSR_ROAM_RESULT_WDS_STOPPED", roamResult); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_READY_FOR_CONNECTIONS; bapEvent.params = pCsrRoamInfo; bapEvent.u1 = roamStatus; bapEvent.u2 = roamResult; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); break; default: VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, unsupported CSR roamResult = %d", __func__, roamResult); break; } #if 0 switch (roamResult) { case eCSR_ROAM_RESULT_IBSS_CONNECT: // we have an IBSS connection... // update our state btampContext->mAssociatedStatus = WLANBAP_STATUS_SUCCESS; btampContext->mAssociated = VOS_TRUE; // update "assocBssid" with the BSSID of the IBSS if (pCsrRoamInfo) memcpy(btampContext->assocBssid, pCsrRoamInfo->peerMacOrBssidForIBSS, 6); // We must update the system role to match that of the // lower layers in case the upper layers decided to try // joining the network in infrastructure mode if the // initial join in IBSS mode fails. Andreas Wolf // (awolf@apple.com) explains the behavior as follows: // "If the client attempts to join an open network and it fails // on the first attempt, it reverts back to b-only mode. This // workaround was specifically put in place to allow the client // to associate to some third party b-only infrastructure APs. 
// It did not take IBSS into account, it seems that the fallback // always forces infrastructure." btampContext->systemRole = eSYSTEM_STA_IN_IBSS_ROLE; if (mLinkStatus == 0) { // enable the flow of data DBGLOG("%s: marking link as up in %s\n", __func__, "eCSR_ROAM_RESULT_IBSS_CONNECT"); mLinkStatus = 1; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkUp); outputQueue->setCapacity(TRANSMIT_QUEUE_SIZE); outputQueue->start(); // Let them know we are ready ((IO80211Interface*) mNetworkIF)->postMessage(APPLE80211_M_ASSOC_DONE); } else { DBGLOG("%s: link is already up in %s\n", __func__, "eCSR_ROAM_RESULT_IBSS_CONNECT"); } break; case eCSR_ROAM_RESULT_IBSS_INACTIVE: // we have no more IBSS peers, so disable the flow of data if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __func__, "eCSR_ROAM_RESULT_IBSS_INACTIVE"); mLinkStatus = (tANI_U8) 0; // JEZ070627: Revisit ? ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); outputQueue->stop(); outputQueue->setCapacity(0); // update our state btampContext->mAssociated = false; } else { DBGLOG("%s: link already down in %s\n", __func__, "eCSR_ROAM_RESULT_IBSS_INACTIVE"); } break; case eCSR_ROAM_RESULT_ASSOCIATED: btampContext->mAssociatedStatus = APPLE80211_STATUS_SUCCESS; btampContext->mAssociated = true; if ((pCsrRoamInfo) && (pCsrRoamInfo->pBssDesc)) { ccpCsrToAppleScanResult(mPMacObject, pCsrRoamInfo->pBssDesc, &scanResult); /* Save away the IEs used by the AP */ ccpCsrToAssocApiedata( mPMacObject, pCsrRoamInfo->pBssDesc, &(btampContext->apiedata)); if (BssidChanged((tCsrBssid*) btampContext->assocBssid, (ether_addr*) scanResult.asr_bssid)) { memcpy(btampContext->assocBssid, scanResult.asr_bssid, 6); ((IO80211Interface*) mNetworkIF)->postMessage(APPLE80211_M_BSSID_CHANGED ); } } ((IO80211Interface*) mNetworkIF)->postMessage(APPLE80211_M_ASSOC_DONE); if (mLinkStatus == 0) { mLinkStatus = (tANI_U8) 1; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkUp); 
DBGLOG("%s: marking link as up in %s\n", __func__, "eCSR_ROAM_RESULT_ASSOCIATED"); outputQueue->setCapacity(TRANSMIT_QUEUE_SIZE); outputQueue->start(); } else { DBGLOG("%s: link is already up in %s\n", __func__, "eCSR_ROAM_RESULT_ASSOCIATED"); } break; case eCSR_ROAM_RESULT_NOT_ASSOCIATED: btampContext->mAssociatedStatus = APPLE80211_STATUS_UNAVAILABLE; btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __func__, "eCSR_ROAM_RESULT_NOT_ASSOCIATED"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __func__, "eCSR_ROAM_RESULT_NOT_ASSOCIATED"); } break; case eCSR_ROAM_RESULT_FAILURE: btampContext->mAssociatedStatus = APPLE80211_STATUS_UNSPECIFIED_FAILURE; btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __func__, "eCSR_ROAM_RESULT_FAILURE"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __func__, "eCSR_ROAM_RESULT_FAILURE"); } break; case eCSR_ROAM_RESULT_DISASSOC_IND: { btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __func__, "eCSR_ROAM_RESULT_DISASSOC_IND"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __func__, "eCSR_ROAM_RESULT_DISASSOC_IND"); } //if (pCsrRoamInfo) // For now, leave this commented out. Until CSR changes integrated. { // Now set the reason and status codes. // Actually, the "result code" field in the tSirSmeDisassocInd should be named reasonCode and NOT statusCode. // "Reason Codes" are found in DisAssoc or DeAuth Ind. "Status Code" fields are found in Rsp Mgmt Frame. 
// For now, we are going to have to (painfully) map the only "result code" type information we have // available at ALL from LIM/CSR. And that is the statusCode field of type tSirResultCodes // BTW, tSirResultCodes is the COMPLETELY WRONG TYPE for this "result code" field. It SHOULD be // of type tSirMacReasonCodes. // Right now, we don't even have that. So, I have to just make up some "reason code" that I will // pretend I found in the incoming DisAssoc Indication. //btampContext->statusCode = ((tpSirSmeDisassocInd) pCallbackInfo)->statusCode; // tSirResultCodes //btampContext->reasonCode = ((tpSirSmeDisassocInd) pCallbackInfo)->statusCode; // tSirResultCodes btampContext->reasonCode = (tANI_U16) eSIR_MAC_UNSPEC_FAILURE_REASON; //tANI_U16 // tSirMacReasonCodes btampContext->deAuthReasonCode = 0; // tANI_U16 // eSIR_SME_DEAUTH_FROM_PEER // Shouldn't the next line really use a tANI_U16? //0; // tANI_U16 // eSIR_SME_DISASSOC_FROM_PEER btampContext->disassocReasonCode = btampContext->reasonCode; // tSirMacReasonCodes // Let's remember the peer who just disassoc'd us //memcpy(btampContext->peerMacAddr, pCsrRoamInfo->peerMacOrBssidForIBSS, 6); } } break; case eCSR_ROAM_RESULT_DEAUTH_IND: { btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __func__, "eCSR_ROAM_RESULT_DEAUTH_IND"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __func__, "eCSR_ROAM_RESULT_DEAUTH_IND"); } //if (pCsrRoamInfo) // For now, leave this commented out. Until CSR changes integrated. { // Now set the reason and status codes. // Actually, the "result code" field in the tSirSmeDeauthInd should be named reasonCode and NOT statusCode. // "Reason Codes" are found in DisAssoc or DeAuth Ind. "Status Code" fields are found in Rsp Mgmt Frame. 
// For now, we are going to have to (painfully) map the only "result code" type information we have // available at ALL from LIM/CSR. And that is the statusCode field of type tSirResultCodes // BTW, tSirResultCodes is the COMPLETELY WRONG TYPE for this "result code" field. It SHOULD be // of type tSirMacReasonCodes. // Right now, we don't even have that. So, I have to just make up some "reason code" that I will // pretend I found in the incoming DeAuth Indication. //btampContext->statusCode = ((tpSirSmeDeauthInd) pCallbackInfo)->statusCode; // tSirResultCodes //btampContext->reasonCode = ((tpSirSmeDeauthInd) pCallbackInfo)->statusCode; // tSirResultCodes btampContext->reasonCode = (tANI_U16) eSIR_MAC_UNSPEC_FAILURE_REASON; //tANI_U16 // tSirMacReasonCodes btampContext->disassocReasonCode = 0; // tANI_U16 // eSIR_SME_DISASSOC_FROM_PEER // Shouldn't the next line really use a tANI_U16? //0; // tANI_U16 // eSIR_SME_DEAUTH_FROM_PEER btampContext->deAuthReasonCode = btampContext->reasonCode; // tSirMacReasonCodes // Let's remember the peer who just de-auth'd us //memcpy(btampContext->peerMacAddr, ((tpSirSmeDeauthInd) pCallbackInfo)->peerMacAddr, 6); } } break; case eCSR_ROAM_RESULT_MIC_ERROR_UNICAST: //if (eCSR_ROAM_MIC_ERROR_IND == roamStatus) // Make sure { if (btampContext->mTKIPCounterMeasures) { ((IO80211Interface*) mNetworkIF)->postMessage(APPLE80211_M_MIC_ERROR_UCAST); DBGLOG("%s: TKIP Countermeasures in effect in %s\n", __func__, "eCSR_ROAM_RESULT_MIC_ERROR_UNICAST"); } else { DBGLOG("%s: TKIP Countermeasures disabled in %s\n", __func__, "eCSR_ROAM_RESULT_MIC_ERROR_UNICAST"); } } break; case eCSR_ROAM_RESULT_MIC_ERROR_GROUP: //if (eCSR_ROAM_MIC_ERROR_IND == roamStatus) // Make sure { if (btampContext->mTKIPCounterMeasures) { ((IO80211Interface*) mNetworkIF)->postMessage(APPLE80211_M_MIC_ERROR_MCAST); DBGLOG("%s: TKIP Countermeasures in effect in %s\n", __func__, "eCSR_ROAM_RESULT_MIC_ERROR_GROUP"); } else { DBGLOG("%s: TKIP Countermeasures disabled in %s\n", 
__func__, "eCSR_ROAM_RESULT_MIC_ERROR_GROUP"); } } break; default: break; } switch (roamStatus) { case eCSR_ROAM_ROAMING_START: DBGLOG("%s: In %s\n", __func__, "eCSR_ROAM_ROAMING_START"); // This only gets called when CSR decides to roam on its own - due to lostlink. // Apple still needs to be told. if ((pCsrRoamInfo) && (pCsrRoamInfo->pConnectedProfile) && (pCsrRoamInfo->pConnectedProfile->pBssDesc)) { memcpy(bssid.ether_addr_octet, pCsrRoamInfo->pConnectedProfile->pBssDesc->bssId, sizeof(tSirMacAddr)); apple80211Interface->willRoam(&bssid); // Return result isn't significant DBGLOG("%s: willRoam returns\n", __func__); } break; case eCSR_ROAM_SHOULD_ROAM: if ((pCsrRoamInfo) && (pCsrRoamInfo->pBssDesc)) { // pCallbackInfo points to the BSS desc. Convert to Apple Scan Result. halStatus = ccpCsrToAppleScanResult( mPMacObject, pCsrRoamInfo->pBssDesc, &scanResult); if ( halStatus != 0 ) return eHAL_STATUS_FAILURE; roamAccepted = apple80211Interface->shouldRoam(&scanResult); // Return result is crucial if (roamAccepted == true) { // If the roam is acceptable, return SUCCESS DBGLOG("%s: shouldRoam returns \"acceptable\"\n", __func__); //#if 0 // Actually, before returning, immediately signal willRoam // This is a workaround for a CSR bug. Eventually, when // eCSR_ROAM_ASSOCIATION_START gets called WITH callback param p1 // pointing to a tBssDescription, this work-around can be removed. 
memcpy(bssid.ether_addr_octet, pCsrRoamInfo->pBssDesc->bssId, sizeof(tSirMacAddr)); apple80211Interface->willRoam(&bssid); // Return result isn't significant DBGLOG("%s: willRoam (called out of order) returns\n", __func__); DBGLOG(" with BSSID = " MAC_ADDR_STRING(bssid.ether_addr_octet)); //#endif return eHAL_STATUS_SUCCESS; } else { // If the roam is NOT acceptable, return FAILURE DBGLOG("%s: shouldRoam returns \"NOT acceptable\"\n", __func__); return eHAL_STATUS_FAILURE; } } break; case eCSR_ROAM_DISASSOCIATED: //if (eCSR_ROAM_RESULT_FORCED == roamResult || eCSR_ROAM_RESULT_MIC_ERROR == roamResult) { btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __func__, "eCSR_ROAM_DISASSOCIATED"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __func__, "eCSR_ROAM_DISASSOCIATED"); } } break; case eCSR_ROAM_LOSTLINK: btampContext->mAssociatedStatus = APPLE80211_STATUS_UNSPECIFIED_FAILURE; btampContext->mAssociated = false; if (mLinkStatus != 0) { DBGLOG("%s: marking link as down in %s\n", __func__, "eCSR_ROAM_LOSTLINK"); mLinkStatus = (tANI_U8) 0; ((IO80211Interface*) mNetworkIF)->setLinkState(kIO80211NetworkLinkDown); } else { DBGLOG("%s: link already down in %s\n", __func__, "eCSR_ROAM_LOSTLINK"); } break; case eCSR_ROAM_ASSOCIATION_START: DBGLOG("%s: In %s\n", __func__, "eCSR_ROAM_ASSOCIATION_START"); #if 0 // This is the right place to call willRoam - for an "initial" association. // But, unfortunately, when eCSR_ROAM_ASSOCIATION_START gets called, // it doesn't have a pointer to the tBssDescription in the roaming callback // routines parameter p1 (pCallbackInfo in SetWextState). So, don't use this code, yet. 
if ((pCsrRoamInfo) && (pCsrRoamInfo->pBssDesc) { memcpy(bssid.ether_addr_octet, pCsrRoamInfo->pBssDesc->bssId, 6); apple80211Interface->willRoam(&bssid); // Return result isn't significant DBGLOG("%s: willRoam returns\n", __func__); DBGLOG(" with BSSID = " MAC_ADDR_STRING(bssid.ether_addr_octet)); } #endif //0 break; case eCSR_ROAM_ASSOCIATION_COMPLETION: DBGLOG("%s: In %s\n", __func__, "eCSR_ROAM_ASSOCIATION_COMPLETION"); break; case eCSR_ROAM_MIC_ERROR_IND: // Handled in eCSR_ROAM_RESULT_MIC_ERROR_UNICAST and GROUP, above case eCSR_ROAM_CANCELLED: case eCSR_ROAM_ROAMING_COMPLETION: case eCSR_ROAM_SCAN_FOUND_NEW_BSS: default: break; } #endif //0 return halStatus; } /*---------------------------------------------------------------------------- Host Controller Interface Procedural API ---------------------------------------------------------------------------*/ /** BT v3.0 Link Control commands */ /*---------------------------------------------------------------------------- Each of the next eight command result in asynchronous events (e.g., HCI_PHYSICAL_LINK_COMPLETE_EVENT, HCI_LOGICAL_LINK_COMPLETE_EVENT, etc...) These are signalled thru the event callback. (I.E., (*tpWLAN_BAPEventCB).) ---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPPhysicalLinkCreate() DESCRIPTION Implements the actual HCI Create Physical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. WLANBAP_GetNewHndl has to be called before every call to WLAN_BAPPhysicalLinkCreate. Since the context is per physical link. pBapHCIPhysLinkCreate: pointer to the "HCI Create Physical Link" Structure. pHddHdl: The context passed in by the caller. (e.g., BSL specific context) IN/OUT pBapHCIEvent: Return event value for the command status event. 
(The caller of this routine is responsible for sending the Command Status event up the HCI interface.) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIPhysLinkCreate is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPPhysicalLinkCreate ( ptBtampHandle btampHandle, tBtampTLVHCI_Create_Physical_Link_Cmd *pBapHCIPhysLinkCreate, v_PVOID_t pHddHdl, /* BSL passes in its specific context */ /* And I get phy_link_handle from the Command */ tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tWLAN_BAPEvent bapEvent; /* State machine event */ VOS_STATUS vosStatus; /* I am using btampContext, instead of pBapPhysLinkMachine */ //tWLAN_BAPbapPhysLinkMachine *pBapPhysLinkMachine; ptBtampContext btampContext = (ptBtampContext) btampHandle; /* btampContext value */ v_U8_t status; /* return the BT-AMP status here */ BTAMPFSM_INSTANCEDATA_T *instanceVar = &(btampContext->bapPhysLinkMachine); /* Validate params */ if ((pBapHCIPhysLinkCreate == NULL) || (NULL == btampContext)) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: btampHandle value: %p, pBapHCIPhysLinkCreate is %p", __func__, btampHandle, pBapHCIPhysLinkCreate); return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %p", __func__, btampHandle); if(DISCONNECTED != instanceVar->stateVar) { /* Create/Accept Phy link request in invalid state */ status = WLANBAP_ERROR_MAX_NUM_CNCTS; } else { /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_HCI_PHYSICAL_LINK_CREATE; bapEvent.params = pBapHCIPhysLinkCreate; //bapEvent.callback = pBapHCIPhysLinkCreateCB; /* Allocate a new state machine instance */ /* There will only ever be one of these (NB: Don't assume this.) 
*/ /* So for now this returns a pointer to a static structure */ /* (With all state set to initial values) */ vosStatus = WLANBAP_CreateNewPhyLinkCtx ( btampHandle, pBapHCIPhysLinkCreate->phy_link_handle, /* I get phy_link_handle from the Command */ pHddHdl, /* BSL passes in its specific context */ &btampContext, /* Handle to return per assoc btampContext value in */ BT_INITIATOR); /* BT_INITIATOR */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampContext value: %p", __func__, btampContext); /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); } /* Format the command status event to return... */ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; pBapHCIEvent->u.btampCommandStatusEvent.present = 1; pBapHCIEvent->u.btampCommandStatusEvent.status = status; pBapHCIEvent->u.btampCommandStatusEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_CREATE_PHYSICAL_LINK_CMD; /* ... */ return VOS_STATUS_SUCCESS; } /* WLAN_BAPPhysicalLinkCreate */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPPhysicalLinkAccept() DESCRIPTION Implements the actual HCI Accept Physical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIPhysLinkAccept: pointer to the "HCI Accept Physical Link" Structure. pHddHdl: The context passed in by the caller. (e.g., BSL specific context) IN/OUT pBapHCIEvent: Return event value for the command status event. (The caller of this routine is responsible for sending the Command Status event up the HCI interface.) 
 RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  pointer to pBapHCIPhysLinkAccept is NULL
    VOS_STATUS_SUCCESS:  Success

  SIDE EFFECTS

----------------------------------------------------------------------------*/
VOS_STATUS
WLAN_BAPPhysicalLinkAccept
(
  ptBtampHandle btampHandle,
  tBtampTLVHCI_Accept_Physical_Link_Cmd   *pBapHCIPhysLinkAccept,
  v_PVOID_t      pHddHdl,   /* BSL passes in its specific context */
      /* And I get phy_link_handle from the Command */
  tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */
      /* Including Command Complete and Command Status*/
)
{
    tWLAN_BAPEvent bapEvent; /* State machine event */
    VOS_STATUS  vosStatus;
    /* I am using btampContext, instead of pBapPhysLinkMachine */
    //tWLAN_BAPbapPhysLinkMachine *pBapPhysLinkMachine;
    ptBtampContext btampContext = (ptBtampContext) btampHandle; /* btampContext value */
    v_U8_t status;    /* return the BT-AMP status here */
    BTAMPFSM_INSTANCEDATA_T *instanceVar;

    /* Validate params.  Note: btampContext is just btampHandle cast, so this
       also guards every dereference of btampContext below. */
    if ((pBapHCIPhysLinkAccept == NULL) || (NULL == btampContext))
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "%s: btampHandle value: %p, pBapHCIPhysLinkAccept is %p",
                   __func__, btampHandle, pBapHCIPhysLinkAccept);
        return VOS_STATUS_E_FAULT;
    }
    VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH,
               "%s: btampHandle value: %p", __func__, btampHandle);

    instanceVar = &(btampContext->bapPhysLinkMachine);
    if (DISCONNECTED != instanceVar->stateVar)
    {
        /* Create/Accept Phy link request in invalid state: only one physical
           link is supported, so anything but DISCONNECTED is rejected. */
        status = WLANBAP_ERROR_MAX_NUM_CNCTS;
    }
    else
    {
        /* Fill in the event structure */
        bapEvent.event = eWLAN_BAP_HCI_PHYSICAL_LINK_ACCEPT;
        bapEvent.params = pBapHCIPhysLinkAccept;
        //bapEvent.callback = pBapHCIPhysLinkAcceptCB;

        /* Allocate a new state machine instance */
        /* There will only ever be one of these (NB: Don't assume this.) */
        /* So for now this returns a pointer to a static structure */
        /* (With all state set to initial values) */
        /* NOTE(review): the return value of WLANBAP_CreateNewPhyLinkCtx is
           overwritten by the btampFsm call below without being checked —
           confirm a failed context creation cannot reach the FSM. */
        vosStatus = WLANBAP_CreateNewPhyLinkCtx (
                btampHandle,
                pBapHCIPhysLinkAccept->phy_link_handle, /*  I get phy_link_handle from the Command */
                pHddHdl,   /* BSL passes in its specific context */
                &btampContext, /* Handle to return per assoc btampContext value in */
                BT_RESPONDER); /* BT_RESPONDER */

        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH,
                   "%s: btampContext value: %p", __func__, btampContext);

        /* Handle event; the FSM writes the BT-AMP status into 'status',
           so 'status' is set on both paths before it is read below. */
        vosStatus = btampFsm(btampContext, &bapEvent, &status);
    }

    /* Format the command status event to return... */
    pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT;
    pBapHCIEvent->u.btampCommandStatusEvent.present = 1;
    pBapHCIEvent->u.btampCommandStatusEvent.status = status;
    pBapHCIEvent->u.btampCommandStatusEvent.num_hci_command_packets = 1;
    pBapHCIEvent->u.btampCommandStatusEvent.command_opcode =
        BTAMP_TLV_HCI_ACCEPT_PHYSICAL_LINK_CMD;

    /* ... */
    return VOS_STATUS_SUCCESS;
} /* WLAN_BAPPhysicalLinkAccept */

/*----------------------------------------------------------------------------

  FUNCTION    WLAN_BAPPhysicalLinkDisconnect()

  DESCRIPTION
    Implements the actual HCI Disconnect Physical Link command

  DEPENDENCIES
    NA.

  PARAMETERS

    IN
    btampHandle: pointer to the BAP handle.  Returned from WLANBAP_GetNewHndl.
    pBapHCIPhysLinkDisconnect:  pointer to the "HCI Disconnect Physical Link" Structure.

    IN/OUT
    pBapHCIEvent:  Return event value for the command status event.
                (The caller of this routine is responsible for sending
                the Command Status event up the HCI interface.)
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIPhysLinkDisconnect is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPPhysicalLinkDisconnect ( ptBtampHandle btampHandle, tBtampTLVHCI_Disconnect_Physical_Link_Cmd *pBapHCIPhysLinkDisconnect, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tWLAN_BAPEvent bapEvent; /* State machine event */ VOS_STATUS vosStatus; /* I am using btampContext, instead of pBapPhysLinkMachine */ //tWLAN_BAPbapPhysLinkMachine *pBapPhysLinkMachine; ptBtampContext btampContext = (ptBtampContext) btampHandle; /* btampContext value */ v_U8_t status; /* return the BT-AMP status here */ /* Validate params */ if (pBapHCIPhysLinkDisconnect == NULL) { return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %p", __func__, btampHandle); /* Validate the Physical link handle */ if (pBapHCIPhysLinkDisconnect->phy_link_handle != btampContext->phy_link_handle) { /* Format the command status event to return... 
*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; pBapHCIEvent->u.btampCommandStatusEvent.present = 1; pBapHCIEvent->u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; pBapHCIEvent->u.btampCommandStatusEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_DISCONNECT_PHYSICAL_LINK_CMD; return VOS_STATUS_SUCCESS; } /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_HCI_PHYSICAL_LINK_DISCONNECT; bapEvent.params = pBapHCIPhysLinkDisconnect; VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampContext value: %p", __func__, btampContext); /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_MAC_READY_FOR_CONNECTIONS; bapEvent.params = pBapHCIPhysLinkDisconnect; /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); /* Format the command status event to return... */ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; pBapHCIEvent->u.btampCommandStatusEvent.present = 1; pBapHCIEvent->u.btampCommandStatusEvent.status = status; pBapHCIEvent->u.btampCommandStatusEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_DISCONNECT_PHYSICAL_LINK_CMD; /* ... */ return VOS_STATUS_SUCCESS; } /* WLAN_BAPPhysicalLinkDisconnect */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPLogicalLinkCreate() DESCRIPTION Implements the actual HCI Create Logical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCILogLinkCreate: pointer to the "HCI Create Logical Link" Structure. IN/OUT pBapHCIEvent: Return event value for the command status event. (The caller of this routine is responsible for sending the Command Status event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCILogLinkCreate is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPLogicalLinkCreate ( ptBtampHandle btampHandle, tBtampTLVHCI_Create_Logical_Link_Cmd *pBapHCILogLinkCreate, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */ VOS_STATUS vosStatus; ptBtampContext btampContext = (ptBtampContext) btampHandle; v_U16_t log_link_index = 0; BTAMPFSM_INSTANCEDATA_T *instanceVar = &(btampContext->bapPhysLinkMachine); VOS_STATUS retval; v_U16_t index_for_logLinkCtx = 0; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /* Validate params */ if (btampHandle == NULL) { return VOS_STATUS_E_FAULT; } /* Validate params */ if (pBapHCILogLinkCreate == NULL) { return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %p", __func__, btampHandle); /* Validate the BAP state to accept the logical link request Logical Link create/accept requests are allowed only in CONNECTED state */ /* Form and immediately return the command status event... 
*/ bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; bapHCIEvent.u.btampCommandStatusEvent.present = 1; bapHCIEvent.u.btampCommandStatusEvent.num_hci_command_packets = 1; bapHCIEvent.u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_CREATE_LOGICAL_LINK_CMD; retval = VOS_STATUS_E_FAILURE; if(DISCONNECTED == instanceVar->stateVar) { /* Create Logical link request in invalid state */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; } else if (CONNECTED != instanceVar->stateVar) { /* Create Logical link request in invalid state */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; } else if (pBapHCILogLinkCreate->phy_link_handle != btampContext->phy_link_handle) { /* Invalid Physical link handle */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; } else { btampContext->btamp_logical_link_state = WLAN_BAPLogLinkInProgress; if( TRUE == btampContext->btamp_logical_link_cancel_pending ) { pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_NO_CNCT; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; btampContext->btamp_logical_link_cancel_pending = FALSE; } else { /* If btamp_async_logical_link_create is set, we will seralize the req on MC thread & handle it there after; If the above flag is not set respond to HCI the sync way as before */ if(FALSE == btampContext->btamp_async_logical_link_create) { /* Allocate a logical link index for these flow specs */ vosStatus = WLANBAP_CreateNewLogLinkCtx( btampContext, /* per assoc btampContext value */ pBapHCILogLinkCreate->phy_link_handle, /* I get phy_link_handle from 
the Command */ pBapHCILogLinkCreate->tx_flow_spec, /* I get tx_flow_spec from the Command */ pBapHCILogLinkCreate->rx_flow_spec, /* I get rx_flow_spec from the Command */ &log_link_index /* Return the logical link index here */ ); if (VOS_STATUS_SUCCESS != vosStatus) { /* Invalid flow spec format */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } else { retval = VOS_STATUS_SUCCESS; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_STATUS_SUCCESS; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_STATUS_SUCCESS; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkOpen; } } else { btampContext->btamp_logical_link_req_info.phyLinkHandle = pBapHCILogLinkCreate->phy_link_handle; vos_mem_copy(btampContext->btamp_logical_link_req_info.txFlowSpec, pBapHCILogLinkCreate->tx_flow_spec, 18); vos_mem_copy(btampContext->btamp_logical_link_req_info.rxFlowSpec, pBapHCILogLinkCreate->rx_flow_spec, 18); btampContext->btamp_async_logical_link_create = FALSE; vosStatus = btampEstablishLogLink(btampContext); if(VOS_STATUS_SUCCESS == vosStatus) { retval = VOS_STATUS_E_BUSY;//this will make sure event complete is not sent to HCI } else { pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } } } } vosStatus = (*btampContext->pBapHCIEventCB) ( btampContext->pHddHdl, /* this refers to the BSL per connection context */ &bapHCIEvent, /* This now encodes ALL event types */ VOS_TRUE /* Flag to indicate assoc-specific event */ ); index_for_logLinkCtx = log_link_index >> 8; /* Format the Logical Link Complete event to return... 
*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_LOGICAL_LINK_COMPLETE_EVENT; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.present = 1; /* Return the logical link index here */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.log_link_handle = log_link_index; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.phy_link_handle = pBapHCILogLinkCreate->phy_link_handle; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.flow_spec_id = btampContext->btampLogLinkCtx[index_for_logLinkCtx].btampFlowSpec.flow_spec_id; /* ... */ return retval; } /* WLAN_BAPLogicalLinkCreate */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPLogicalLinkAccept() DESCRIPTION Implements the actual HCI Accept Logical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCILogLinkAccept: pointer to the "HCI Accept Logical Link" Structure. IN/OUT pBapHCIEvent: Return event value for the command status event. (The caller of this routine is responsible for sending the Command Status event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCILogLinkAccept is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPLogicalLinkAccept ( ptBtampHandle btampHandle, tBtampTLVHCI_Accept_Logical_Link_Cmd *pBapHCILogLinkAccept, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */ VOS_STATUS vosStatus; ptBtampContext btampContext = (ptBtampContext) btampHandle; v_U16_t log_link_index = 0; BTAMPFSM_INSTANCEDATA_T *instanceVar = &(btampContext->bapPhysLinkMachine); VOS_STATUS retval; v_U16_t index_for_logLinkCtx; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /* Validate params */ if (btampHandle == NULL) { return VOS_STATUS_E_FAULT; } /* Validate params */ if (pBapHCILogLinkAccept == NULL) { return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %p", __func__, btampHandle); /* Validate the BAP state to accept the logical link request Logical Link create/accept requests are allowed only in CONNECTED state */ /* Form and immediately return the command status event... 
*/ bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; bapHCIEvent.u.btampCommandStatusEvent.present = 1; bapHCIEvent.u.btampCommandStatusEvent.num_hci_command_packets = 1; bapHCIEvent.u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_ACCEPT_LOGICAL_LINK_CMD; retval = VOS_STATUS_E_FAILURE; if(DISCONNECTED == instanceVar->stateVar) { /* Create Logical link request in invalid state */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; } else if (CONNECTED != instanceVar->stateVar) { /* Create Logical link request in invalid state */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_CMND_DISALLOWED; } else if (pBapHCILogLinkAccept->phy_link_handle != btampContext->phy_link_handle) { /* Invalid Physical link handle */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; } else { btampContext->btamp_logical_link_state = WLAN_BAPLogLinkInProgress; if( TRUE == btampContext->btamp_logical_link_cancel_pending ) { pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_NO_CNCT; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; btampContext->btamp_logical_link_cancel_pending = FALSE; } else { /* If btamp_async_logical_link_create is set, we will seralize the req on MC thread & handle it there after; If the above flag is not set respond to HCI the sync way as before */ if(FALSE == btampContext->btamp_async_logical_link_create) { /* Allocate a logical link index for these flow specs */ vosStatus = WLANBAP_CreateNewLogLinkCtx( btampContext, /* per assoc btampContext value */ pBapHCILogLinkAccept->phy_link_handle, /* I get phy_link_handle from 
the Command */ pBapHCILogLinkAccept->tx_flow_spec, /* I get tx_flow_spec from the Command */ pBapHCILogLinkAccept->rx_flow_spec, /* I get rx_flow_spec from the Command */ &log_link_index /* Return the logical link index here */ ); if (VOS_STATUS_SUCCESS != vosStatus) { /* Invalid flow spec format */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } else { retval = VOS_STATUS_SUCCESS; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_STATUS_SUCCESS; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_STATUS_SUCCESS; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkOpen; } } else { btampContext->btamp_logical_link_req_info.phyLinkHandle = pBapHCILogLinkAccept->phy_link_handle; vos_mem_copy(btampContext->btamp_logical_link_req_info.txFlowSpec, pBapHCILogLinkAccept->tx_flow_spec, 18); vos_mem_copy(btampContext->btamp_logical_link_req_info.rxFlowSpec, pBapHCILogLinkAccept->rx_flow_spec, 18); btampContext->btamp_async_logical_link_create = FALSE; vosStatus = btampEstablishLogLink(btampContext); if(VOS_STATUS_SUCCESS == vosStatus) { retval = VOS_STATUS_E_BUSY;//this will make sure event complete is not sent to HCI } else { pBapHCIEvent->u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } } } } vosStatus = (*btampContext->pBapHCIEventCB) ( btampContext->pHddHdl, /* this refers to the BSL per connection context */ &bapHCIEvent, /* This now encodes ALL event types */ VOS_TRUE /* Flag to indicate assoc-specific event */ ); index_for_logLinkCtx = log_link_index >> 8; /* Format the Logical Link Complete event to return... 
*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_LOGICAL_LINK_COMPLETE_EVENT; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.present = 1; /* Return the logical link index here */ pBapHCIEvent->u.btampLogicalLinkCompleteEvent.log_link_handle = log_link_index; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.phy_link_handle = pBapHCILogLinkAccept->phy_link_handle; pBapHCIEvent->u.btampLogicalLinkCompleteEvent.flow_spec_id = btampContext->btampLogLinkCtx[index_for_logLinkCtx].btampFlowSpec.flow_spec_id; /* ... */ return retval; } /* WLAN_BAPLogicalLinkAccept */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPLogicalLinkDisconnect() DESCRIPTION Implements the actual HCI Disconnect Logical Link command DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCILogLinkDisconnect: pointer to the "HCI Disconnect Logical Link" Structure. IN/OUT pBapHCIEvent: Return event value for the command status event. (The caller of this routine is responsible for sending the Command Status event up the HCI interface.) 
 RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  pointer to pBapHCILogLinkDisconnect is NULL
    VOS_STATUS_SUCCESS:  Success

  SIDE EFFECTS

----------------------------------------------------------------------------*/
VOS_STATUS
WLAN_BAPLogicalLinkDisconnect
(
  ptBtampHandle btampHandle,
  tBtampTLVHCI_Disconnect_Logical_Link_Cmd   *pBapHCILogLinkDisconnect,
  tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */
      /* Including Command Complete and Command Status*/
)
{
    tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */
    ptBtampContext btampContext = (ptBtampContext) btampHandle;
    tpBtampLogLinkCtx pLogLinkContext;
    VOS_STATUS retval = VOS_STATUS_SUCCESS;
    v_U8_t log_link_index;
    /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

    /*------------------------------------------------------------------------
      Sanity check
     ------------------------------------------------------------------------*/
    if (( NULL == pBapHCILogLinkDisconnect ) ||
        ( NULL == btampContext))
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Critical error: Invalid input parameter on %s",
                   __func__);
        return VOS_STATUS_E_FAULT;
    }

    /* Derive logical link index from handle: the upper byte of the 16-bit
       handle is the slot index into btampLogLinkCtx[]. */
    log_link_index = ((pBapHCILogLinkDisconnect->log_link_handle) >> 8);

    if ( log_link_index >= WLANBAP_MAX_LOG_LINKS )
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Critical error: Invalid input parameter on %s",
                   __func__);
        /* Fill in the event code to propagate the event notification to BRM
           BRM generates the Command status Event based on this. */
        pBapHCIEvent->bapHCIEventCode =
            BTAMP_TLV_HCI_DISCONNECT_LOGICAL_LINK_COMPLETE_EVENT;
        pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.present = 1;
        pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.status =
            WLANBAP_ERROR_INVALID_HCI_CMND_PARAM;
        return VOS_STATUS_E_INVAL;
    }

#ifdef BAP_DEBUG
    /* Trace the tBtampCtx being passed in. */
    VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH,
               "WLAN BAP Context Monitor: btampContext value = %p in %s:%d",
               btampContext, __func__, __LINE__ );
#endif //BAP_DEBUG

    bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT;
    bapHCIEvent.u.btampCommandStatusEvent.present = 1;
    bapHCIEvent.u.btampCommandStatusEvent.num_hci_command_packets = 1;
    bapHCIEvent.u.btampCommandStatusEvent.command_opcode =
        BTAMP_TLV_HCI_DISCONNECT_LOGICAL_LINK_CMD;

    /*------------------------------------------------------------------------
      FIXME: Validate the Logical Link handle, Generation and freeing...
      Here the Logical link is not validated and assumed that it is correct
      to. get the Logical link context. .
     ------------------------------------------------------------------------*/
    pLogLinkContext =
        &(btampContext->btampLogLinkCtx[log_link_index]);

    // Validate whether the context is active: the slot must be in use and
    // the full 16-bit handle must match what was issued at create time.
    if ((VOS_FALSE == pLogLinkContext->present) ||
        (pBapHCILogLinkDisconnect->log_link_handle !=
             pLogLinkContext->log_link_handle))
    {
        /* If status is failed, the platform specific layer generates the
           command status event with proper status */
        pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.status =
            WLANBAP_ERROR_INVALID_HCI_CMND_PARAM;
        bapHCIEvent.u.btampCommandStatusEvent.status =
            WLANBAP_ERROR_NO_CNCT;
        retval = VOS_STATUS_E_FAILURE;

#ifdef BAP_DEBUG
        /* Log the error. */
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "%s:%d Invalid Logical Link handle(should be) = %d(%d)",
                   __func__, __LINE__,
                   pBapHCILogLinkDisconnect->log_link_handle,
                   pLogLinkContext->log_link_handle);
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   " Logical Link index = %d", log_link_index);
#endif //BAP_DEBUG
    }
    else
    {
        /* Form and return the command status event...
           then tear down this logical link's bookkeeping. */
        bapHCIEvent.u.btampCommandStatusEvent.status =
            WLANBAP_STATUS_SUCCESS;
        pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.status =
            WLANBAP_STATUS_SUCCESS;

        pLogLinkContext->present = VOS_FALSE;
        pLogLinkContext->uTxPktCompleted = 0;
        pLogLinkContext->log_link_handle = 0;
        /* Decrement the total logical link count */
        btampContext->total_log_link_index--;
        btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed;
    }

    /* Notify the Command status Event */
    (*btampContext->pBapHCIEventCB)
        (
         btampContext->pHddHdl,   /* this refers to the BSL per connection context */
         &bapHCIEvent, /* This now encodes ALL event types */
         VOS_TRUE /* Flag to indicate assoc-specific event */
        );

    /* Format the Logical Link Complete event to return... */
    pBapHCIEvent->bapHCIEventCode =
        BTAMP_TLV_HCI_DISCONNECT_LOGICAL_LINK_COMPLETE_EVENT;
    pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.present = 1;
    /* Return the logical link index here */
    pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.log_link_handle =
        pBapHCILogLinkDisconnect->log_link_handle;
    pBapHCIEvent->u.btampDisconnectLogicalLinkCompleteEvent.reason =
        WLANBAP_ERROR_TERM_BY_LOCAL_HOST;

    return retval;
} /* WLAN_BAPLogicalLinkDisconnect */

/*----------------------------------------------------------------------------

  FUNCTION    WLAN_BAPLogicalLinkCancel()

  DESCRIPTION
    Implements the actual HCI Cancel Logical Link command

  DEPENDENCIES
    NA.

  PARAMETERS

    IN
    btampHandle: pointer to the BAP handle.  Returned from WLANBAP_GetNewHndl.
    pBapHCILogLinkCancel:  pointer to the "HCI Cancel Logical Link" Structure.

    IN/OUT
    pBapHCIEvent:  Return event value for the command complete event.
                (The caller of this routine is responsible for sending
                the Command Complete event up the HCI interface.)
                (BTW, the required "HCI Logical Link Complete Event" will be
                generated by the BAP state machine and sent up via the
                (*tpWLAN_BAPEventCB).)
 RETURN VALUE
    The result code associated with performing the operation

    VOS_STATUS_E_FAULT:  pointer to pBapHCILogLinkCancel is NULL
    VOS_STATUS_SUCCESS:  Success

  SIDE EFFECTS

----------------------------------------------------------------------------*/
VOS_STATUS
WLAN_BAPLogicalLinkCancel
(
  ptBtampHandle btampHandle,
  tBtampTLVHCI_Logical_Link_Cancel_Cmd   *pBapHCILogLinkCancel,
  tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */
      /* Including Command Complete and Command Status*/
)
{
    ptBtampContext btampContext;
    BTAMPFSM_INSTANCEDATA_T *instanceVar;

    /* Validate params */
    if ((btampHandle == NULL) || (pBapHCILogLinkCancel == NULL) ||
        (pBapHCIEvent == NULL))
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "%s: Null Parameters Not allowed", __func__);
        return VOS_STATUS_E_FAULT;
    }

    btampContext = (ptBtampContext) btampHandle;
    /* NOTE(review): instanceVar is assigned but not read below — the cancel
       decision is based on btamp_logical_link_state only; confirm intended. */
    instanceVar = &(btampContext->bapPhysLinkMachine);

    /* Form and immediately return the command status event... */
    pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_COMPLETE_EVENT;
    pBapHCIEvent->u.btampCommandCompleteEvent.present = 1;
    pBapHCIEvent->u.btampCommandCompleteEvent.command_opcode =
        BTAMP_TLV_HCI_LOGICAL_LINK_CANCEL_CMD;
    pBapHCIEvent->u.btampCommandCompleteEvent.num_hci_command_packets = 1;

    if (pBapHCILogLinkCancel->phy_link_handle != btampContext->phy_link_handle)
    {
        /* Physical link handle does not match the (single) active link. */
        pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.status =
            WLANBAP_ERROR_NO_CNCT;
    }
    else
    {
        /* As the logical link create is returned immediately, the logical link
           is created and so cancel can not return success.
           And it returns WLANBAP_ERROR_NO_CNCT if not connected or
           WLANBAP_ERROR_MAX_NUM_ACL_CNCTS if connected */
        if (WLAN_BAPLogLinkClosed == btampContext->btamp_logical_link_state )
        {
            /* Cancel Logical link request in invalid state */
            pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.status =
                WLANBAP_ERROR_NO_CNCT;
        }
        else if (WLAN_BAPLogLinkOpen == btampContext->btamp_logical_link_state )
        {
            /* Cancel Logical link request in conected state */
            pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.status =
                WLANBAP_ERROR_MAX_NUM_ACL_CNCTS;
        }
        else if (WLAN_BAPLogLinkInProgress == btampContext->btamp_logical_link_state )
        {
            /* Cancel Logical link request in progress state, need to fail
               logical link creation as well: the pending flag is picked up
               by the in-flight create/accept path. */
            btampContext->btamp_logical_link_cancel_pending = TRUE;
            pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.status =
                WLANBAP_STATUS_SUCCESS;
        }
        else
        {
            pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.status =
                WLANBAP_ERROR_NO_CNCT;
        }
    }

    pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.phy_link_handle =
        pBapHCILogLinkCancel->phy_link_handle;
    /* Since the status is not success, the Tx flow spec Id is not meaningful
       and filling with 0 */
    pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Logical_Link_Cancel.tx_flow_spec_id =
        pBapHCILogLinkCancel->tx_flow_spec_id;

    return VOS_STATUS_SUCCESS;
} /* WLAN_BAPLogicalLinkCancel */

/*----------------------------------------------------------------------------

  FUNCTION    WLAN_BAPFlowSpecModify()

  DESCRIPTION
    Implements the actual HCI Modify Logical Link command
    Produces an asynchronous flow spec modify complete event.
    Through the event callback.

  DEPENDENCIES
    NA.

  PARAMETERS

    IN
    btampHandle: pointer to the BAP handle.  Returned from WLANBAP_GetNewHndl.
    pBapHCIFlowSpecModify:  pointer to the "HCI Flow Spec Modify" Structure.

    IN/OUT
    pBapHCIEvent:  Return event value for the command status event.
(The caller of this routine is responsible for sending the Command Status event up the HCI interface.) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIFlowSpecModify is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPFlowSpecModify ( ptBtampHandle btampHandle, tBtampTLVHCI_Flow_Spec_Modify_Cmd *pBapHCIFlowSpecModify, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { v_U16_t index_for_logLinkHandle = 0; ptBtampContext btampContext; tpBtampLogLinkCtx pLogLinkContext; v_U32_t retval; VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */ /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /* Validate params */ if ((btampHandle == NULL) || (pBapHCIFlowSpecModify == NULL) || (pBapHCIEvent == NULL)) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: Null Parameters Not allowed", __func__); return VOS_STATUS_E_FAULT; } btampContext = (ptBtampContext) btampHandle; index_for_logLinkHandle = pBapHCIFlowSpecModify->log_link_handle >> 8; /* Return the logical link index here */ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, " %s:index_for_logLinkHandle=%d", __func__,index_for_logLinkHandle); bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_STATUS_EVENT; bapHCIEvent.u.btampCommandStatusEvent.present = 1; bapHCIEvent.u.btampCommandStatusEvent.num_hci_command_packets = 1; bapHCIEvent.u.btampCommandStatusEvent.command_opcode = BTAMP_TLV_HCI_FLOW_SPEC_MODIFY_CMD; /*------------------------------------------------------------------------ Evaluate the Tx and Rx Flow specification for this logical link. 
------------------------------------------------------------------------*/ // Currently we only support flow specs with service types of BE (0x01) /*------------------------------------------------------------------------ Now configure the Logical Link context. ------------------------------------------------------------------------*/ pLogLinkContext = &(btampContext->btampLogLinkCtx[index_for_logLinkHandle]); /* Extract Tx flow spec into the context structure */ retval = btampUnpackTlvFlow_Spec((void *)btampContext, pBapHCIFlowSpecModify->tx_flow_spec, WLAN_BAP_PAL_FLOW_SPEC_TLV_LEN, &pLogLinkContext->btampFlowSpec); if (retval != BTAMP_PARSE_SUCCESS) { /* Flow spec parsing failed, return failure */ vosStatus = VOS_STATUS_E_FAILURE; pBapHCIEvent->u.btampFlowSpecModifyCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; } else { bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_STATUS_SUCCESS; pBapHCIEvent->u.btampFlowSpecModifyCompleteEvent.status = WLANBAP_STATUS_SUCCESS; } /* Notify the Command status Event */ vosStatus = (*btampContext->pBapHCIEventCB) ( btampContext->pHddHdl, /* this refers to the BSL per connection context */ &bapHCIEvent, /* This now encodes ALL event types */ VOS_TRUE /* Flag to indicate assoc-specific event */ ); /* Form and immediately return the command status event... 
*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_FLOW_SPEC_MODIFY_COMPLETE_EVENT; pBapHCIEvent->u.btampFlowSpecModifyCompleteEvent.present = 1; pBapHCIEvent->u.btampFlowSpecModifyCompleteEvent.log_link_handle = pBapHCIFlowSpecModify->log_link_handle; return vosStatus; } /* WLAN_BAPFlowSpecModify */ void WLAN_BAPEstablishLogicalLink(ptBtampContext btampContext) { tBtampHCI_Event bapHCIEvent; /* This now encodes ALL event types */ v_U16_t log_link_index = 0; v_U16_t index_for_logLinkCtx = 0; VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; if (btampContext == NULL) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: Null Parameters Not allowed", __func__); return; } if( TRUE == btampContext->btamp_logical_link_cancel_pending ) { bapHCIEvent.u.btampCommandStatusEvent.status = WLANBAP_ERROR_NO_CNCT; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; btampContext->btamp_logical_link_cancel_pending = FALSE; } else { /* Allocate a logical link index for these flow specs */ vosStatus = WLANBAP_CreateNewLogLinkCtx( btampContext, /* per assoc btampContext value */ btampContext->btamp_logical_link_req_info.phyLinkHandle, /* I get phy_link_handle from the Command */ btampContext->btamp_logical_link_req_info.txFlowSpec, /* I get tx_flow_spec from the Command */ btampContext->btamp_logical_link_req_info.rxFlowSpec, /* I get rx_flow_spec from the Command */ &log_link_index /* Return the logical link index here */ ); if (VOS_STATUS_SUCCESS != vosStatus) { /* Invalid flow spec format */ bapHCIEvent.u.btampLogicalLinkCompleteEvent.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkClosed; } else { bapHCIEvent.u.btampLogicalLinkCompleteEvent.status = WLANBAP_STATUS_SUCCESS; btampContext->btamp_logical_link_state = WLAN_BAPLogLinkOpen; } } index_for_logLinkCtx = log_link_index >> 8; /* Format the Logical Link Complete event to return... 
*/ bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_LOGICAL_LINK_COMPLETE_EVENT; bapHCIEvent.u.btampLogicalLinkCompleteEvent.present = 1; /* Return the logical link index here */ bapHCIEvent.u.btampLogicalLinkCompleteEvent.log_link_handle = log_link_index; bapHCIEvent.u.btampLogicalLinkCompleteEvent.phy_link_handle = btampContext->btamp_logical_link_req_info.phyLinkHandle; bapHCIEvent.u.btampLogicalLinkCompleteEvent.flow_spec_id = btampContext->btampLogLinkCtx[index_for_logLinkCtx].btampFlowSpec.flow_spec_id; vosStatus = (*btampContext->pBapHCIEventCB) ( btampContext->pHddHdl, /* this refers to the BSL per connection context */ &bapHCIEvent, /* This now encodes ALL event types */ VOS_TRUE /* Flag to indicate assoc-specific event */ ); return; }
gpl-2.0
AospPlus/android_kernel_x86
drivers/usb/host/whci/int.c
2160
2421
/* * Wireless Host Controller (WHC) interrupt handling. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/uwb/umc.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" static void transfer_done(struct whc *whc) { queue_work(whc->workqueue, &whc->async_work); queue_work(whc->workqueue, &whc->periodic_work); } irqreturn_t whc_int_handler(struct usb_hcd *hcd) { struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd); struct whc *whc = wusbhc_to_whc(wusbhc); u32 sts; sts = le_readl(whc->base + WUSBSTS); if (!(sts & WUSBSTS_INT_MASK)) return IRQ_NONE; le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS); if (sts & WUSBSTS_GEN_CMD_DONE) wake_up(&whc->cmd_wq); if (sts & WUSBSTS_HOST_ERR) dev_err(&whc->umc->dev, "FIXME: host system error\n"); if (sts & WUSBSTS_ASYNC_SCHED_SYNCED) wake_up(&whc->async_list_wq); if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED) wake_up(&whc->periodic_list_wq); if (sts & WUSBSTS_DNTS_INT) queue_work(whc->workqueue, &whc->dn_work); /* * A transfer completed (see [WHCI] section 4.7.1.2 for when * this occurs). 
*/ if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT)) transfer_done(whc); return IRQ_HANDLED; } static int process_dn_buf(struct whc *whc) { struct wusbhc *wusbhc = &whc->wusbhc; struct dn_buf_entry *dn; int processed = 0; for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) { if (dn->status & WHC_DN_STATUS_VALID) { wusbhc_handle_dn(wusbhc, dn->src_addr, (struct wusb_dn_hdr *)dn->dn_data, dn->msg_size); dn->status &= ~WHC_DN_STATUS_VALID; processed++; } } return processed; } void whc_dn_work(struct work_struct *work) { struct whc *whc = container_of(work, struct whc, dn_work); int processed; do { processed = process_dn_buf(whc); } while (processed); }
gpl-2.0
rickyzhang82/odroid-linux
fs/udf/balloc.c
2928
22054
/* * balloc.c * * PURPOSE * Block allocation handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1999-2001 Ben Fennema * (C) 1999 Stelias Computing Inc * * HISTORY * * 02/24/99 blf Created. * */ #include "udfdecl.h" #include <linux/buffer_head.h> #include <linux/bitops.h> #include "udf_i.h" #include "udf_sb.h" #define udf_clear_bit __test_and_clear_bit_le #define udf_set_bit __test_and_set_bit_le #define udf_test_bit test_bit_le #define udf_find_next_one_bit find_next_bit_le static int read_block_bitmap(struct super_block *sb, struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr) { struct buffer_head *bh = NULL; int retval = 0; struct kernel_lb_addr loc; loc.logicalBlockNum = bitmap->s_extPosition; loc.partitionReferenceNum = UDF_SB(sb)->s_partition; bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block)); if (!bh) retval = -EIO; bitmap->s_block_bitmap[bitmap_nr] = bh; return retval; } static int __load_block_bitmap(struct super_block *sb, struct udf_bitmap *bitmap, unsigned int block_group) { int retval = 0; int nr_groups = bitmap->s_nr_groups; if (block_group >= nr_groups) { udf_debug("block_group (%d) > nr_groups (%d)\n", block_group, nr_groups); } if (bitmap->s_block_bitmap[block_group]) { return block_group; } else { retval = read_block_bitmap(sb, bitmap, block_group, block_group); if (retval < 0) return retval; return block_group; } } static inline int load_block_bitmap(struct super_block *sb, struct udf_bitmap *bitmap, unsigned int block_group) { int slot; slot = __load_block_bitmap(sb, bitmap, block_group); if (slot < 0) return slot; if (!bitmap->s_block_bitmap[slot]) return -EIO; return slot; } static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt) { struct udf_sb_info 
*sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	/* No LVID on this volume: nothing to update. */
	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	/* cnt may be a negative value cast to u32 when blocks are allocated. */
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}

/*
 * Free @count blocks starting at @bloc + @offset by setting their bits in
 * the partition's space bitmap (set bit == free).  Handles runs that cross
 * bitmap-group boundaries via the overflow loop, and updates the LVID
 * free-space count per group processed.  Serialized by s_alloc_mutex.
 */
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	/* First clause catches 32-bit wraparound of logicalBlockNum + count. */
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	/* Skip past the space bitmap descriptor header (bits, hence << 3). */
	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			/* Free only up to the group edge now; do the rest on
			 * the next loop iteration. */
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			/* udf_set_bit() returns the old value: a set bit was
			 * already free, which indicates corruption — log it. */
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %ld already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((char *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}

/*
 * Preallocate up to @block_count contiguous blocks starting exactly at
 * @first_block by clearing their bits.  Stops at the first already-allocated
 * block.  Returns the number of blocks actually grabbed.
 * (body continues on the next source row)
 */
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	/* Clamp the request to the end of the partition. */
	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
		group_start = block_group ?
0 : sizeof(struct spaceBitmapDesc); bitmap_nr = load_block_bitmap(sb, bitmap, block_group); if (bitmap_nr < 0) goto out; bh = bitmap->s_block_bitmap[bitmap_nr]; bit = block % (sb->s_blocksize << 3); while (bit < (sb->s_blocksize << 3) && block_count > 0) { if (!udf_clear_bit(bit, bh->b_data)) goto out; block_count--; alloc_count++; bit++; block++; } mark_buffer_dirty(bh); } while (block_count > 0); out: udf_add_free_space(sb, partition, -alloc_count); mutex_unlock(&sbi->s_alloc_mutex); return alloc_count; } static int udf_bitmap_new_block(struct super_block *sb, struct inode *inode, struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err) { struct udf_sb_info *sbi = UDF_SB(sb); int newbit, bit = 0, block, block_group, group_start; int end_goal, nr_groups, bitmap_nr, i; struct buffer_head *bh = NULL; char *ptr; int newblock = 0; *err = -ENOSPC; mutex_lock(&sbi->s_alloc_mutex); repeat: if (goal >= sbi->s_partmaps[partition].s_partition_len) goal = 0; nr_groups = bitmap->s_nr_groups; block = goal + (sizeof(struct spaceBitmapDesc) << 3); block_group = block >> (sb->s_blocksize_bits + 3); group_start = block_group ? 
0 : sizeof(struct spaceBitmapDesc); bitmap_nr = load_block_bitmap(sb, bitmap, block_group); if (bitmap_nr < 0) goto error_return; bh = bitmap->s_block_bitmap[bitmap_nr]; ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start); if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) { bit = block % (sb->s_blocksize << 3); if (udf_test_bit(bit, bh->b_data)) goto got_block; end_goal = (bit + 63) & ~63; bit = udf_find_next_one_bit(bh->b_data, end_goal, bit); if (bit < end_goal) goto got_block; ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3)); newbit = (ptr - ((char *)bh->b_data)) << 3; if (newbit < sb->s_blocksize << 3) { bit = newbit; goto search_back; } newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit); if (newbit < sb->s_blocksize << 3) { bit = newbit; goto got_block; } } for (i = 0; i < (nr_groups * 2); i++) { block_group++; if (block_group >= nr_groups) block_group = 0; group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc); bitmap_nr = load_block_bitmap(sb, bitmap, block_group); if (bitmap_nr < 0) goto error_return; bh = bitmap->s_block_bitmap[bitmap_nr]; if (i < nr_groups) { ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start); if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) { bit = (ptr - ((char *)bh->b_data)) << 3; break; } } else { bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); if (bit < sb->s_blocksize << 3) break; } } if (i >= (nr_groups * 2)) { mutex_unlock(&sbi->s_alloc_mutex); return newblock; } if (bit < sb->s_blocksize << 3) goto search_back; else bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); if (bit >= sb->s_blocksize << 3) { mutex_unlock(&sbi->s_alloc_mutex); return 0; } search_back: i = 0; while (i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data)) { ++i; --bit; } got_block: newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) - 
(sizeof(struct spaceBitmapDesc) << 3); if (!udf_clear_bit(bit, bh->b_data)) { udf_debug("bit already cleared for block %d\n", bit); goto repeat; } mark_buffer_dirty(bh); udf_add_free_space(sb, partition, -1); mutex_unlock(&sbi->s_alloc_mutex); *err = 0; return newblock; error_return: *err = -EIO; mutex_unlock(&sbi->s_alloc_mutex); return 0; } static void udf_table_free_blocks(struct super_block *sb, struct inode *inode, struct inode *table, struct kernel_lb_addr *bloc, uint32_t offset, uint32_t count) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *partmap; uint32_t start, end; uint32_t elen; struct kernel_lb_addr eloc; struct extent_position oepos, epos; int8_t etype; int i; struct udf_inode_info *iinfo; mutex_lock(&sbi->s_alloc_mutex); partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; if (bloc->logicalBlockNum + count < count || (bloc->logicalBlockNum + count) > partmap->s_partition_len) { udf_debug("%d < %d || %d + %d > %d\n", bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count, partmap->s_partition_len); goto error_return; } iinfo = UDF_I(table); udf_add_free_space(sb, sbi->s_partition, count); start = bloc->logicalBlockNum + offset; end = bloc->logicalBlockNum + offset + count - 1; epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry); elen = 0; epos.block = oepos.block = iinfo->i_location; epos.bh = oepos.bh = NULL; while (count && (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)) { if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) { uint32_t tmp = ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); count -= tmp; start += tmp; elen = (etype << 30) | (0x40000000 - sb->s_blocksize); } else { elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits)); start += count; count = 0; } udf_write_aext(table, &oepos, &eloc, elen, 1); } else if (eloc.logicalBlockNum == (end + 1)) { if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) { 
uint32_t tmp = ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); count -= tmp; end -= tmp; eloc.logicalBlockNum -= tmp; elen = (etype << 30) | (0x40000000 - sb->s_blocksize); } else { eloc.logicalBlockNum = start; elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits)); end -= count; count = 0; } udf_write_aext(table, &oepos, &eloc, elen, 1); } if (epos.bh != oepos.bh) { i = -1; oepos.block = epos.block; brelse(oepos.bh); get_bh(epos.bh); oepos.bh = epos.bh; oepos.offset = 0; } else { oepos.offset = epos.offset; } } if (count) { /* * NOTE: we CANNOT use udf_add_aext here, as it can try to * allocate a new block, and since we hold the super block * lock already very bad things would happen :) * * We copy the behavior of udf_add_aext, but instead of * trying to allocate a new block close to the existing one, * we just steal a block from the extent we are trying to add. * * It would be nice if the blocks were close together, but it * isn't required. */ int adsize; struct short_ad *sad = NULL; struct long_ad *lad = NULL; struct allocExtDesc *aed; eloc.logicalBlockNum = start; elen = EXT_RECORDED_ALLOCATED | (count << sb->s_blocksize_bits); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else { brelse(oepos.bh); brelse(epos.bh); goto error_return; } if (epos.offset + (2 * adsize) > sb->s_blocksize) { unsigned char *sptr, *dptr; int loffset; brelse(oepos.bh); oepos = epos; /* Steal a block from the extent being free'd */ epos.block.logicalBlockNum = eloc.logicalBlockNum; eloc.logicalBlockNum++; elen -= sb->s_blocksize; epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, &epos.block, 0)); if (!epos.bh) { brelse(oepos.bh); goto error_return; } aed = (struct allocExtDesc *)(epos.bh->b_data); aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum); if (epos.offset + adsize > sb->s_blocksize) { loffset = epos.offset; aed->lengthAllocDescs = 
cpu_to_le32(adsize); sptr = iinfo->i_ext.i_data + epos.offset - adsize; dptr = epos.bh->b_data + sizeof(struct allocExtDesc); memcpy(dptr, sptr, adsize); epos.offset = sizeof(struct allocExtDesc) + adsize; } else { loffset = epos.offset + adsize; aed->lengthAllocDescs = cpu_to_le32(0); if (oepos.bh) { sptr = oepos.bh->b_data + epos.offset; aed = (struct allocExtDesc *) oepos.bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, adsize); } else { sptr = iinfo->i_ext.i_data + epos.offset; iinfo->i_lenAlloc += adsize; mark_inode_dirty(table); } epos.offset = sizeof(struct allocExtDesc); } if (sbi->s_udfrev >= 0x0200) udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1, epos.block.logicalBlockNum, sizeof(struct tag)); else udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1, epos.block.logicalBlockNum, sizeof(struct tag)); switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: sad = (struct short_ad *)sptr; sad->extLength = cpu_to_le32( EXT_NEXT_EXTENT_ALLOCDECS | sb->s_blocksize); sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum); break; case ICBTAG_FLAG_AD_LONG: lad = (struct long_ad *)sptr; lad->extLength = cpu_to_le32( EXT_NEXT_EXTENT_ALLOCDECS | sb->s_blocksize); lad->extLocation = cpu_to_lelb(epos.block); break; } if (oepos.bh) { udf_update_tag(oepos.bh->b_data, loffset); mark_buffer_dirty(oepos.bh); } else { mark_inode_dirty(table); } } /* It's possible that stealing the block emptied the extent */ if (elen) { udf_write_aext(table, &epos, &eloc, elen, 1); if (!epos.bh) { iinfo->i_lenAlloc += adsize; mark_inode_dirty(table); } else { aed = (struct allocExtDesc *)epos.bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, adsize); udf_update_tag(epos.bh->b_data, epos.offset); mark_buffer_dirty(epos.bh); } } } brelse(epos.bh); brelse(oepos.bh); error_return: mutex_unlock(&sbi->s_alloc_mutex); return; } static int udf_table_prealloc_blocks(struct super_block *sb, struct inode *inode, struct inode *table, uint16_t partition, uint32_t first_block, uint32_t block_count) { 
struct udf_sb_info *sbi = UDF_SB(sb); int alloc_count = 0; uint32_t elen, adsize; struct kernel_lb_addr eloc; struct extent_position epos; int8_t etype = -1; struct udf_inode_info *iinfo; if (first_block >= sbi->s_partmaps[partition].s_partition_len) return 0; iinfo = UDF_I(table); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else return 0; mutex_lock(&sbi->s_alloc_mutex); epos.offset = sizeof(struct unallocSpaceEntry); epos.block = iinfo->i_location; epos.bh = NULL; eloc.logicalBlockNum = 0xFFFFFFFF; while (first_block != eloc.logicalBlockNum && (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { udf_debug("eloc=%d, elen=%d, first_block=%d\n", eloc.logicalBlockNum, elen, first_block); ; /* empty loop body */ } if (first_block == eloc.logicalBlockNum) { epos.offset -= adsize; alloc_count = (elen >> sb->s_blocksize_bits); if (alloc_count > block_count) { alloc_count = block_count; eloc.logicalBlockNum += alloc_count; elen -= (alloc_count << sb->s_blocksize_bits); udf_write_aext(table, &epos, &eloc, (etype << 30) | elen, 1); } else udf_delete_aext(table, epos, eloc, (etype << 30) | elen); } else { alloc_count = 0; } brelse(epos.bh); if (alloc_count) udf_add_free_space(sb, partition, -alloc_count); mutex_unlock(&sbi->s_alloc_mutex); return alloc_count; } static int udf_table_new_block(struct super_block *sb, struct inode *inode, struct inode *table, uint16_t partition, uint32_t goal, int *err) { struct udf_sb_info *sbi = UDF_SB(sb); uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF; uint32_t newblock = 0, adsize; uint32_t elen, goal_elen = 0; struct kernel_lb_addr eloc, uninitialized_var(goal_eloc); struct extent_position epos, goal_epos; int8_t etype; struct udf_inode_info *iinfo = UDF_I(table); *err = -ENOSPC; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == 
ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else return newblock; mutex_lock(&sbi->s_alloc_mutex); if (goal >= sbi->s_partmaps[partition].s_partition_len) goal = 0; /* We search for the closest matching block to goal. If we find a exact hit, we stop. Otherwise we keep going till we run out of extents. We store the buffer_head, bloc, and extoffset of the current closest match and use that when we are done. */ epos.offset = sizeof(struct unallocSpaceEntry); epos.block = iinfo->i_location; epos.bh = goal_epos.bh = NULL; while (spread && (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { if (goal >= eloc.logicalBlockNum) { if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) nspread = 0; else nspread = goal - eloc.logicalBlockNum - (elen >> sb->s_blocksize_bits); } else { nspread = eloc.logicalBlockNum - goal; } if (nspread < spread) { spread = nspread; if (goal_epos.bh != epos.bh) { brelse(goal_epos.bh); goal_epos.bh = epos.bh; get_bh(goal_epos.bh); } goal_epos.block = epos.block; goal_epos.offset = epos.offset - adsize; goal_eloc = eloc; goal_elen = (etype << 30) | elen; } } brelse(epos.bh); if (spread == 0xFFFFFFFF) { brelse(goal_epos.bh); mutex_unlock(&sbi->s_alloc_mutex); return 0; } /* Only allocate blocks from the beginning of the extent. That way, we only delete (empty) extents, never have to insert an extent because of splitting */ /* This works, but very poorly.... 
*/ newblock = goal_eloc.logicalBlockNum; goal_eloc.logicalBlockNum++; goal_elen -= sb->s_blocksize; if (goal_elen) udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1); else udf_delete_aext(table, goal_epos, goal_eloc, goal_elen); brelse(goal_epos.bh); udf_add_free_space(sb, partition, -1); mutex_unlock(&sbi->s_alloc_mutex); *err = 0; return newblock; } void udf_free_blocks(struct super_block *sb, struct inode *inode, struct kernel_lb_addr *bloc, uint32_t offset, uint32_t count) { uint16_t partition = bloc->partitionReferenceNum; struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap, bloc, offset, count); } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) { udf_table_free_blocks(sb, inode, map->s_uspace.s_table, bloc, offset, count); } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) { udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap, bloc, offset, count); } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) { udf_table_free_blocks(sb, inode, map->s_fspace.s_table, bloc, offset, count); } } inline int udf_prealloc_blocks(struct super_block *sb, struct inode *inode, uint16_t partition, uint32_t first_block, uint32_t block_count) { struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) return udf_bitmap_prealloc_blocks(sb, inode, map->s_uspace.s_bitmap, partition, first_block, block_count); else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) return udf_table_prealloc_blocks(sb, inode, map->s_uspace.s_table, partition, first_block, block_count); else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) return udf_bitmap_prealloc_blocks(sb, inode, map->s_fspace.s_bitmap, partition, first_block, block_count); else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) return udf_table_prealloc_blocks(sb, inode, 
map->s_fspace.s_table, partition, first_block, block_count); else return 0; } inline int udf_new_block(struct super_block *sb, struct inode *inode, uint16_t partition, uint32_t goal, int *err) { struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) return udf_bitmap_new_block(sb, inode, map->s_uspace.s_bitmap, partition, goal, err); else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) return udf_table_new_block(sb, inode, map->s_uspace.s_table, partition, goal, err); else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) return udf_bitmap_new_block(sb, inode, map->s_fspace.s_bitmap, partition, goal, err); else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) return udf_table_new_block(sb, inode, map->s_fspace.s_table, partition, goal, err); else { *err = -EIO; return 0; } }
gpl-2.0
nikhil16242/Prometheus_kernel_golf
drivers/net/fs_enet/mac-fec.c
3184
11953
/* * Freescale Ethernet controllers * * Copyright (c) 2005 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/of_device.h> #include <linux/gfp.h> #include <asm/irq.h> #include <asm/uaccess.h> #ifdef CONFIG_8xx #include <asm/8xx_immap.h> #include <asm/pgtable.h> #include <asm/mpc8xx.h> #include <asm/cpm1.h> #endif #include "fs_enet.h" #include "fec.h" /*************************************************/ #if defined(CONFIG_CPM1) /* for a CPM1 __raw_xxx's are sufficient */ #define __fs_out32(addr, x) __raw_writel(x, addr) #define __fs_out16(addr, x) __raw_writew(x, addr) #define __fs_in32(addr) __raw_readl(addr) #define __fs_in16(addr) __raw_readw(addr) #else /* for others play it safe */ #define __fs_out32(addr, x) out_be32(addr, x) #define __fs_out16(addr, x) out_be16(addr, x) #define __fs_in32(addr) in_be32(addr) #define __fs_in16(addr) in_be16(addr) #endif /* write */ #define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v)) /* read */ #define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg) /* set bits */ #define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v)) /* clear bits */ #define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) /* * Delay to 
wait for FEC reset command to complete (in us) */ #define FEC_RESET_DELAY 50 static int whack_reset(struct fec __iomem *fecp) { int i; FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET); for (i = 0; i < FEC_RESET_DELAY; i++) { if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0) return 0; /* OK */ udelay(1); } return -1; } static int do_pd_setup(struct fs_enet_private *fep) { struct platform_device *ofdev = to_platform_device(fep->dev); fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); if (fep->interrupt == NO_IRQ) return -EINVAL; fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0); if (!fep->fcc.fccp) return -EINVAL; return 0; } #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) #define FEC_RX_EVENT (FEC_ENET_RXF) #define FEC_TX_EVENT (FEC_ENET_TXF) #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ FEC_ENET_BABT | FEC_ENET_EBERR) static int setup_data(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); if (do_pd_setup(fep) != 0) return -EINVAL; fep->fec.hthi = 0; fep->fec.htlo = 0; fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK; fep->ev_rx = FEC_RX_EVENT; fep->ev_tx = FEC_TX_EVENT; fep->ev_err = FEC_ERR_EVENT_MSK; return 0; } static int allocate_bd(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), &fep->ring_mem_addr, GFP_KERNEL); if (fep->ring_base == NULL) return -ENOMEM; return 0; } static void free_bd(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; if(fep->ring_base) dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), (void __force *)fep->ring_base, fep->ring_mem_addr); } static void cleanup_data(struct net_device *dev) { /* nothing */ } static void set_promiscuous_mode(struct net_device *dev) { struct fs_enet_private *fep = 
netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; FS(fecp, r_cntrl, FEC_RCNTRL_PROM); } static void set_multicast_start(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); fep->fec.hthi = 0; fep->fec.htlo = 0; } static void set_multicast_one(struct net_device *dev, const u8 *mac) { struct fs_enet_private *fep = netdev_priv(dev); int temp, hash_index, i, j; u32 crc, csrVal; u8 byte, msb; crc = 0xffffffff; for (i = 0; i < 6; i++) { byte = mac[i]; for (j = 0; j < 8; j++) { msb = crc >> 31; crc <<= 1; if (msb ^ (byte & 0x1)) crc ^= FEC_CRC_POLY; byte >>= 1; } } temp = (crc & 0x3f) >> 1; hash_index = ((temp & 0x01) << 4) | ((temp & 0x02) << 2) | ((temp & 0x04)) | ((temp & 0x08) >> 2) | ((temp & 0x10) >> 4); csrVal = 1 << hash_index; if (crc & 1) fep->fec.hthi |= csrVal; else fep->fec.htlo |= csrVal; } static void set_multicast_finish(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; /* if all multi or too many multicasts; just enable all */ if ((dev->flags & IFF_ALLMULTI) != 0 || netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) { fep->fec.hthi = 0xffffffffU; fep->fec.htlo = 0xffffffffU; } FC(fecp, r_cntrl, FEC_RCNTRL_PROM); FW(fecp, grp_hash_table_high, fep->fec.hthi); FW(fecp, grp_hash_table_low, fep->fec.htlo); } static void set_multicast_list(struct net_device *dev) { struct netdev_hw_addr *ha; if ((dev->flags & IFF_PROMISC) == 0) { set_multicast_start(dev); netdev_for_each_mc_addr(ha, dev) set_multicast_one(dev, ha->addr); set_multicast_finish(dev); } else set_promiscuous_mode(dev); } static void restart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; const struct fs_platform_info *fpi = fep->fpi; dma_addr_t rx_bd_base_phys, tx_bd_base_phys; int r; u32 addrhi, addrlo; struct mii_bus* mii = fep->phydev->bus; struct fec_info* fec_inf = mii->priv; r = whack_reset(fep->fec.fecp); if (r != 0) 
dev_err(fep->dev, "FEC Reset FAILED!\n"); /* * Set station address. */ addrhi = ((u32) dev->dev_addr[0] << 24) | ((u32) dev->dev_addr[1] << 16) | ((u32) dev->dev_addr[2] << 8) | (u32) dev->dev_addr[3]; addrlo = ((u32) dev->dev_addr[4] << 24) | ((u32) dev->dev_addr[5] << 16); FW(fecp, addr_low, addrhi); FW(fecp, addr_high, addrlo); /* * Reset all multicast. */ FW(fecp, grp_hash_table_high, fep->fec.hthi); FW(fecp, grp_hash_table_low, fep->fec.htlo); /* * Set maximum receive buffer size. */ FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); #ifdef CONFIG_FS_ENET_MPC5121_FEC FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16); #else FW(fecp, r_hash, PKT_MAXBUF_SIZE); #endif /* get physical address */ rx_bd_base_phys = fep->ring_mem_addr; tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; /* * Set receive and transmit descriptor base. */ FW(fecp, r_des_start, rx_bd_base_phys); FW(fecp, x_des_start, tx_bd_base_phys); fs_init_bds(dev); /* * Enable big endian and don't care about SDMA FC. */ #ifdef CONFIG_FS_ENET_MPC5121_FEC FS(fecp, dma_control, 0xC0000000); #else FW(fecp, fun_code, 0x78000000); #endif /* * Set MII speed. */ FW(fecp, mii_speed, fec_inf->mii_speed); /* * Clear any outstanding interrupt. */ FW(fecp, ievent, 0xffc0); #ifndef CONFIG_FS_ENET_MPC5121_FEC FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29); FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ #else /* * Only set MII mode - do not touch maximum frame length * configured before. */ FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); #endif /* * adjust to duplex mode */ if (fep->phydev->duplex) { FC(fecp, r_cntrl, FEC_RCNTRL_DRT); FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ } else { FS(fecp, r_cntrl, FEC_RCNTRL_DRT); FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ } /* * Enable interrupts we wish to service. */ FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB | FEC_ENET_RXF | FEC_ENET_RXB); /* * And last, enable the transmit and receive processing. 
*/ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); FW(fecp, r_des_active, 0x01000000); } static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; struct fec __iomem *fecp = fep->fec.fecp; struct fec_info* feci= fep->phydev->bus->priv; int i; if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) return; /* already down */ FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */ for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) && i < FEC_RESET_DELAY; i++) udelay(1); if (i == FEC_RESET_DELAY) dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n"); /* * Disable FEC. Let only MII interrupts. */ FW(fecp, imask, 0); FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN); fs_cleanup_bds(dev); /* shut down FEC1? that's where the mii bus is */ if (fpi->has_phy) { FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); FW(fecp, ievent, FEC_ENET_MII); FW(fecp, mii_speed, feci->mii_speed); } } static void napi_clear_rx_event(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK); } static void napi_enable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK); } static void napi_disable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK); } static void rx_bd_done(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; FW(fecp, r_des_active, 0x01000000); } static void tx_kickstart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; FW(fecp, x_des_active, 0x01000000); } static u32 get_int_events(struct net_device *dev) 
{ struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; return FR(fecp, ievent) & FR(fecp, imask); } static void clear_int_events(struct net_device *dev, u32 int_events) { struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; FW(fecp, ievent, int_events); } static void ev_error(struct net_device *dev, u32 int_events) { struct fs_enet_private *fep = netdev_priv(dev); dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events); } static int get_regs(struct net_device *dev, void *p, int *sizep) { struct fs_enet_private *fep = netdev_priv(dev); if (*sizep < sizeof(struct fec)) return -EINVAL; memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec)); return 0; } static int get_regs_len(struct net_device *dev) { return sizeof(struct fec); } static void tx_restart(struct net_device *dev) { /* nothing */ } /*************************************************************************/ const struct fs_ops fs_fec_ops = { .setup_data = setup_data, .cleanup_data = cleanup_data, .set_multicast_list = set_multicast_list, .restart = restart, .stop = stop, .napi_clear_rx_event = napi_clear_rx_event, .napi_enable_rx = napi_enable_rx, .napi_disable_rx = napi_disable_rx, .rx_bd_done = rx_bd_done, .tx_kickstart = tx_kickstart, .get_int_events = get_int_events, .clear_int_events = clear_int_events, .ev_error = ev_error, .get_regs = get_regs, .get_regs_len = get_regs_len, .tx_restart = tx_restart, .allocate_bd = allocate_bd, .free_bd = free_bd, };
gpl-2.0
MWisBest/android_kernel_amazon_bowser-common
arch/mips/pci/pci-vr41xx.c
3952
9088
/* * pci-vr41xx.c, PCI Control Unit routines for the NEC VR4100 series. * * Copyright (C) 2001-2003 MontaVista Software Inc. * Author: Yoichi Yuasa <source@mvista.com> * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org> * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * Changes: * MontaVista Software Inc. <source@mvista.com> * - New creation, NEC VR4122 and VR4131 are supported. 
*/ #include <linux/init.h> #include <linux/pci.h> #include <linux/types.h> #include <asm/cpu.h> #include <asm/io.h> #include <asm/vr41xx/pci.h> #include <asm/vr41xx/vr41xx.h> #include "pci-vr41xx.h" extern struct pci_ops vr41xx_pci_ops; static void __iomem *pciu_base; #define pciu_read(offset) readl(pciu_base + (offset)) #define pciu_write(offset, value) writel((value), pciu_base + (offset)) static struct pci_master_address_conversion pci_master_memory1 = { .bus_base_address = PCI_MASTER_MEM1_BUS_BASE_ADDRESS, .address_mask = PCI_MASTER_MEM1_ADDRESS_MASK, .pci_base_address = PCI_MASTER_MEM1_PCI_BASE_ADDRESS, }; static struct pci_target_address_conversion pci_target_memory1 = { .address_mask = PCI_TARGET_MEM1_ADDRESS_MASK, .bus_base_address = PCI_TARGET_MEM1_BUS_BASE_ADDRESS, }; static struct pci_master_address_conversion pci_master_io = { .bus_base_address = PCI_MASTER_IO_BUS_BASE_ADDRESS, .address_mask = PCI_MASTER_IO_ADDRESS_MASK, .pci_base_address = PCI_MASTER_IO_PCI_BASE_ADDRESS, }; static struct pci_mailbox_address pci_mailbox = { .base_address = PCI_MAILBOX_BASE_ADDRESS, }; static struct pci_target_address_window pci_target_window1 = { .base_address = PCI_TARGET_WINDOW1_BASE_ADDRESS, }; static struct resource pci_mem_resource = { .name = "PCI Memory resources", .start = PCI_MEM_RESOURCE_START, .end = PCI_MEM_RESOURCE_END, .flags = IORESOURCE_MEM, }; static struct resource pci_io_resource = { .name = "PCI I/O resources", .start = PCI_IO_RESOURCE_START, .end = PCI_IO_RESOURCE_END, .flags = IORESOURCE_IO, }; static struct pci_controller_unit_setup vr41xx_pci_controller_unit_setup = { .master_memory1 = &pci_master_memory1, .target_memory1 = &pci_target_memory1, .master_io = &pci_master_io, .exclusive_access = CANNOT_LOCK_FROM_DEVICE, .wait_time_limit_from_irdy_to_trdy = 0, .mailbox = &pci_mailbox, .target_window1 = &pci_target_window1, .master_latency_timer = 0x80, .retry_limit = 0, .arbiter_priority_control = PCI_ARBITRATION_MODE_FAIR, .take_away_gnt_mode = 
PCI_TAKE_AWAY_GNT_DISABLE, }; static struct pci_controller vr41xx_pci_controller = { .pci_ops = &vr41xx_pci_ops, .mem_resource = &pci_mem_resource, .io_resource = &pci_io_resource, }; void __init vr41xx_pciu_setup(struct pci_controller_unit_setup *setup) { vr41xx_pci_controller_unit_setup = *setup; } static int __init vr41xx_pciu_init(void) { struct pci_controller_unit_setup *setup; struct pci_master_address_conversion *master; struct pci_target_address_conversion *target; struct pci_mailbox_address *mailbox; struct pci_target_address_window *window; unsigned long vtclock, pci_clock_max; uint32_t val; setup = &vr41xx_pci_controller_unit_setup; if (request_mem_region(PCIU_BASE, PCIU_SIZE, "PCIU") == NULL) return -EBUSY; pciu_base = ioremap(PCIU_BASE, PCIU_SIZE); if (pciu_base == NULL) { release_mem_region(PCIU_BASE, PCIU_SIZE); return -EBUSY; } /* Disable PCI interrupt */ vr41xx_disable_pciint(); /* Supply VTClock to PCIU */ vr41xx_supply_clock(PCIU_CLOCK); /* Dummy write, waiting for supply of VTClock. 
*/ vr41xx_disable_pciint(); /* Select PCI clock */ if (setup->pci_clock_max != 0) pci_clock_max = setup->pci_clock_max; else pci_clock_max = PCI_CLOCK_MAX; vtclock = vr41xx_get_vtclock_frequency(); if (vtclock < pci_clock_max) pciu_write(PCICLKSELREG, EQUAL_VTCLOCK); else if ((vtclock / 2) < pci_clock_max) pciu_write(PCICLKSELREG, HALF_VTCLOCK); else if (current_cpu_data.processor_id >= PRID_VR4131_REV2_1 && (vtclock / 3) < pci_clock_max) pciu_write(PCICLKSELREG, ONE_THIRD_VTCLOCK); else if ((vtclock / 4) < pci_clock_max) pciu_write(PCICLKSELREG, QUARTER_VTCLOCK); else { printk(KERN_ERR "PCI Clock is over 33MHz.\n"); iounmap(pciu_base); return -EINVAL; } /* Supply PCI clock by PCI bus */ vr41xx_supply_clock(PCI_CLOCK); if (setup->master_memory1 != NULL) { master = setup->master_memory1; val = IBA(master->bus_base_address) | MASTER_MSK(master->address_mask) | WINEN | PCIA(master->pci_base_address); pciu_write(PCIMMAW1REG, val); } else { val = pciu_read(PCIMMAW1REG); val &= ~WINEN; pciu_write(PCIMMAW1REG, val); } if (setup->master_memory2 != NULL) { master = setup->master_memory2; val = IBA(master->bus_base_address) | MASTER_MSK(master->address_mask) | WINEN | PCIA(master->pci_base_address); pciu_write(PCIMMAW2REG, val); } else { val = pciu_read(PCIMMAW2REG); val &= ~WINEN; pciu_write(PCIMMAW2REG, val); } if (setup->target_memory1 != NULL) { target = setup->target_memory1; val = TARGET_MSK(target->address_mask) | WINEN | ITA(target->bus_base_address); pciu_write(PCITAW1REG, val); } else { val = pciu_read(PCITAW1REG); val &= ~WINEN; pciu_write(PCITAW1REG, val); } if (setup->target_memory2 != NULL) { target = setup->target_memory2; val = TARGET_MSK(target->address_mask) | WINEN | ITA(target->bus_base_address); pciu_write(PCITAW2REG, val); } else { val = pciu_read(PCITAW2REG); val &= ~WINEN; pciu_write(PCITAW2REG, val); } if (setup->master_io != NULL) { master = setup->master_io; val = IBA(master->bus_base_address) | MASTER_MSK(master->address_mask) | WINEN | 
PCIIA(master->pci_base_address); pciu_write(PCIMIOAWREG, val); } else { val = pciu_read(PCIMIOAWREG); val &= ~WINEN; pciu_write(PCIMIOAWREG, val); } if (setup->exclusive_access == CANNOT_LOCK_FROM_DEVICE) pciu_write(PCIEXACCREG, UNLOCK); else pciu_write(PCIEXACCREG, 0); if (current_cpu_type() == CPU_VR4122) pciu_write(PCITRDYVREG, TRDYV(setup->wait_time_limit_from_irdy_to_trdy)); pciu_write(LATTIMEREG, MLTIM(setup->master_latency_timer)); if (setup->mailbox != NULL) { mailbox = setup->mailbox; val = MBADD(mailbox->base_address) | TYPE_32BITSPACE | MSI_MEMORY | PREF_APPROVAL; pciu_write(MAILBAREG, val); } if (setup->target_window1) { window = setup->target_window1; val = PMBA(window->base_address) | TYPE_32BITSPACE | MSI_MEMORY | PREF_APPROVAL; pciu_write(PCIMBA1REG, val); } if (setup->target_window2) { window = setup->target_window2; val = PMBA(window->base_address) | TYPE_32BITSPACE | MSI_MEMORY | PREF_APPROVAL; pciu_write(PCIMBA2REG, val); } val = pciu_read(RETVALREG); val &= ~RTYVAL_MASK; val |= RTYVAL(setup->retry_limit); pciu_write(RETVALREG, val); val = pciu_read(PCIAPCNTREG); val &= ~(TKYGNT | PAPC); switch (setup->arbiter_priority_control) { case PCI_ARBITRATION_MODE_ALTERNATE_0: val |= PAPC_ALTERNATE_0; break; case PCI_ARBITRATION_MODE_ALTERNATE_B: val |= PAPC_ALTERNATE_B; break; default: val |= PAPC_FAIR; break; } if (setup->take_away_gnt_mode == PCI_TAKE_AWAY_GNT_ENABLE) val |= TKYGNT_ENABLE; pciu_write(PCIAPCNTREG, val); pciu_write(COMMANDREG, PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_PARITY | PCI_COMMAND_SERR); /* Clear bus error */ pciu_read(BUSERRADREG); pciu_write(PCIENREG, PCIU_CONFIG_DONE); if (setup->mem_resource != NULL) vr41xx_pci_controller.mem_resource = setup->mem_resource; if (setup->io_resource != NULL) { vr41xx_pci_controller.io_resource = setup->io_resource; } else { set_io_port_base(IO_PORT_BASE); ioport_resource.start = IO_PORT_RESOURCE_START; ioport_resource.end = IO_PORT_RESOURCE_END; } if 
(setup->master_io) { void __iomem *io_map_base; struct resource *res = vr41xx_pci_controller.io_resource; master = setup->master_io; io_map_base = ioremap(master->bus_base_address, res->end - res->start + 1); if (!io_map_base) return -EBUSY; vr41xx_pci_controller.io_map_base = (unsigned long)io_map_base; } register_pci_controller(&vr41xx_pci_controller); return 0; } arch_initcall(vr41xx_pciu_init);
gpl-2.0
osmc/vero-linux
arch/powerpc/platforms/52xx/mpc5200_simple.c
4208
2382
/* * Support for 'mpc5200-simple-platform' compatible boards. * * Written by Marian Balakowicz <m8@semihalf.com> * Copyright (C) 2007 Semihalf * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Description: * This code implements support for a simple MPC52xx based boards which * do not need a custom platform specific setup. Such boards are * supported assuming the following: * * - GPIO pins are configured by the firmware, * - CDM configuration (clocking) is setup correctly by firmware, * - if the 'fsl,has-wdt' property is present in one of the * gpt nodes, then it is safe to use such gpt to reset the board, * - PCI is supported if enabled in the kernel configuration * and if there is a PCI bus node defined in the device tree. * * Boards that are compatible with this generic platform support * are listed in a 'board' table. 
*/ #undef DEBUG #include <asm/time.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/mpc52xx.h> /* * Setup the architecture */ static void __init mpc5200_simple_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("mpc5200_simple_setup_arch()", 0); /* Map important registers from the internal memory map */ mpc52xx_map_common_devices(); /* Some mpc5200 & mpc5200b related configuration */ mpc5200_setup_xlb_arbiter(); mpc52xx_setup_pci(); } /* list of the supported boards */ static const char *board[] __initdata = { "anonymous,a3m071", "anonymous,a4m072", "anon,charon", "ifm,o2d", "intercontrol,digsy-mtc", "manroland,mucmc52", "manroland,uc101", "phytec,pcm030", "phytec,pcm032", "promess,motionpro", "schindler,cm5200", "tqc,tqm5200", NULL }; /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc5200_simple_probe(void) { return of_flat_dt_match(of_get_flat_dt_root(), board); } define_machine(mpc5200_simple_platform) { .name = "mpc5200-simple-platform", .probe = mpc5200_simple_probe, .setup_arch = mpc5200_simple_setup_arch, .init = mpc52xx_declare_of_platform_devices, .init_IRQ = mpc52xx_init_irq, .get_irq = mpc52xx_get_irq, .restart = mpc52xx_restart, .calibrate_decr = generic_calibrate_decr, };
gpl-2.0
GustavoRD78/78Kernel-ZL-101
arch/x86/kernel/cpu/intel.c
4208
14699
#include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/smp.h> #include <linux/sched.h> #include <linux/thread_info.h> #include <linux/module.h> #include <linux/uaccess.h> #include <asm/processor.h> #include <asm/pgtable.h> #include <asm/msr.h> #include <asm/bugs.h> #include <asm/cpu.h> #ifdef CONFIG_X86_64 #include <linux/topology.h> #include <asm/numa_64.h> #endif #include "cpu.h" #ifdef CONFIG_X86_LOCAL_APIC #include <asm/mpspec.h> #include <asm/apic.h> #endif static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable; /* Unmask CPUID levels if masked: */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); c->cpuid_level = cpuid_eax(0); get_cpu_cap(c); } } if ((c->x86 == 0xf && c->x86_model >= 0x03) || (c->x86 == 0x6 && c->x86_model >= 0x0e)) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) { unsigned lower_word; wrmsr(MSR_IA32_UCODE_REV, 0, 0); /* Required by the SDM */ sync_core(); rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode); } /* * Atom erratum AAE44/AAF40/AAG38/AAH41: * * A race condition between speculative fetches and invalidating * a large page. This is worked around in microcode, but we * need the microcode to have already been loaded... so if it is * not, recommend a BIOS update and disable large pages. 
*/ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && c->microcode < 0x20e) { printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n"); clear_cpu_cap(c, X86_FEATURE_PSE); } #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #else /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ if (c->x86 == 15 && c->x86_cache_alignment == 64) c->x86_cache_alignment = 128; #endif /* CPUID workaround for 0F33/0F34 CPU */ if (c->x86 == 0xF && c->x86_model == 0x3 && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) c->x86_phys_bits = 36; /* * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate * with P/T states and does not stop in deep C-states. * * It is also reliable across cores and sockets. (but not across * cabinets - we turn it off in that case explicitly.) */ if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); if (!check_tsc_unstable()) sched_clock_stable = 1; } /* * There is a known erratum on Pentium III and Core Solo * and Core Duo CPUs. * " Page with PAT set to WC while associated MTRR is UC * may consolidate to UC " * Because of this erratum, it is better to stick with * setting WC in MTRR rather than using PAT on these CPUs. * * Enable PAT WC only on P4, Core 2 or later CPUs. */ if (c->x86 == 6 && c->x86_model < 15) clear_cpu_cap(c, X86_FEATURE_PAT); #ifdef CONFIG_KMEMCHECK /* * P4s have a "fast strings" feature which causes single- * stepping REP instructions to only generate a #DB on * cache-line boundaries. * * Ingo Molnar reported a Pentium D (model 6) and a Xeon * (model 2) with the same problem. 
*/ if (c->x86 == 15) { rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { printk(KERN_INFO "kmemcheck: Disabling fast string operations\n"); misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING; wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); } } #endif /* * If fast string is not enabled in IA32_MISC_ENABLE for any reason, * clear the fast string and enhanced fast string CPU capabilities. */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { printk(KERN_INFO "Disabled fast string operations\n"); setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); setup_clear_cpu_cap(X86_FEATURE_ERMS); } } } #ifdef CONFIG_X86_32 /* * Early probe support logic for ppro memory erratum #50 * * This is called before we do cpu ident work */ int __cpuinit ppro_with_ram_bug(void) { /* Uses data from early_cpu_detect now */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && boot_cpu_data.x86_mask < 8) { printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n"); return 1; } return 0; } #ifdef CONFIG_X86_F00F_BUG static void __cpuinit trap_init_f00f_bug(void) { __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); /* * Update the IDT descriptor and reload the IDT so that * it uses the read-only mapped virtual address. */ idt_descr.address = fix_to_virt(FIX_F00F_IDT); load_idt(&idt_descr); } #endif static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) { /* calling is from identify_secondary_cpu() ? 
*/ if (!c->cpu_index) return; /* * Mask B, Pentium, but not Pentium MMX */ if (c->x86 == 5 && c->x86_mask >= 1 && c->x86_mask <= 4 && c->x86_model <= 3) { /* * Remember we have B step Pentia with bugs */ WARN_ONCE(1, "WARNING: SMP operation may be unreliable" "with B stepping processors.\n"); } } static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) { unsigned long lo, hi; #ifdef CONFIG_X86_F00F_BUG /* * All current models of Pentium and Pentium with MMX technology CPUs * have the F0 0F bug, which lets nonprivileged users lock up the * system. * Note that the workaround only should be initialized once... */ c->f00f_bug = 0; if (!paravirt_enabled() && c->x86 == 5) { static int f00f_workaround_enabled; c->f00f_bug = 1; if (!f00f_workaround_enabled) { trap_init_f00f_bug(); printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); f00f_workaround_enabled = 1; } } #endif /* * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until * model 3 mask 3 */ if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) clear_cpu_cap(c, X86_FEATURE_SEP); /* * P4 Xeon errata 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. */ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) { printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; wrmsr(MSR_IA32_MISC_ENABLE, lo, hi); } } /* * See if we have a good local APIC by checking for buggy Pentia, * i.e. all B steppings and the C2 stepping of P54C when using their * integrated APIC (see 11AP erratum in "Pentium Processor * Specification Update"). 
*/ if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && (c->x86_mask < 0x6 || c->x86_mask == 0xb)) set_cpu_cap(c, X86_FEATURE_11AP); #ifdef CONFIG_X86_INTEL_USERCOPY /* * Set up the preferred alignment for movsl bulk memory moves */ switch (c->x86) { case 4: /* 486: untested */ break; case 5: /* Old Pentia: untested */ break; case 6: /* PII/PIII only like movsl with 8-byte alignment */ movsl_mask.mask = 7; break; case 15: /* P4 is OK down to 8-byte alignment */ movsl_mask.mask = 7; break; } #endif #ifdef CONFIG_X86_NUMAQ numaq_tsc_disable(); #endif intel_smp_check(c); } #else static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) { } #endif static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) { #ifdef CONFIG_NUMA unsigned node; int cpu = smp_processor_id(); /* Don't do the funky fallback heuristics the AMD version employs for now. */ node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE || !node_online(node)) { /* reuse the value from init_cpu_to_node() */ node = cpu_to_node(cpu); } numa_set_node(cpu, node); #endif } /* * find out the number of processor cores on the die */ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) { unsigned int eax, ebx, ecx, edx; if (c->cpuid_level < 4) return 1; /* Intel has a non-standard dependency on %ecx for this CPUID level. 
*/ cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); if (eax & 0x1f) return (eax >> 26) + 1; else return 1; } static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) { /* Intel VMX MSR indicated features */ #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 #define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 #define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 #define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 #define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW); clear_cpu_cap(c, X86_FEATURE_VNMI); clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); clear_cpu_cap(c, X86_FEATURE_EPT); clear_cpu_cap(c, X86_FEATURE_VPID); rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); msr_ctl = vmx_msr_high | vmx_msr_low; if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) set_cpu_cap(c, X86_FEATURE_VNMI); if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, vmx_msr_low, vmx_msr_high); msr_ctl2 = vmx_msr_high | vmx_msr_low; if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) set_cpu_cap(c, X86_FEATURE_EPT); if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) set_cpu_cap(c, X86_FEATURE_VPID); } } static void __cpuinit init_intel(struct cpuinfo_x86 *c) { unsigned int l2 = 0; early_init_intel(c); intel_workarounds(c); /* * Detect the extended topology information if available. 
This * will reinitialise the initial_apicid which will be used * in init_intel_cacheinfo() */ detect_extended_topology(c); l2 = init_intel_cacheinfo(c); if (c->cpuid_level > 9) { unsigned eax = cpuid_eax(10); /* Check for version and the number of counters */ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); } if (cpu_has_xmm2) set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); if (cpu_has_ds) { unsigned int l1; rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); if (!(l1 & (1<<11))) set_cpu_cap(c, X86_FEATURE_BTS); if (!(l1 & (1<<12))) set_cpu_cap(c, X86_FEATURE_PEBS); } if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); #ifdef CONFIG_X86_64 if (c->x86 == 15) c->x86_cache_alignment = c->x86_clflush_size * 2; if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_REP_GOOD); #else /* * Names for the Pentium II/Celeron processors * detectable only by also checking the cache size. * Dixon is NOT a Celeron. */ if (c->x86 == 6) { char *p = NULL; switch (c->x86_model) { case 5: if (l2 == 0) p = "Celeron (Covington)"; else if (l2 == 256) p = "Mobile Pentium II (Dixon)"; break; case 6: if (l2 == 128) p = "Celeron (Mendocino)"; else if (c->x86_mask == 0 || c->x86_mask == 5) p = "Celeron-A"; break; case 8: if (l2 == 128) p = "Celeron (Coppermine)"; break; } if (p) strcpy(c->x86_model_id, p); } if (c->x86 == 15) set_cpu_cap(c, X86_FEATURE_P4); if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_P3); #endif if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { /* * let's use the legacy cpuid vector 0x1 and 0x4 for topology * detection. */ c->x86_max_cores = intel_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif } /* Work around errata */ srat_detect_node(c); if (cpu_has(c, X86_FEATURE_VMX)) detect_vmx_virtcap(c); /* * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not. 
* x86_energy_perf_policy(8) is available to change it at run-time */ if (cpu_has(c, X86_FEATURE_EPB)) { u64 epb; rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) { printk_once(KERN_WARNING "ENERGY_PERF_BIAS:" " Set to 'normal', was 'performance'\n" "ENERGY_PERF_BIAS: View and update with" " x86_energy_perf_policy(8)\n"); epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); } } } #ifdef CONFIG_X86_32 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* * Intel PIII Tualatin. This comes in two flavours. * One has 256kb of cache, the other 512. We have no way * to determine which, so we use a boottime override * for the 512kb model, and assume 256 otherwise. */ if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) size = 256; return size; } #endif static const struct cpu_dev __cpuinitconst intel_cpu_dev = { .c_vendor = "Intel", .c_ident = { "GenuineIntel" }, #ifdef CONFIG_X86_32 .c_models = { { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = { [0] = "486 DX-25/33", [1] = "486 DX-50", [2] = "486 SX", [3] = "486 DX/2", [4] = "486 SL", [5] = "486 SX/2", [7] = "486 DX/2-WB", [8] = "486 DX/4", [9] = "486 DX/4-WB" } }, { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = { [0] = "Pentium 60/66 A-step", [1] = "Pentium 60/66", [2] = "Pentium 75 - 200", [3] = "OverDrive PODP5V83", [4] = "Pentium MMX", [7] = "Mobile Pentium 75 - 200", [8] = "Mobile Pentium MMX" } }, { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = { [0] = "Pentium Pro A-step", [1] = "Pentium Pro", [3] = "Pentium II (Klamath)", [4] = "Pentium II (Deschutes)", [5] = "Pentium II (Deschutes)", [6] = "Mobile Pentium II", [7] = "Pentium III (Katmai)", [8] = "Pentium III (Coppermine)", [10] = "Pentium III (Cascades)", [11] = "Pentium III (Tualatin)", } }, { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names = { [0] = "Pentium 4 (Unknown)", [1] = "Pentium 4 (Willamette)", [2] = 
"Pentium 4 (Northwood)", [4] = "Pentium 4 (Foster)", [5] = "Pentium 4 (Foster)", } }, }, .c_size_cache = intel_size_cache, #endif .c_early_init = early_init_intel, .c_init = init_intel, .c_x86_vendor = X86_VENDOR_INTEL, }; cpu_dev_register(intel_cpu_dev);
gpl-2.0
EmmanuelU/wild_kernel_lge_gproj
arch/arm/mach-imx/imx27-dt.c
4720
2864
/* * Copyright 2012 Sascha Hauer, Pengutronix * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <mach/common.h> #include <mach/mx27.h> static const struct of_dev_auxdata imx27_auxdata_lookup[] __initconst = { OF_DEV_AUXDATA("fsl,imx27-uart", MX27_UART1_BASE_ADDR, "imx21-uart.0", NULL), OF_DEV_AUXDATA("fsl,imx27-uart", MX27_UART2_BASE_ADDR, "imx21-uart.1", NULL), OF_DEV_AUXDATA("fsl,imx27-uart", MX27_UART3_BASE_ADDR, "imx21-uart.2", NULL), OF_DEV_AUXDATA("fsl,imx27-fec", MX27_FEC_BASE_ADDR, "imx27-fec.0", NULL), OF_DEV_AUXDATA("fsl,imx27-i2c", MX27_I2C1_BASE_ADDR, "imx-i2c.0", NULL), OF_DEV_AUXDATA("fsl,imx27-i2c", MX27_I2C2_BASE_ADDR, "imx-i2c.1", NULL), OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI1_BASE_ADDR, "imx27-cspi.0", NULL), OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI2_BASE_ADDR, "imx27-cspi.1", NULL), OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI3_BASE_ADDR, "imx27-cspi.2", NULL), OF_DEV_AUXDATA("fsl,imx27-wdt", MX27_WDOG_BASE_ADDR, "imx2-wdt.0", NULL), { /* sentinel */ } }; static int __init imx27_avic_add_irq_domain(struct device_node *np, struct device_node *interrupt_parent) { irq_domain_add_legacy(np, 64, 0, 0, &irq_domain_simple_ops, NULL); return 0; } static int __init imx27_gpio_add_irq_domain(struct device_node *np, struct device_node *interrupt_parent) { static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; gpio_irq_base -= 32; irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL); return 0; } static const struct of_device_id imx27_irq_match[] __initconst = { { .compatible = "fsl,imx27-avic", .data = 
imx27_avic_add_irq_domain, }, { .compatible = "fsl,imx27-gpio", .data = imx27_gpio_add_irq_domain, }, { /* sentinel */ } }; static void __init imx27_dt_init(void) { of_irq_init(imx27_irq_match); of_platform_populate(NULL, of_default_bus_match_table, imx27_auxdata_lookup, NULL); } static void __init imx27_timer_init(void) { mx27_clocks_init_dt(); } static struct sys_timer imx27_timer = { .init = imx27_timer_init, }; static const char *imx27_dt_board_compat[] __initdata = { "fsl,imx27", NULL }; DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)") .map_io = mx27_map_io, .init_early = imx27_init_early, .init_irq = mx27_init_irq, .handle_irq = imx27_handle_irq, .timer = &imx27_timer, .init_machine = imx27_dt_init, .dt_compat = imx27_dt_board_compat, .restart = mxc_restart, MACHINE_END
gpl-2.0
Altaf-Mahdi/i9505
drivers/scsi/fnic/fnic_scsi.c
5744
48975
/* * Copyright 2008 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/mempool.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/scatterlist.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/delay.h> #include <linux/gfp.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <scsi/fc/fc_els.h> #include <scsi/fc/fc_fcoe.h> #include <scsi/libfc.h> #include <scsi/fc_frame.h> #include "fnic_io.h" #include "fnic.h" const char *fnic_state_str[] = { [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE", [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE", [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE", [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE", }; static const char *fnic_ioreq_state_str[] = { [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING", [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING", [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE", [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE", }; static const char *fcpio_status_str[] = { [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/ 
[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER", [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE", [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM]", [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED", [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND", [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/ [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT", [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID", [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID", [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH", [FCPIO_FW_ERR] = "FCPIO_FW_ERR", [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED", [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED", [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN", [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED", [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL", [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED", [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND", }; const char *fnic_state_to_str(unsigned int state) { if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state]) return "unknown"; return fnic_state_str[state]; } static const char *fnic_ioreq_state_to_str(unsigned int state) { if (state >= ARRAY_SIZE(fnic_ioreq_state_str) || !fnic_ioreq_state_str[state]) return "unknown"; return fnic_ioreq_state_str[state]; } static const char *fnic_fcpio_status_to_str(unsigned int status) { if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status]) return "unknown"; return fcpio_status_str[status]; } static void fnic_cleanup_io(struct fnic *fnic, int exclude_id); static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, struct scsi_cmnd *sc) { u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1); return &fnic->io_req_lock[hash]; } /* * Unmap the data buffer and sense buffer for an io_req, * also unmap and free the device-private scatter/gather list. 
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	/* Unmap the device-visible SG descriptor list, if one was mapped */
	if (io_req->sgl_list_pa)
		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 PCI_DMA_TODEVICE);
	scsi_dma_unmap(sc);

	/*
	 * Free the SG list via its pre-alignment allocation address
	 * (sgl_list_alloc), not the aligned sgl_list pointer.
	 */
	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	if (io_req->sense_buf_pa)
		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 1;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}

/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	/* Drop any frames still queued for transmit before resetting fw */
	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	/* Reclaim acked descriptors if we are running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq))
		ret = -EAGAIN;
	else
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret)
		FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host, "Issued fw reset\n"); else FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Failed to issue fw reset\n"); return ret; } /* * fnic_flogi_reg_handler * Routine to send flogi register msg to fw */ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) { struct vnic_wq_copy *wq = &fnic->wq_copy[0]; enum fcpio_flogi_reg_format_type format; struct fc_lport *lp = fnic->lport; u8 gw_mac[ETH_ALEN]; int ret = 0; unsigned long flags; spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) free_wq_copy_descs(fnic, wq); if (!vnic_wq_copy_desc_avail(wq)) { ret = -EAGAIN; goto flogi_reg_ioreq_end; } if (fnic->ctlr.map_dest) { memset(gw_mac, 0xff, ETH_ALEN); format = FCPIO_FLOGI_REG_DEF_DEST; } else { memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); format = FCPIO_FLOGI_REG_GW_DEST; } if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, fc_id, gw_mac, fnic->data_src_addr, lp->r_a_tov, lp->e_d_tov); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", fc_id, fnic->data_src_addr, gw_mac); } else { fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, format, fc_id, gw_mac); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "FLOGI reg issued fcid %x map %d dest %pM\n", fc_id, fnic->ctlr.map_dest, gw_mac); } flogi_reg_ioreq_end: spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); return ret; } /* * fnic_queue_wq_copy_desc * Routine to enqueue a wq copy desc */ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, struct vnic_wq_copy *wq, struct fnic_io_req *io_req, struct scsi_cmnd *sc, int sg_count) { struct scatterlist *sg; struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); struct fc_rport_libfc_priv *rp = rport->dd_data; struct host_sg_desc *desc; u8 pri_tag = 0; unsigned int i; unsigned long intr_flags; int flags; u8 exch_flags; struct scsi_lun fc_lun; char msg[2]; if (sg_count) { 
/* For each SGE, create a device desc entry */ desc = io_req->sgl_list; for_each_sg(scsi_sglist(sc), sg, sg_count, i) { desc->addr = cpu_to_le64(sg_dma_address(sg)); desc->len = cpu_to_le32(sg_dma_len(sg)); desc->_resvd = 0; desc++; } io_req->sgl_list_pa = pci_map_single (fnic->pdev, io_req->sgl_list, sizeof(io_req->sgl_list[0]) * sg_count, PCI_DMA_TODEVICE); } io_req->sense_buf_pa = pci_map_single(fnic->pdev, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); int_to_scsilun(sc->device->lun, &fc_lun); pri_tag = FCPIO_ICMND_PTA_SIMPLE; msg[0] = MSG_SIMPLE_TAG; scsi_populate_tag_msg(sc, msg); if (msg[0] == MSG_ORDERED_TAG) pri_tag = FCPIO_ICMND_PTA_ORDERED; /* Enqueue the descriptor in the Copy WQ */ spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) free_wq_copy_descs(fnic, wq); if (unlikely(!vnic_wq_copy_desc_avail(wq))) { spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); return SCSI_MLQUEUE_HOST_BUSY; } flags = 0; if (sc->sc_data_direction == DMA_FROM_DEVICE) flags = FCPIO_ICMND_RDDATA; else if (sc->sc_data_direction == DMA_TO_DEVICE) flags = FCPIO_ICMND_WRDATA; exch_flags = 0; if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && (rp->flags & FC_RP_FLAGS_RETRY)) exch_flags |= FCPIO_ICMND_SRFLAG_RETRY; fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag, 0, exch_flags, io_req->sgl_cnt, SCSI_SENSE_BUFFERSIZE, io_req->sgl_list_pa, io_req->sense_buf_pa, 0, /* scsi cmd ref, always 0 */ pri_tag, /* scsi pri and tag */ flags, /* command flags */ sc->cmnd, sc->cmd_len, scsi_bufflen(sc), fc_lun.scsi_lun, io_req->port_id, rport->maxframe_size, rp->r_a_tov, rp->e_d_tov); spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); return 0; } /* * fnic_queuecommand * Routine to send a scsi cdb * Called with host_lock held and interrupts disabled. 
*/ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct fc_lport *lp; struct fc_rport *rport; struct fnic_io_req *io_req; struct fnic *fnic; struct vnic_wq_copy *wq; int ret; int sg_count; unsigned long flags; unsigned long ptr; rport = starget_to_rport(scsi_target(sc->device)); ret = fc_remote_port_chkready(rport); if (ret) { sc->result = ret; done(sc); return 0; } lp = shost_priv(sc->device->host); if (lp->state != LPORT_ST_READY || !(lp->link_up)) return SCSI_MLQUEUE_HOST_BUSY; /* * Release host lock, use driver resource specific locks from here. * Don't re-enable interrupts in case they were disabled prior to the * caller disabling them. */ spin_unlock(lp->host->host_lock); /* Get a new io_req for this SCSI IO */ fnic = lport_priv(lp); io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); if (!io_req) { ret = SCSI_MLQUEUE_HOST_BUSY; goto out; } memset(io_req, 0, sizeof(*io_req)); /* Map the data buffer */ sg_count = scsi_dma_map(sc); if (sg_count < 0) { mempool_free(io_req, fnic->io_req_pool); goto out; } /* Determine the type of scatter/gather list we need */ io_req->sgl_cnt = sg_count; io_req->sgl_type = FNIC_SGL_CACHE_DFLT; if (sg_count > FNIC_DFLT_SG_DESC_CNT) io_req->sgl_type = FNIC_SGL_CACHE_MAX; if (sg_count) { io_req->sgl_list = mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], GFP_ATOMIC); if (!io_req->sgl_list) { ret = SCSI_MLQUEUE_HOST_BUSY; scsi_dma_unmap(sc); mempool_free(io_req, fnic->io_req_pool); goto out; } /* Cache sgl list allocated address before alignment */ io_req->sgl_list_alloc = io_req->sgl_list; ptr = (unsigned long) io_req->sgl_list; if (ptr % FNIC_SG_DESC_ALIGN) { io_req->sgl_list = (struct host_sg_desc *) (((unsigned long) ptr + FNIC_SG_DESC_ALIGN - 1) & ~(FNIC_SG_DESC_ALIGN - 1)); } } /* initialize rest of io_req */ io_req->port_id = rport->port_id; CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; CMD_SP(sc) = (char *)io_req; sc->scsi_done = done; /* create copy wq desc and enqueue it */ wq = 
&fnic->wq_copy[0]; ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count); if (ret) { /* * In case another thread cancelled the request, * refetch the pointer under the lock. */ spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); CMD_SP(sc) = NULL; CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; spin_unlock_irqrestore(io_lock, flags); if (io_req) { fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); } } out: /* acquire host lock before returning to SCSI */ spin_lock(lp->host->host_lock); return ret; } DEF_SCSI_QCMD(fnic_queuecommand) /* * fnic_fcpio_fw_reset_cmpl_handler * Routine to handle fw reset completion */ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, struct fcpio_fw_req *desc) { u8 type; u8 hdr_status; struct fcpio_tag tag; int ret = 0; unsigned long flags; fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); /* Clean up all outstanding io requests */ fnic_cleanup_io(fnic, SCSI_NO_TAG); spin_lock_irqsave(&fnic->fnic_lock, flags); /* fnic should be in FC_TRANS_ETH_MODE */ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { /* Check status of reset completion */ if (!hdr_status) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "reset cmpl success\n"); /* Ready to send flogi out */ fnic->state = FNIC_IN_ETH_MODE; } else { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic fw_reset : failed %s\n", fnic_fcpio_status_to_str(hdr_status)); /* * Unable to change to eth mode, cannot send out flogi * Change state to fc mode, so that subsequent Flogi * requests from libFC will cause more attempts to * reset the firmware. 
Free the cached flogi */ fnic->state = FNIC_IN_FC_MODE; ret = -1; } } else { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Unexpected state %s while processing" " reset cmpl\n", fnic_state_to_str(fnic->state)); ret = -1; } /* Thread removing device blocks till firmware reset is complete */ if (fnic->remove_wait) complete(fnic->remove_wait); /* * If fnic is being removed, or fw reset failed * free the flogi frame. Else, send it out */ if (fnic->remove_wait || ret) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); skb_queue_purge(&fnic->tx_queue); goto reset_cmpl_handler_end; } spin_unlock_irqrestore(&fnic->fnic_lock, flags); fnic_flush_tx(fnic); reset_cmpl_handler_end: return ret; } /* * fnic_fcpio_flogi_reg_cmpl_handler * Routine to handle flogi register completion */ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, struct fcpio_fw_req *desc) { u8 type; u8 hdr_status; struct fcpio_tag tag; int ret = 0; unsigned long flags; fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); /* Update fnic state based on status of flogi reg completion */ spin_lock_irqsave(&fnic->fnic_lock, flags); if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { /* Check flogi registration completion status */ if (!hdr_status) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "flog reg succeeded\n"); fnic->state = FNIC_IN_FC_MODE; } else { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic flogi reg :failed %s\n", fnic_fcpio_status_to_str(hdr_status)); fnic->state = FNIC_IN_ETH_MODE; ret = -1; } } else { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Unexpected fnic state %s while" " processing flogi reg completion\n", fnic_state_to_str(fnic->state)); ret = -1; } if (!ret) { if (fnic->stop_rx_link_events) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); goto reg_cmpl_handler_end; } spin_unlock_irqrestore(&fnic->fnic_lock, flags); fnic_flush_tx(fnic); queue_work(fnic_event_queue, &fnic->frame_work); } else { spin_unlock_irqrestore(&fnic->fnic_lock, flags); } reg_cmpl_handler_end: 
return ret; } static inline int is_ack_index_in_range(struct vnic_wq_copy *wq, u16 request_out) { if (wq->to_clean_index <= wq->to_use_index) { /* out of range, stale request_out index */ if (request_out < wq->to_clean_index || request_out >= wq->to_use_index) return 0; } else { /* out of range, stale request_out index */ if (request_out < wq->to_clean_index && request_out >= wq->to_use_index) return 0; } /* request_out index is in range */ return 1; } /* * Mark that ack received and store the Ack index. If there are multiple * acks received before Tx thread cleans it up, the latest value will be * used which is correct behavior. This state should be in the copy Wq * instead of in the fnic */ static inline void fnic_fcpio_ack_handler(struct fnic *fnic, unsigned int cq_index, struct fcpio_fw_req *desc) { struct vnic_wq_copy *wq; u16 request_out = desc->u.ack.request_out; unsigned long flags; /* mark the ack state */ wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); if (is_ack_index_in_range(wq, request_out)) { fnic->fw_ack_index[0] = request_out; fnic->fw_ack_recd[0] = 1; } spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); } /* * fnic_fcpio_icmnd_cmpl_handler * Routine to handle icmnd completions */ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, struct fcpio_fw_req *desc) { u8 type; u8 hdr_status; struct fcpio_tag tag; u32 id; u64 xfer_len = 0; struct fcpio_icmnd_cmpl *icmnd_cmpl; struct fnic_io_req *io_req; struct scsi_cmnd *sc; unsigned long flags; spinlock_t *io_lock; /* Decode the cmpl description to get the io_req id */ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); fcpio_tag_id_dec(&tag, &id); if (id >= FNIC_MAX_IO_REQ) return; sc = scsi_host_find_tag(fnic->lport->host, id); WARN_ON_ONCE(!sc); if (!sc) return; io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); WARN_ON_ONCE(!io_req); if (!io_req) { 
spin_unlock_irqrestore(io_lock, flags); return; } /* firmware completed the io */ io_req->io_completed = 1; /* * if SCSI-ML has already issued abort on this command, * ignore completion of the IO. The abts path will clean it up */ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { spin_unlock_irqrestore(io_lock, flags); return; } /* Mark the IO as complete */ CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; icmnd_cmpl = &desc->u.icmnd_cmpl; switch (hdr_status) { case FCPIO_SUCCESS: sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status; xfer_len = scsi_bufflen(sc); scsi_set_resid(sc, icmnd_cmpl->residual); if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) xfer_len -= icmnd_cmpl->residual; /* * If queue_full, then try to reduce queue depth for all * LUNS on the target. Todo: this should be accompanied * by a periodic queue_depth rampup based on successful * IO completion. */ if (icmnd_cmpl->scsi_status == QUEUE_FULL) { struct scsi_device *t_sdev; int qd = 0; shost_for_each_device(t_sdev, sc->device->host) { if (t_sdev->id != sc->device->id) continue; if (t_sdev->queue_depth > 1) { qd = scsi_track_queue_full (t_sdev, t_sdev->queue_depth - 1); if (qd == -1) qd = t_sdev->host->cmd_per_lun; shost_printk(KERN_INFO, fnic->lport->host, "scsi[%d:%d:%d:%d" "] queue full detected," "new depth = %d\n", t_sdev->host->host_no, t_sdev->channel, t_sdev->id, t_sdev->lun, t_sdev->queue_depth); } } } break; case FCPIO_TIMEOUT: /* request was timed out */ sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; break; case FCPIO_ABORTED: /* request was aborted */ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break; case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. 
*/ scsi_set_resid(sc, icmnd_cmpl->residual); sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break; case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */ sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; break; case FCPIO_INVALID_HEADER: /* header contains invalid data */ case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */ case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */ case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ case FCPIO_FW_ERR: /* request was terminated due fw error */ default: shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", fnic_fcpio_status_to_str(hdr_status)); sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break; } /* Break link with the SCSI command */ CMD_SP(sc) = NULL; spin_unlock_irqrestore(io_lock, flags); fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); if (sc->sc_data_direction == DMA_FROM_DEVICE) { fnic->lport->host_stats.fcp_input_requests++; fnic->fcp_input_bytes += xfer_len; } else if (sc->sc_data_direction == DMA_TO_DEVICE) { fnic->lport->host_stats.fcp_output_requests++; fnic->fcp_output_bytes += xfer_len; } else fnic->lport->host_stats.fcp_control_requests++; /* Call SCSI completion function to complete the IO */ if (sc->scsi_done) sc->scsi_done(sc); } /* fnic_fcpio_itmf_cmpl_handler * Routine to handle itmf completions */ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, struct fcpio_fw_req *desc) { u8 type; u8 hdr_status; struct fcpio_tag tag; u32 id; struct scsi_cmnd *sc; struct fnic_io_req *io_req; unsigned long flags; spinlock_t *io_lock; fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); fcpio_tag_id_dec(&tag, &id); if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) return; sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); WARN_ON_ONCE(!sc); if 
(!sc) return; io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); WARN_ON_ONCE(!io_req); if (!io_req) { spin_unlock_irqrestore(io_lock, flags); return; } if (id & FNIC_TAG_ABORT) { /* Completion of abort cmd */ if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { /* This is a late completion. Ignore it */ spin_unlock_irqrestore(io_lock, flags); return; } CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; CMD_ABTS_STATUS(sc) = hdr_status; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "abts cmpl recd. id %d status %s\n", (int)(id & FNIC_TAG_MASK), fnic_fcpio_status_to_str(hdr_status)); /* * If scsi_eh thread is blocked waiting for abts to complete, * signal completion to it. IO will be cleaned in the thread * else clean it in this context */ if (io_req->abts_done) { complete(io_req->abts_done); spin_unlock_irqrestore(io_lock, flags); } else { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "abts cmpl, completing IO\n"); CMD_SP(sc) = NULL; sc->result = (DID_ERROR << 16); spin_unlock_irqrestore(io_lock, flags); fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); if (sc->scsi_done) sc->scsi_done(sc); } } else if (id & FNIC_TAG_DEV_RST) { /* Completion of device reset */ CMD_LR_STATUS(sc) = hdr_status; CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "dev reset cmpl recd. 
id %d status %s\n", (int)(id & FNIC_TAG_MASK), fnic_fcpio_status_to_str(hdr_status)); if (io_req->dr_done) complete(io_req->dr_done); spin_unlock_irqrestore(io_lock, flags); } else { shost_printk(KERN_ERR, fnic->lport->host, "Unexpected itmf io state %s tag %x\n", fnic_ioreq_state_to_str(CMD_STATE(sc)), id); spin_unlock_irqrestore(io_lock, flags); } } /* * fnic_fcpio_cmpl_handler * Routine to service the cq for wq_copy */ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, unsigned int cq_index, struct fcpio_fw_req *desc) { struct fnic *fnic = vnic_dev_priv(vdev); int ret = 0; switch (desc->hdr.type) { case FCPIO_ACK: /* fw copied copy wq desc to its queue */ fnic_fcpio_ack_handler(fnic, cq_index, desc); break; case FCPIO_ICMND_CMPL: /* fw completed a command */ fnic_fcpio_icmnd_cmpl_handler(fnic, desc); break; case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ fnic_fcpio_itmf_cmpl_handler(fnic, desc); break; case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); break; case FCPIO_RESET_CMPL: /* fw completed reset */ ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc); break; default: FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "firmware completion type %d\n", desc->hdr.type); break; } return ret; } /* * fnic_wq_copy_cmpl_handler * Routine to process wq copy */ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) { unsigned int wq_work_done = 0; unsigned int i, cq_index; unsigned int cur_work_done; for (i = 0; i < fnic->wq_copy_count; i++) { cq_index = i + fnic->raw_wq_count + fnic->rq_count; cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], fnic_fcpio_cmpl_handler, copy_work_to_do); wq_work_done += cur_work_done; } return wq_work_done; } static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) { unsigned int i; struct fnic_io_req *io_req; unsigned long flags = 0; struct scsi_cmnd *sc; spinlock_t 
*io_lock; for (i = 0; i < FNIC_MAX_IO_REQ; i++) { if (i == exclude_id) continue; sc = scsi_host_find_tag(fnic->lport->host, i); if (!sc) continue; io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req) { spin_unlock_irqrestore(io_lock, flags); goto cleanup_scsi_cmd; } CMD_SP(sc) = NULL; spin_unlock_irqrestore(io_lock, flags); /* * If there is a scsi_cmnd associated with this io_req, then * free the corresponding state */ fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); cleanup_scsi_cmd: sc->result = DID_TRANSPORT_DISRUPTED << 16; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:" " DID_TRANSPORT_DISRUPTED\n"); /* Complete the command to SCSI */ if (sc->scsi_done) sc->scsi_done(sc); } } void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, struct fcpio_host_req *desc) { u32 id; struct fnic *fnic = vnic_dev_priv(wq->vdev); struct fnic_io_req *io_req; struct scsi_cmnd *sc; unsigned long flags; spinlock_t *io_lock; /* get the tag reference */ fcpio_tag_id_dec(&desc->hdr.tag, &id); id &= FNIC_TAG_MASK; if (id >= FNIC_MAX_IO_REQ) return; sc = scsi_host_find_tag(fnic->lport->host, id); if (!sc) return; io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); /* Get the IO context which this desc refers to */ io_req = (struct fnic_io_req *)CMD_SP(sc); /* fnic interrupts are turned off by now */ if (!io_req) { spin_unlock_irqrestore(io_lock, flags); goto wq_copy_cleanup_scsi_cmd; } CMD_SP(sc) = NULL; spin_unlock_irqrestore(io_lock, flags); fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); wq_copy_cleanup_scsi_cmd: sc->result = DID_NO_CONNECT << 16; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" " DID_NO_CONNECT\n"); if (sc->scsi_done) sc->scsi_done(sc); } static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, u32 task_req, u8 *fc_lun, struct fnic_io_req 
*io_req) { struct vnic_wq_copy *wq = &fnic->wq_copy[0]; unsigned long flags; spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) free_wq_copy_descs(fnic, wq); if (!vnic_wq_copy_desc_avail(wq)) { spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); return 1; } fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, 0, task_req, tag, fc_lun, io_req->port_id, fnic->config.ra_tov, fnic->config.ed_tov); spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); return 0; } void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) { int tag; struct fnic_io_req *io_req; spinlock_t *io_lock; unsigned long flags; struct scsi_cmnd *sc; struct scsi_lun fc_lun; enum fnic_ioreq_state old_ioreq_state; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_rport_reset_exch called portid 0x%06x\n", port_id); if (fnic->in_remove) return; for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { sc = scsi_host_find_tag(fnic->lport->host, tag); if (!sc) continue; io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req || io_req->port_id != port_id) { spin_unlock_irqrestore(io_lock, flags); continue; } /* * Found IO that is still pending with firmware and * belongs to rport that went away */ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { spin_unlock_irqrestore(io_lock, flags); continue; } old_ioreq_state = CMD_STATE(sc); CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; BUG_ON(io_req->abts_done); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_rport_reset_exch: Issuing abts\n"); spin_unlock_irqrestore(io_lock, flags); /* Now queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); if (fnic_queue_abort_io_req(fnic, tag, FCPIO_ITMF_ABT_TASK_TERM, fc_lun.scsi_lun, io_req)) { /* * Revert the cmd state back to old state, if * it hasn't changed in between. 
This cmd will get * aborted later by scsi_eh, or cleaned up during * lun reset */ io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) CMD_STATE(sc) = old_ioreq_state; spin_unlock_irqrestore(io_lock, flags); } } } void fnic_terminate_rport_io(struct fc_rport *rport) { int tag; struct fnic_io_req *io_req; spinlock_t *io_lock; unsigned long flags; struct scsi_cmnd *sc; struct scsi_lun fc_lun; struct fc_rport_libfc_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fnic *fnic = lport_priv(lport); struct fc_rport *cmd_rport; enum fnic_ioreq_state old_ioreq_state; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_terminate_rport_io called" " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n", rport->port_name, rport->node_name, rport->port_id); if (fnic->in_remove) return; for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { sc = scsi_host_find_tag(fnic->lport->host, tag); if (!sc) continue; cmd_rport = starget_to_rport(scsi_target(sc->device)); if (rport != cmd_rport) continue; io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req || rport != cmd_rport) { spin_unlock_irqrestore(io_lock, flags); continue; } /* * Found IO that is still pending with firmware and * belongs to rport that went away */ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { spin_unlock_irqrestore(io_lock, flags); continue; } old_ioreq_state = CMD_STATE(sc); CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; BUG_ON(io_req->abts_done); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_terminate_rport_io: Issuing abts\n"); spin_unlock_irqrestore(io_lock, flags); /* Now queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); if (fnic_queue_abort_io_req(fnic, tag, FCPIO_ITMF_ABT_TASK_TERM, fc_lun.scsi_lun, io_req)) { /* * Revert the cmd state back to old state, if * it hasn't changed in 
between. This cmd will get * aborted later by scsi_eh, or cleaned up during * lun reset */ io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) CMD_STATE(sc) = old_ioreq_state; spin_unlock_irqrestore(io_lock, flags); } } } /* * This function is exported to SCSI for sending abort cmnds. * A SCSI IO is represented by a io_req in the driver. * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO. */ int fnic_abort_cmd(struct scsi_cmnd *sc) { struct fc_lport *lp; struct fnic *fnic; struct fnic_io_req *io_req; struct fc_rport *rport; spinlock_t *io_lock; unsigned long flags; int ret = SUCCESS; u32 task_req; struct scsi_lun fc_lun; DECLARE_COMPLETION_ONSTACK(tm_done); /* Wait for rport to unblock */ fc_block_scsi_eh(sc); /* Get local-port, check ready and link up */ lp = shost_priv(sc->device->host); fnic = lport_priv(lp); rport = starget_to_rport(scsi_target(sc->device)); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n", rport->port_id, sc->device->lun, sc->request->tag); if (lp->state != LPORT_ST_READY || !(lp->link_up)) { ret = FAILED; goto fnic_abort_cmd_end; } /* * Avoid a race between SCSI issuing the abort and the device * completing the command. * * If the command is already completed by the fw cmpl code, * we just return SUCCESS from here. This means that the abort * succeeded. In the SCSI ML, since the timeout for command has * happened, the completion wont actually complete the command * and it will be considered as an aborted command * * The CMD_SP will not be cleared except while holding io_req_lock. 
*/ io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req) { spin_unlock_irqrestore(io_lock, flags); goto fnic_abort_cmd_end; } io_req->abts_done = &tm_done; if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { spin_unlock_irqrestore(io_lock, flags); goto wait_pending; } /* * Command is still pending, need to abort it * If the firmware completes the command after this point, * the completion wont be done till mid-layer, since abort * has already started. */ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; spin_unlock_irqrestore(io_lock, flags); /* * Check readiness of the remote port. If the path to remote * port is up, then send abts to the remote port to terminate * the IO. Else, just locally terminate the IO in the firmware */ if (fc_remote_port_chkready(rport) == 0) task_req = FCPIO_ITMF_ABT_TASK; else task_req = FCPIO_ITMF_ABT_TASK_TERM; /* Now queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req, fc_lun.scsi_lun, io_req)) { spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (io_req) io_req->abts_done = NULL; spin_unlock_irqrestore(io_lock, flags); ret = FAILED; goto fnic_abort_cmd_end; } /* * We queued an abort IO, wait for its completion. * Once the firmware completes the abort command, it will * wake up this thread. 
*/ wait_pending: wait_for_completion_timeout(&tm_done, msecs_to_jiffies (2 * fnic->config.ra_tov + fnic->config.ed_tov)); /* Check the abort status */ spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req) { spin_unlock_irqrestore(io_lock, flags); ret = FAILED; goto fnic_abort_cmd_end; } io_req->abts_done = NULL; /* fw did not complete abort, timed out */ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { spin_unlock_irqrestore(io_lock, flags); ret = FAILED; goto fnic_abort_cmd_end; } /* * firmware completed the abort, check the status, * free the io_req irrespective of failure or success */ if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS) ret = FAILED; CMD_SP(sc) = NULL; spin_unlock_irqrestore(io_lock, flags); fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); fnic_abort_cmd_end: FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Returning from abort cmd %s\n", (ret == SUCCESS) ? "SUCCESS" : "FAILED"); return ret; } static inline int fnic_queue_dr_io_req(struct fnic *fnic, struct scsi_cmnd *sc, struct fnic_io_req *io_req) { struct vnic_wq_copy *wq = &fnic->wq_copy[0]; struct scsi_lun fc_lun; int ret = 0; unsigned long intr_flags; spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) free_wq_copy_descs(fnic, wq); if (!vnic_wq_copy_desc_avail(wq)) { ret = -EAGAIN; goto lr_io_req_end; } /* fill in the lun info */ int_to_scsilun(sc->device->lun, &fc_lun); fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST, 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG, fc_lun.scsi_lun, io_req->port_id, fnic->config.ra_tov, fnic->config.ed_tov); lr_io_req_end: spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); return ret; } /* * Clean up any pending aborts on the lun * For each outstanding IO on this lun, whose abort is not completed by fw, * issue a local abort. Wait for abort to complete. 
Return 0 if all commands * successfully aborted, 1 otherwise */ static int fnic_clean_pending_aborts(struct fnic *fnic, struct scsi_cmnd *lr_sc) { int tag; struct fnic_io_req *io_req; spinlock_t *io_lock; unsigned long flags; int ret = 0; struct scsi_cmnd *sc; struct scsi_lun fc_lun; struct scsi_device *lun_dev = lr_sc->device; DECLARE_COMPLETION_ONSTACK(tm_done); for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { sc = scsi_host_find_tag(fnic->lport->host, tag); /* * ignore this lun reset cmd or cmds that do not belong to * this lun */ if (!sc || sc == lr_sc || sc->device != lun_dev) continue; io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req || sc->device != lun_dev) { spin_unlock_irqrestore(io_lock, flags); continue; } /* * Found IO that is still pending with firmware and * belongs to the LUN that we are resetting */ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Found IO in %s on lun\n", fnic_ioreq_state_to_str(CMD_STATE(sc))); BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING); CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; io_req->abts_done = &tm_done; spin_unlock_irqrestore(io_lock, flags); /* Now queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); if (fnic_queue_abort_io_req(fnic, tag, FCPIO_ITMF_ABT_TASK_TERM, fc_lun.scsi_lun, io_req)) { spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (io_req) io_req->abts_done = NULL; spin_unlock_irqrestore(io_lock, flags); ret = 1; goto clean_pending_aborts_end; } wait_for_completion_timeout(&tm_done, msecs_to_jiffies (fnic->config.ed_tov)); /* Recheck cmd state to check if it is now aborted */ spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req) { spin_unlock_irqrestore(io_lock, flags); ret = 1; goto clean_pending_aborts_end; } io_req->abts_done = NULL; /* if abort is still pending with fw, fail */ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 
spin_unlock_irqrestore(io_lock, flags); ret = 1; goto clean_pending_aborts_end; } CMD_SP(sc) = NULL; spin_unlock_irqrestore(io_lock, flags); fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); } clean_pending_aborts_end: return ret; } /* * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN * fail to get aborted. It calls driver's eh_device_reset with a SCSI command * on the LUN. */ int fnic_device_reset(struct scsi_cmnd *sc) { struct fc_lport *lp; struct fnic *fnic; struct fnic_io_req *io_req; struct fc_rport *rport; int status; int ret = FAILED; spinlock_t *io_lock; unsigned long flags; DECLARE_COMPLETION_ONSTACK(tm_done); /* Wait for rport to unblock */ fc_block_scsi_eh(sc); /* Get local-port, check ready and link up */ lp = shost_priv(sc->device->host); fnic = lport_priv(lp); rport = starget_to_rport(scsi_target(sc->device)); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Device reset called FCID 0x%x, LUN 0x%x\n", rport->port_id, sc->device->lun); if (lp->state != LPORT_ST_READY || !(lp->link_up)) goto fnic_device_reset_end; /* Check if remote port up */ if (fc_remote_port_chkready(rport)) goto fnic_device_reset_end; io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); /* * If there is a io_req attached to this command, then use it, * else allocate a new one. 
*/ if (!io_req) { io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); if (!io_req) { spin_unlock_irqrestore(io_lock, flags); goto fnic_device_reset_end; } memset(io_req, 0, sizeof(*io_req)); io_req->port_id = rport->port_id; CMD_SP(sc) = (char *)io_req; } io_req->dr_done = &tm_done; CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; spin_unlock_irqrestore(io_lock, flags); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n", sc->request->tag); /* * issue the device reset, if enqueue failed, clean up the ioreq * and break assoc with scsi cmd */ if (fnic_queue_dr_io_req(fnic, sc, io_req)) { spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (io_req) io_req->dr_done = NULL; goto fnic_device_reset_clean; } /* * Wait on the local completion for LUN reset. The io_req may be * freed while we wait since we hold no lock. */ wait_for_completion_timeout(&tm_done, msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req) { spin_unlock_irqrestore(io_lock, flags); goto fnic_device_reset_end; } io_req->dr_done = NULL; status = CMD_LR_STATUS(sc); spin_unlock_irqrestore(io_lock, flags); /* * If lun reset not completed, bail out with failed. io_req * gets cleaned up during higher levels of EH */ if (status == FCPIO_INVALID_CODE) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Device reset timed out\n"); goto fnic_device_reset_end; } /* Completed, but not successful, clean up the io_req, return fail */ if (status != FCPIO_SUCCESS) { spin_lock_irqsave(io_lock, flags); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Device reset completed - failed\n"); io_req = (struct fnic_io_req *)CMD_SP(sc); goto fnic_device_reset_clean; } /* * Clean up any aborts on this lun that have still not * completed. If any of these fail, then LUN reset fails. * clean_pending_aborts cleans all cmds on this lun except * the lun reset cmd. 
If all cmds get cleaned, the lun reset * succeeds */ if (fnic_clean_pending_aborts(fnic, sc)) { spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Device reset failed" " since could not abort all IOs\n"); goto fnic_device_reset_clean; } /* Clean lun reset command */ spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (io_req) /* Completed, and successful */ ret = SUCCESS; fnic_device_reset_clean: if (io_req) CMD_SP(sc) = NULL; spin_unlock_irqrestore(io_lock, flags); if (io_req) { fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); } fnic_device_reset_end: FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Returning from device reset %s\n", (ret == SUCCESS) ? "SUCCESS" : "FAILED"); return ret; } /* Clean up all IOs, clean up libFC local port */ int fnic_reset(struct Scsi_Host *shost) { struct fc_lport *lp; struct fnic *fnic; int ret = SUCCESS; lp = shost_priv(shost); fnic = lport_priv(lp); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_reset called\n"); /* * Reset local port, this will clean up libFC exchanges, * reset remote port sessions, and if link is up, begin flogi */ if (lp->tt.lport_reset(lp)) ret = FAILED; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Returning from fnic reset %s\n", (ret == SUCCESS) ? "SUCCESS" : "FAILED"); return ret; } /* * SCSI Error handling calls driver's eh_host_reset if all prior * error handling levels return FAILED. If host reset completes * successfully, and if link is up, then Fabric login begins. * * Host Reset is the highest level of error recovery. If this fails, then * host is offlined by SCSI. 
* */ int fnic_host_reset(struct scsi_cmnd *sc) { int ret; unsigned long wait_host_tmo; struct Scsi_Host *shost = sc->device->host; struct fc_lport *lp = shost_priv(shost); /* * If fnic_reset is successful, wait for fabric login to complete * scsi-ml tries to send a TUR to every device if host reset is * successful, so before returning to scsi, fabric should be up */ ret = fnic_reset(shost); if (ret == SUCCESS) { wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; ret = FAILED; while (time_before(jiffies, wait_host_tmo)) { if ((lp->state == LPORT_ST_READY) && (lp->link_up)) { ret = SUCCESS; break; } ssleep(1); } } return ret; } /* * This fxn is called from libFC when host is removed */ void fnic_scsi_abort_io(struct fc_lport *lp) { int err = 0; unsigned long flags; enum fnic_state old_state; struct fnic *fnic = lport_priv(lp); DECLARE_COMPLETION_ONSTACK(remove_wait); /* Issue firmware reset for fnic, wait for reset to complete */ spin_lock_irqsave(&fnic->fnic_lock, flags); fnic->remove_wait = &remove_wait; old_state = fnic->state; fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); spin_unlock_irqrestore(&fnic->fnic_lock, flags); err = fnic_fw_reset_handler(fnic); if (err) { spin_lock_irqsave(&fnic->fnic_lock, flags); if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) fnic->state = old_state; fnic->remove_wait = NULL; spin_unlock_irqrestore(&fnic->fnic_lock, flags); return; } /* Wait for firmware reset to complete */ wait_for_completion_timeout(&remove_wait, msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT)); spin_lock_irqsave(&fnic->fnic_lock, flags); fnic->remove_wait = NULL; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_scsi_abort_io %s\n", (fnic->state == FNIC_IN_ETH_MODE) ? 
"SUCCESS" : "FAILED"); spin_unlock_irqrestore(&fnic->fnic_lock, flags); } /* * This fxn called from libFC to clean up driver IO state on link down */ void fnic_scsi_cleanup(struct fc_lport *lp) { unsigned long flags; enum fnic_state old_state; struct fnic *fnic = lport_priv(lp); /* issue fw reset */ spin_lock_irqsave(&fnic->fnic_lock, flags); old_state = fnic->state; fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (fnic_fw_reset_handler(fnic)) { spin_lock_irqsave(&fnic->fnic_lock, flags); if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) fnic->state = old_state; spin_unlock_irqrestore(&fnic->fnic_lock, flags); } } void fnic_empty_scsi_cleanup(struct fc_lport *lp) { } void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) { struct fnic *fnic = lport_priv(lp); /* Non-zero sid, nothing to do */ if (sid) goto call_fc_exch_mgr_reset; if (did) { fnic_rport_exch_reset(fnic, did); goto call_fc_exch_mgr_reset; } /* * sid = 0, did = 0 * link down or device being removed */ if (!fnic->in_remove) fnic_scsi_cleanup(lp); else fnic_scsi_abort_io(lp); /* call libFC exch mgr reset to reset its exchanges */ call_fc_exch_mgr_reset: fc_exch_mgr_reset(lp, sid, did); }
gpl-2.0
rutvik95/android_kernel_samsung_i9060
arch/s390/crypto/des_s390.c
7280
16684
/* * Cryptographic API. * * s390 implementation of the DES Cipher Algorithm. * * Copyright IBM Corp. 2003,2011 * Author(s): Thomas Spatzier * Jan Glauber (jan.glauber@de.ibm.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/des.h> #include "crypt_s390.h" #define DES3_KEY_SIZE (3 * DES_KEY_SIZE) static u8 *ctrblk; struct s390_des_ctx { u8 iv[DES_BLOCK_SIZE]; u8 key[DES3_KEY_SIZE]; }; static int des_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len) { struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); u32 *flags = &tfm->crt_flags; u32 tmp[DES_EXPKEY_WORDS]; /* check for weak keys */ if (!des_ekey(tmp, key) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } memcpy(ctx->key, key, key_len); return 0; } static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); crypt_s390_km(KM_DEA_ENCRYPT, ctx->key, out, in, DES_BLOCK_SIZE); } static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); crypt_s390_km(KM_DEA_DECRYPT, ctx->key, out, in, DES_BLOCK_SIZE); } static struct crypto_alg des_alg = { .cra_name = "des", .cra_driver_name = "des-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_des_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(des_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = DES_KEY_SIZE, .cia_max_keysize = DES_KEY_SIZE, .cia_setkey = des_setkey, .cia_encrypt = des_encrypt, .cia_decrypt = des_decrypt, } } }; static int ecb_desall_crypt(struct 
blkcipher_desc *desc, long func, u8 *key, struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt(desc, walk); unsigned int nbytes; while ((nbytes = walk->nbytes)) { /* only use complete blocks */ unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); u8 *out = walk->dst.virt.addr; u8 *in = walk->src.virt.addr; ret = crypt_s390_km(func, key, out, in, n); BUG_ON((ret < 0) || (ret != n)); nbytes &= DES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, walk, nbytes); } return ret; } static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, u8 *iv, struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt(desc, walk); unsigned int nbytes = walk->nbytes; if (!nbytes) goto out; memcpy(iv, walk->iv, DES_BLOCK_SIZE); do { /* only use complete blocks */ unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); u8 *out = walk->dst.virt.addr; u8 *in = walk->src.virt.addr; ret = crypt_s390_kmc(func, iv, out, in, n); BUG_ON((ret < 0) || (ret != n)); nbytes &= DES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, walk, nbytes); } while ((nbytes = walk->nbytes)); memcpy(walk->iv, iv, DES_BLOCK_SIZE); out: return ret; } static int ecb_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk); } static int ecb_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk); } static struct crypto_alg ecb_des_alg = { .cra_name = "ecb(des)", .cra_driver_name = "ecb-des-s390", .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES_BLOCK_SIZE, 
.cra_ctxsize = sizeof(struct s390_des_ctx), .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list), .cra_u = { .blkcipher = { .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = des_setkey, .encrypt = ecb_des_encrypt, .decrypt = ecb_des_decrypt, } } }; static int cbc_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk); } static int cbc_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk); } static struct crypto_alg cbc_des_alg = { .cra_name = "cbc(des)", .cra_driver_name = "cbc-des-s390", .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_des_ctx), .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list), .cra_u = { .blkcipher = { .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey = des_setkey, .encrypt = cbc_des_encrypt, .decrypt = cbc_des_decrypt, } } }; /* * RFC2451: * * For DES-EDE3, there is no known need to reject weak or * complementation keys. Any weakness is obviated by the use of * multiple keys. * * However, if the first two or last two independent 64-bit keys are * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the * same as DES. Implementers MUST reject keys that exhibit this * property. 
* */ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len) { struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); u32 *flags = &tfm->crt_flags; if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], DES_KEY_SIZE)) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } memcpy(ctx->key, key, key_len); return 0; } static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); crypt_s390_km(KM_TDEA_192_ENCRYPT, ctx->key, dst, src, DES_BLOCK_SIZE); } static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); crypt_s390_km(KM_TDEA_192_DECRYPT, ctx->key, dst, src, DES_BLOCK_SIZE); } static struct crypto_alg des3_alg = { .cra_name = "des3_ede", .cra_driver_name = "des3_ede-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_des_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(des3_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = DES3_KEY_SIZE, .cia_max_keysize = DES3_KEY_SIZE, .cia_setkey = des3_setkey, .cia_encrypt = des3_encrypt, .cia_decrypt = des3_decrypt, } } }; static int ecb_des3_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk); } static int ecb_des3_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk); } 
static struct crypto_alg ecb_des3_alg = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3_ede-s390", .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_des_ctx), .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT( ecb_des3_alg.cra_list), .cra_u = { .blkcipher = { .min_keysize = DES3_KEY_SIZE, .max_keysize = DES3_KEY_SIZE, .setkey = des3_setkey, .encrypt = ecb_des3_encrypt, .decrypt = ecb_des3_decrypt, } } }; static int cbc_des3_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk); } static int cbc_des3_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk); } static struct crypto_alg cbc_des3_alg = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-des3_ede-s390", .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_des_ctx), .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT( cbc_des3_alg.cra_list), .cra_u = { .blkcipher = { .min_keysize = DES3_KEY_SIZE, .max_keysize = DES3_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey = des3_setkey, .encrypt = cbc_des3_encrypt, .decrypt = cbc_des3_decrypt, } } }; static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, struct s390_des_ctx *ctx, struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt_block(desc, walk, 
DES_BLOCK_SIZE); unsigned int i, n, nbytes; u8 buf[DES_BLOCK_SIZE]; u8 *out, *in; memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { out = walk->dst.virt.addr; in = walk->src.virt.addr; while (nbytes >= DES_BLOCK_SIZE) { /* align to block size, max. PAGE_SIZE */ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1); for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) { memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE); crypto_inc(ctrblk + i, DES_BLOCK_SIZE); } ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk); BUG_ON((ret < 0) || (ret != n)); if (n > DES_BLOCK_SIZE) memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE, DES_BLOCK_SIZE); crypto_inc(ctrblk, DES_BLOCK_SIZE); out += n; in += n; nbytes -= n; } ret = blkcipher_walk_done(desc, walk, nbytes); } /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { out = walk->dst.virt.addr; in = walk->src.virt.addr; ret = crypt_s390_kmctr(func, ctx->key, buf, in, DES_BLOCK_SIZE, ctrblk); BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE); memcpy(out, buf, nbytes); crypto_inc(ctrblk, DES_BLOCK_SIZE); ret = blkcipher_walk_done(desc, walk, 0); } memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE); return ret; } static int ctr_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk); } static int ctr_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk); } static struct crypto_alg ctr_des_alg = { .cra_name = "ctr(des)", .cra_driver_name = "ctr-des-s390", 
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct s390_des_ctx), .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(ctr_des_alg.cra_list), .cra_u = { .blkcipher = { .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey = des_setkey, .encrypt = ctr_des_encrypt, .decrypt = ctr_des_decrypt, } } }; static int ctr_des3_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk); } static int ctr_des3_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk); } static struct crypto_alg ctr_des3_alg = { .cra_name = "ctr(des3_ede)", .cra_driver_name = "ctr-des3_ede-s390", .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct s390_des_ctx), .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(ctr_des3_alg.cra_list), .cra_u = { .blkcipher = { .min_keysize = DES3_KEY_SIZE, .max_keysize = DES3_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey = des3_setkey, .encrypt = ctr_des3_encrypt, .decrypt = ctr_des3_decrypt, } } }; static int __init des_s390_init(void) { int ret; if (!crypt_s390_func_available(KM_DEA_ENCRYPT, CRYPT_S390_MSA) || !crypt_s390_func_available(KM_TDEA_192_ENCRYPT, CRYPT_S390_MSA)) return -EOPNOTSUPP; ret = crypto_register_alg(&des_alg); if (ret) goto des_err; ret = 
crypto_register_alg(&ecb_des_alg); if (ret) goto ecb_des_err; ret = crypto_register_alg(&cbc_des_alg); if (ret) goto cbc_des_err; ret = crypto_register_alg(&des3_alg); if (ret) goto des3_err; ret = crypto_register_alg(&ecb_des3_alg); if (ret) goto ecb_des3_err; ret = crypto_register_alg(&cbc_des3_alg); if (ret) goto cbc_des3_err; if (crypt_s390_func_available(KMCTR_DEA_ENCRYPT, CRYPT_S390_MSA | CRYPT_S390_MSA4) && crypt_s390_func_available(KMCTR_TDEA_192_ENCRYPT, CRYPT_S390_MSA | CRYPT_S390_MSA4)) { ret = crypto_register_alg(&ctr_des_alg); if (ret) goto ctr_des_err; ret = crypto_register_alg(&ctr_des3_alg); if (ret) goto ctr_des3_err; ctrblk = (u8 *) __get_free_page(GFP_KERNEL); if (!ctrblk) { ret = -ENOMEM; goto ctr_mem_err; } } out: return ret; ctr_mem_err: crypto_unregister_alg(&ctr_des3_alg); ctr_des3_err: crypto_unregister_alg(&ctr_des_alg); ctr_des_err: crypto_unregister_alg(&cbc_des3_alg); cbc_des3_err: crypto_unregister_alg(&ecb_des3_alg); ecb_des3_err: crypto_unregister_alg(&des3_alg); des3_err: crypto_unregister_alg(&cbc_des_alg); cbc_des_err: crypto_unregister_alg(&ecb_des_alg); ecb_des_err: crypto_unregister_alg(&des_alg); des_err: goto out; } static void __exit des_s390_exit(void) { if (ctrblk) { crypto_unregister_alg(&ctr_des_alg); crypto_unregister_alg(&ctr_des3_alg); free_page((unsigned long) ctrblk); } crypto_unregister_alg(&cbc_des3_alg); crypto_unregister_alg(&ecb_des3_alg); crypto_unregister_alg(&des3_alg); crypto_unregister_alg(&cbc_des_alg); crypto_unregister_alg(&ecb_des_alg); crypto_unregister_alg(&des_alg); } module_init(des_s390_init); module_exit(des_s390_exit); MODULE_ALIAS("des"); MODULE_ALIAS("des3_ede"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
gpl-2.0
djmatt604/kernel_samsung_msm8660-common
arch/arm/mach-bcmring/clock.c
8048
5120
/*****************************************************************************
* Copyright 2001 - 2009 Broadcom Corporation.  All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/

/*
 * BCMRING clock framework glue: implements the generic <linux/clk.h>
 * operations (clk_enable/clk_disable/clk_get_rate/...) on top of the
 * chipcHw_* low-level clock-controller helpers.
 *
 * Locking: a single driver-wide spinlock (clk_lock) serializes every
 * mutation of the clock tree, taken with IRQs disabled so the API is
 * usable from atomic context.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <linux/clkdev.h>
#include <mach/csp/hw_cfg.h>
#include <mach/csp/chipcHw_def.h>
#include <mach/csp/chipcHw_reg.h>
#include <mach/csp/chipcHw_inline.h>

#include "clock.h"

/* Predicates over the per-clock type/mode bitmasks defined in clock.h. */
#define clk_is_primary(x) ((x)->type & CLK_TYPE_PRIMARY)
#define clk_is_pll1(x) ((x)->type & CLK_TYPE_PLL1)
#define clk_is_pll2(x) ((x)->type & CLK_TYPE_PLL2)
#define clk_is_programmable(x) ((x)->type & CLK_TYPE_PROGRAMMABLE)
#define clk_is_bypassable(x) ((x)->type & CLK_TYPE_BYPASSABLE)

#define clk_is_using_xtal(x) ((x)->mode & CLK_MODE_XTAL)

/* Protects use_cnt, parent links and the hardware enable/disable
 * sequences below; always taken irqsave. */
static DEFINE_SPINLOCK(clk_lock);

/*
 * Recursively enable @clk and every ancestor, bumping each node's
 * use_cnt.  The hardware is only touched on the 0 -> 1 transition.
 * Parents are enabled before children so a child never runs from a
 * gated source.  Caller must hold clk_lock.
 */
static void __clk_enable(struct clk *clk)
{
	if (!clk)
		return;

	/* enable parent clock first */
	if (clk->parent)
		__clk_enable(clk->parent);

	if (clk->use_cnt++ == 0) {
		if (clk_is_pll1(clk)) {	/* PLL1 */
			chipcHw_pll1Enable(clk->rate_hz, 0);
		} else if (clk_is_pll2(clk)) {	/* PLL2 */
			chipcHw_pll2Enable(clk->rate_hz);
		} else if (clk_is_using_xtal(clk)) {	/* source is crystal */
			if (!clk_is_primary(clk))
				chipcHw_bypassClockEnable(clk->csp_id);
		} else {	/* source is PLL */
			chipcHw_setClockEnable(clk->csp_id);
		}
	}
}

/*
 * Public clk API: enable @clk (and, transitively, its parents).
 * Returns 0 on success, -EINVAL for a NULL clock.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clk_lock, flags);
	__clk_enable(clk);
	spin_unlock_irqrestore(&clk_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_enable);

/*
 * Recursive counterpart of __clk_enable: drop one reference on @clk,
 * gating the hardware on the 1 -> 0 transition, then release the
 * parent.  Children are disabled before parents — the reverse of the
 * enable order.  BUG_ON catches unbalanced disable calls.  Caller must
 * hold clk_lock.
 */
static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	BUG_ON(clk->use_cnt == 0);

	if (--clk->use_cnt == 0) {
		if (clk_is_pll1(clk)) {	/* PLL1 */
			chipcHw_pll1Disable();
		} else if (clk_is_pll2(clk)) {	/* PLL2 */
			chipcHw_pll2Disable();
		} else if (clk_is_using_xtal(clk)) {	/* source is crystal */
			if (!clk_is_primary(clk))
				chipcHw_bypassClockDisable(clk->csp_id);
		} else {	/* source is PLL */
			chipcHw_setClockDisable(clk->csp_id);
		}
	}

	if (clk->parent)
		__clk_disable(clk->parent);
}

/* Public clk API: release one reference on @clk; NULL is a no-op. */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clk_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clk_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

/* Public clk API: cached rate in Hz (0 for a NULL clock).  Reads the
 * software copy only; does not query the hardware. */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk->rate_hz;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Public clk API: report the rate clk_set_rate() would actually use —
 * the requested rate clamped to the parent's rate.  Only programmable,
 * currently-unused clocks may be rounded (-EINVAL / -EBUSY otherwise).
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	unsigned long actual;
	unsigned long rate_hz;

	if (!clk)
		return -EINVAL;

	if (!clk_is_programmable(clk))
		return -EINVAL;

	if (clk->use_cnt)
		return -EBUSY;

	spin_lock_irqsave(&clk_lock, flags);
	actual = clk->parent->rate_hz;
	rate_hz = min(actual, rate);
	spin_unlock_irqrestore(&clk_lock, flags);

	return rate_hz;
}
EXPORT_SYMBOL(clk_round_rate);

/*
 * Public clk API: program @clk to (at most) @rate Hz.  The request is
 * clamped to the parent's rate, handed to the clock controller, and the
 * frequency the hardware actually granted is cached in rate_hz.  Only
 * valid for programmable clocks that are not in use.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	unsigned long actual;
	unsigned long rate_hz;

	if (!clk)
		return -EINVAL;

	if (!clk_is_programmable(clk))
		return -EINVAL;

	if (clk->use_cnt)
		return -EBUSY;

	spin_lock_irqsave(&clk_lock, flags);
	actual = clk->parent->rate_hz;
	rate_hz = min(actual, rate);
	rate_hz = chipcHw_setClockFrequency(clk->csp_id, rate_hz);
	clk->rate_hz = rate_hz;
	spin_unlock_irqrestore(&clk_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_set_rate);

/* Public clk API: current parent, or NULL for a NULL clock. */
struct clk *clk_get_parent(struct clk *clk)
{
	if (!clk)
		return NULL;

	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

/*
 * Public clk API: re-parent @clk onto @parent.  Only a bypassable clock
 * may move, and only onto a primary source; a clock shared by more than
 * one user (use_cnt > 1) may not be re-parented.  If the clock is live,
 * the swap is done glitch-consciously under clk_lock: the use_cnt is
 * dropped by one so __clk_enable re-enables the clock through the NEW
 * parent chain, then the OLD parent chain is released.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	struct clk *old_parent;

	if (!clk || !parent)
		return -EINVAL;

	if (!clk_is_primary(parent) || !clk_is_bypassable(clk))
		return -EINVAL;

	/* if more than one user, parent is not allowed */
	if (clk->use_cnt > 1)
		return -EBUSY;

	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clk_lock, flags);
	old_parent = clk->parent;
	clk->parent = parent;
	/* Keep the XTAL mode bit in sync with the new source. */
	if (clk_is_using_xtal(parent))
		clk->mode |= CLK_MODE_XTAL;
	else
		clk->mode &= (~CLK_MODE_XTAL);

	/* if clock is active */
	if (clk->use_cnt != 0) {
		clk->use_cnt--;
		/* enable clock with the new parent */
		__clk_enable(clk);
		/* disable the old parent */
		__clk_disable(old_parent);
	}
	spin_unlock_irqrestore(&clk_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_set_parent);
gpl-2.0
aloksinha2001/picuntu-3.0.8-alok
arch/arm/mach-bcmring/csp/chipc/chipcHw.c
8048
26628
/*****************************************************************************
* Copyright 2003 - 2008 Broadcom Corporation.  All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/

/****************************************************************************/
/**
*  @file    chipcHw.c
*
*  @brief   Low level Various CHIP clock controlling routines
*
*  @note
*
*   These routines provide basic clock controlling functionality only.
*/
/****************************************************************************/

/* ---- Include Files ---------------------------------------------------- */

#include <csp/errno.h>
#include <csp/stdint.h>
#include <csp/module.h>

#include <mach/csp/chipcHw_def.h>
#include <mach/csp/chipcHw_inline.h>

#include <csp/reg.h>
#include <csp/delay.h>

/* ---- Private Constants and Types --------------------------------------- */

/* VPM alignment algorithm uses this */
#define MAX_PHASE_ADJUST_COUNT  0xFFFF	/* Max number of times allowed to adjust the phase */
#define MAX_PHASE_ALIGN_ATTEMPTS    10	/* Max number of attempt to align the phase */

/* Local definition of clock type */
#define PLL_CLOCK       1	/* PLL Clock */
#define NON_PLL_CLOCK   2	/* Divider clock */

/* Software integer divider (no hardware divide on this core); placed in
 * always-resident ARAM text since it is used while re-clocking. */
static int chipcHw_divide(int num, int denom)
    __attribute__ ((section(".aramtext")));

/****************************************************************************/
/**
*  @brief   Get clock frequency for miscellaneous configurable clocks
*
*  This function returns the currently configured frequency of the given
*  clock, derived from the PLL/divider registers and the crystal frequency.
*
*  @return  Configured clock frequency in hertz
*
*/
/****************************************************************************/
chipcHw_freq chipcHw_getClockFrequency(chipcHw_CLOCK_e clock	/*  [ IN ] Configurable clock */
    ) {
	volatile uint32_t *pPLLReg = (uint32_t *) 0x0;
	volatile uint32_t *pClockCtrl = (uint32_t *) 0x0;
	volatile uint32_t *pDependentClock = (uint32_t *) 0x0;
	uint32_t vcoFreqPll1Hz = 0;	/* Effective VCO frequency for PLL1 in Hz */
	uint32_t vcoFreqPll2Hz = 0;	/* Effective VCO frequency for PLL2 in Hz */
	uint32_t dependentClockType = 0;
	uint32_t vcoHz = 0;

	/* Get VCO frequencies */
	if ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_MASK) != chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_INTEGER) {
		/* Fractional (spread-spectrum) pre-divider mode: add the
		 * fractional-NDIV correction on top of the integer term. */
		uint64_t adjustFreq = 0;

		vcoFreqPll1Hz = chipcHw_XTAL_FREQ_Hz *
		    chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1,
				   chipcHw_REG_PLL_PREDIVIDER_P2) *
		    ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >>
		     chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT);

		/* Adjusted frequency due to chipcHw_REG_PLL_DIVIDER_NDIV_f_SS */
		adjustFreq = (uint64_t) chipcHw_XTAL_FREQ_Hz *
		    (uint64_t) chipcHw_REG_PLL_DIVIDER_NDIV_f_SS *
		    chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1,
				   (chipcHw_REG_PLL_PREDIVIDER_P2 *
				    (uint64_t) chipcHw_REG_PLL_DIVIDER_FRAC));
		vcoFreqPll1Hz += (uint32_t) adjustFreq;
	} else {
		vcoFreqPll1Hz = chipcHw_XTAL_FREQ_Hz *
		    chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1,
				   chipcHw_REG_PLL_PREDIVIDER_P2) *
		    ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >>
		     chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT);
	}
	vcoFreqPll2Hz = chipcHw_XTAL_FREQ_Hz *
	    chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1,
			   chipcHw_REG_PLL_PREDIVIDER_P2) *
	    ((pChipcHw->PLLPreDivider2 & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >>
	     chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT);

	/* Map the requested clock to its control register, its source VCO and
	 * (for divider clocks) the clock it is chained from. */
	switch (clock) {
	case chipcHw_CLOCK_DDR:
		pPLLReg = &pChipcHw->DDRClock;
		vcoHz = vcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_ARM:
		pPLLReg = &pChipcHw->ARMClock;
		vcoHz = vcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_ESW:
		pPLLReg = &pChipcHw->ESWClock;
		vcoHz = vcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_VPM:
		pPLLReg = &pChipcHw->VPMClock;
		vcoHz = vcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_ESW125:
		pPLLReg = &pChipcHw->ESW125Clock;
		vcoHz = vcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_UART:
		pPLLReg = &pChipcHw->UARTClock;
		vcoHz = vcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_SDIO0:
		pPLLReg = &pChipcHw->SDIO0Clock;
		vcoHz = vcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_SDIO1:
		pPLLReg = &pChipcHw->SDIO1Clock;
		vcoHz = vcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_SPI:
		pPLLReg = &pChipcHw->SPIClock;
		vcoHz = vcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_ETM:
		pPLLReg = &pChipcHw->ETMClock;
		vcoHz = vcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_USB:
		pPLLReg = &pChipcHw->USBClock;
		vcoHz = vcoFreqPll2Hz;
		break;
	case chipcHw_CLOCK_LCD:
		pPLLReg = &pChipcHw->LCDClock;
		vcoHz = vcoFreqPll2Hz;
		break;
	case chipcHw_CLOCK_APM:
		pPLLReg = &pChipcHw->APMClock;
		vcoHz = vcoFreqPll2Hz;
		break;
	case chipcHw_CLOCK_BUS:
		pClockCtrl = &pChipcHw->ACLKClock;
		pDependentClock = &pChipcHw->ARMClock;
		vcoHz = vcoFreqPll1Hz;
		dependentClockType = PLL_CLOCK;
		break;
	case chipcHw_CLOCK_OTP:
		pClockCtrl = &pChipcHw->OTPClock;
		break;
	case chipcHw_CLOCK_I2C:
		pClockCtrl = &pChipcHw->I2CClock;
		break;
	case chipcHw_CLOCK_I2S0:
		pClockCtrl = &pChipcHw->I2S0Clock;
		break;
	case chipcHw_CLOCK_RTBUS:
		pClockCtrl = &pChipcHw->RTBUSClock;
		pDependentClock = &pChipcHw->ACLKClock;
		dependentClockType = NON_PLL_CLOCK;
		break;
	case chipcHw_CLOCK_APM100:
		pClockCtrl = &pChipcHw->APM100Clock;
		pDependentClock = &pChipcHw->APMClock;
		vcoHz = vcoFreqPll2Hz;
		dependentClockType = PLL_CLOCK;
		break;
	case chipcHw_CLOCK_TSC:
		pClockCtrl = &pChipcHw->TSCClock;
		break;
	case chipcHw_CLOCK_LED:
		pClockCtrl = &pChipcHw->LEDClock;
		break;
	case chipcHw_CLOCK_I2S1:
		pClockCtrl = &pChipcHw->I2S1Clock;
		break;
	}

	if (pPLLReg) {
		/* Obtain PLL clock frequency */
		if (*pPLLReg & chipcHw_REG_PLL_CLOCK_BYPASS_SELECT) {
			/* Return crystal clock frequency when bypassed */
			return chipcHw_XTAL_FREQ_Hz;
		} else if (clock == chipcHw_CLOCK_DDR) {
			/* DDR frequency is configured in PLLDivider register */
			return chipcHw_divide(vcoHz,
					      (((pChipcHw->PLLDivider & 0xFF000000) >> 24) ?
					       ((pChipcHw->PLLDivider & 0xFF000000) >> 24) : 256));
		} else {
			/* From chip revision number B0, LCD clock is internally divided by 2 */
			if ((pPLLReg == &pChipcHw->LCDClock) &&
			    (chipcHw_getChipRevisionNumber() != chipcHw_REV_NUMBER_A0)) {
				vcoHz >>= 1;
			}
			/* Obtain PLL clock frequency using VCO dividers.
			 * A zero MDIV field encodes a divide-by-256. */
			return chipcHw_divide(vcoHz,
					      ((*pPLLReg & chipcHw_REG_PLL_CLOCK_MDIV_MASK) ?
					       (*pPLLReg & chipcHw_REG_PLL_CLOCK_MDIV_MASK) : 256));
		}
	} else if (pClockCtrl) {
		/* Obtain divider clock frequency */
		uint32_t div;
		uint32_t freq = 0;

		if (*pClockCtrl & chipcHw_REG_DIV_CLOCK_BYPASS_SELECT) {
			/* Return crystal clock frequency when bypassed */
			return chipcHw_XTAL_FREQ_Hz;
		} else if (pDependentClock) {
			/* Identify the dependent clock frequency */
			switch (dependentClockType) {
			case PLL_CLOCK:
				if (*pDependentClock & chipcHw_REG_PLL_CLOCK_BYPASS_SELECT) {
					/* Use crystal clock frequency when dependent PLL clock is bypassed */
					freq = chipcHw_XTAL_FREQ_Hz;
				} else {
					/* Obtain PLL clock frequency using VCO dividers */
					div = *pDependentClock & chipcHw_REG_PLL_CLOCK_MDIV_MASK;
					freq = div ? chipcHw_divide(vcoHz, div) : 0;
				}
				break;
			case NON_PLL_CLOCK:
				if (pDependentClock == (uint32_t *) &pChipcHw->ACLKClock) {
					/* BUS clock chains through its own lookup (recursive) */
					freq = chipcHw_getClockFrequency(chipcHw_CLOCK_BUS);
				} else {
					if (*pDependentClock & chipcHw_REG_DIV_CLOCK_BYPASS_SELECT) {
						/* Use crystal clock frequency when dependent divider clock is bypassed */
						freq = chipcHw_XTAL_FREQ_Hz;
					} else {
						/* Obtain divider clock frequency using XTAL dividers */
						div = *pDependentClock & chipcHw_REG_DIV_CLOCK_DIV_MASK;
						freq = chipcHw_divide(chipcHw_XTAL_FREQ_Hz,
								      (div ? div : 256));
					}
				}
				break;
			}
		} else {
			/* Dependent on crystal clock */
			freq = chipcHw_XTAL_FREQ_Hz;
		}

		div = *pClockCtrl & chipcHw_REG_DIV_CLOCK_DIV_MASK;
		return chipcHw_divide(freq, (div ? div : 256));
	}
	return 0;
}

/****************************************************************************/
/**
*  @brief   Set clock frequency for miscellaneous configurable clocks
*
*  This function sets clock frequency
*
*  @return  Configured clock frequency in Hz
*
*/
/****************************************************************************/
chipcHw_freq chipcHw_setClockFrequency(chipcHw_CLOCK_e clock,	/*  [ IN ] Configurable clock */
				       uint32_t freq	/*  [ IN ] Clock frequency in Hz */
    ) {
	volatile uint32_t *pPLLReg = (uint32_t *) 0x0;
	volatile uint32_t *pClockCtrl = (uint32_t *) 0x0;
	volatile uint32_t *pDependentClock = (uint32_t *) 0x0;
	uint32_t vcoFreqPll1Hz = 0;	/* Effective VCO frequency for PLL1 in Hz */
	uint32_t desVcoFreqPll1Hz = 0;	/* Desired VCO frequency for PLL1 in Hz */
	uint32_t vcoFreqPll2Hz = 0;	/* Effective VCO frequency for PLL2 in Hz */
	uint32_t dependentClockType = 0;
	uint32_t vcoHz = 0;
	uint32_t desVcoHz = 0;

	/* Get VCO frequencies */
	if ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_MASK) != chipcHw_REG_PLL_PREDIVIDER_NDIV_MODE_INTEGER) {
		uint64_t adjustFreq = 0;

		vcoFreqPll1Hz = chipcHw_XTAL_FREQ_Hz *
		    chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1,
				   chipcHw_REG_PLL_PREDIVIDER_P2) *
		    ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >>
		     chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT);

		/* Adjusted frequency due to chipcHw_REG_PLL_DIVIDER_NDIV_f_SS */
		adjustFreq = (uint64_t) chipcHw_XTAL_FREQ_Hz *
		    (uint64_t) chipcHw_REG_PLL_DIVIDER_NDIV_f_SS *
		    chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1,
				   (chipcHw_REG_PLL_PREDIVIDER_P2 *
				    (uint64_t) chipcHw_REG_PLL_DIVIDER_FRAC));
		vcoFreqPll1Hz += (uint32_t) adjustFreq;

		/* Desired VCO frequency */
		desVcoFreqPll1Hz = chipcHw_XTAL_FREQ_Hz *
		    chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1,
				   chipcHw_REG_PLL_PREDIVIDER_P2) *
		    (((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >>
		      chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT) + 1);
	} else {
		vcoFreqPll1Hz = desVcoFreqPll1Hz = chipcHw_XTAL_FREQ_Hz *
		    chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1,
				   chipcHw_REG_PLL_PREDIVIDER_P2) *
		    ((pChipcHw->PLLPreDivider & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >>
		     chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT);
	}
	vcoFreqPll2Hz = chipcHw_XTAL_FREQ_Hz *
	    chipcHw_divide(chipcHw_REG_PLL_PREDIVIDER_P1,
			   chipcHw_REG_PLL_PREDIVIDER_P2) *
	    ((pChipcHw->PLLPreDivider2 & chipcHw_REG_PLL_PREDIVIDER_NDIV_MASK) >>
	     chipcHw_REG_PLL_PREDIVIDER_NDIV_SHIFT);

	switch (clock) {
	case chipcHw_CLOCK_DDR:
		/* Configure the DDR_ctrl:BUS ratio settings */
		{
			REG_LOCAL_IRQ_SAVE;
			/* Divide DDR_phy by two to obtain DDR_ctrl clock */
			pChipcHw->DDRClock = (pChipcHw->DDRClock & ~chipcHw_REG_PLL_CLOCK_TO_BUS_RATIO_MASK) |
			    ((((freq / 2) / chipcHw_getClockFrequency(chipcHw_CLOCK_BUS)) - 1) <<
			     chipcHw_REG_PLL_CLOCK_TO_BUS_RATIO_SHIFT);
			REG_LOCAL_IRQ_RESTORE;
		}
		pPLLReg = &pChipcHw->DDRClock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_ARM:
		pPLLReg = &pChipcHw->ARMClock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_ESW:
		pPLLReg = &pChipcHw->ESWClock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_VPM:
		/* Configure the VPM:BUS ratio settings */
		{
			REG_LOCAL_IRQ_SAVE;
			pChipcHw->VPMClock = (pChipcHw->VPMClock & ~chipcHw_REG_PLL_CLOCK_TO_BUS_RATIO_MASK) |
			    ((chipcHw_divide(freq, chipcHw_getClockFrequency(chipcHw_CLOCK_BUS)) - 1) <<
			     chipcHw_REG_PLL_CLOCK_TO_BUS_RATIO_SHIFT);
			REG_LOCAL_IRQ_RESTORE;
		}
		pPLLReg = &pChipcHw->VPMClock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_ESW125:
		pPLLReg = &pChipcHw->ESW125Clock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_UART:
		pPLLReg = &pChipcHw->UARTClock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_SDIO0:
		pPLLReg = &pChipcHw->SDIO0Clock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_SDIO1:
		pPLLReg = &pChipcHw->SDIO1Clock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_SPI:
		pPLLReg = &pChipcHw->SPIClock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_ETM:
		pPLLReg = &pChipcHw->ETMClock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		break;
	case chipcHw_CLOCK_USB:
		pPLLReg = &pChipcHw->USBClock;
		vcoHz = vcoFreqPll2Hz;
		desVcoHz = vcoFreqPll2Hz;
		break;
	case chipcHw_CLOCK_LCD:
		pPLLReg = &pChipcHw->LCDClock;
		vcoHz = vcoFreqPll2Hz;
		desVcoHz = vcoFreqPll2Hz;
		break;
	case chipcHw_CLOCK_APM:
		pPLLReg = &pChipcHw->APMClock;
		vcoHz = vcoFreqPll2Hz;
		desVcoHz = vcoFreqPll2Hz;
		break;
	case chipcHw_CLOCK_BUS:
		pClockCtrl = &pChipcHw->ACLKClock;
		pDependentClock = &pChipcHw->ARMClock;
		vcoHz = vcoFreqPll1Hz;
		desVcoHz = desVcoFreqPll1Hz;
		dependentClockType = PLL_CLOCK;
		break;
	case chipcHw_CLOCK_OTP:
		pClockCtrl = &pChipcHw->OTPClock;
		break;
	case chipcHw_CLOCK_I2C:
		pClockCtrl = &pChipcHw->I2CClock;
		break;
	case chipcHw_CLOCK_I2S0:
		pClockCtrl = &pChipcHw->I2S0Clock;
		break;
	case chipcHw_CLOCK_RTBUS:
		pClockCtrl = &pChipcHw->RTBUSClock;
		pDependentClock = &pChipcHw->ACLKClock;
		dependentClockType = NON_PLL_CLOCK;
		break;
	case chipcHw_CLOCK_APM100:
		pClockCtrl = &pChipcHw->APM100Clock;
		pDependentClock = &pChipcHw->APMClock;
		vcoHz = vcoFreqPll2Hz;
		desVcoHz = vcoFreqPll2Hz;
		dependentClockType = PLL_CLOCK;
		break;
	case chipcHw_CLOCK_TSC:
		pClockCtrl = &pChipcHw->TSCClock;
		break;
	case chipcHw_CLOCK_LED:
		pClockCtrl = &pChipcHw->LEDClock;
		break;
	case chipcHw_CLOCK_I2S1:
		pClockCtrl = &pChipcHw->I2S1Clock;
		break;
	}

	if (pPLLReg) {
		/* Select XTAL as bypass source while the divider is changed */
		reg32_modify_and(pPLLReg, ~chipcHw_REG_PLL_CLOCK_SOURCE_GPIO);
		reg32_modify_or(pPLLReg, chipcHw_REG_PLL_CLOCK_BYPASS_SELECT);
		/* For DDR settings use only the PLL divider clock */
		if (pPLLReg == &pChipcHw->DDRClock) {
			/* Set M1DIV for PLL1, which controls the DDR clock */
			reg32_write(&pChipcHw->PLLDivider,
				    (pChipcHw->PLLDivider & 0x00FFFFFF) |
				    ((chipcHw_REG_PLL_DIVIDER_MDIV(desVcoHz, freq)) << 24));
			/* Calculate expected frequency */
			freq = chipcHw_divide(vcoHz,
					      (((pChipcHw->PLLDivider & 0xFF000000) >> 24) ?
					       ((pChipcHw->PLLDivider & 0xFF000000) >> 24) : 256));
		} else {
			/* From chip revision number B0, LCD clock is internally divided by 2 */
			if ((pPLLReg == &pChipcHw->LCDClock) &&
			    (chipcHw_getChipRevisionNumber() != chipcHw_REV_NUMBER_A0)) {
				desVcoHz >>= 1;
				vcoHz >>= 1;
			}
			/* Set MDIV to change the frequency */
			reg32_modify_and(pPLLReg, ~(chipcHw_REG_PLL_CLOCK_MDIV_MASK));
			reg32_modify_or(pPLLReg, chipcHw_REG_PLL_DIVIDER_MDIV(desVcoHz, freq));
			/* Calculate expected frequency */
			freq = chipcHw_divide(vcoHz,
					      ((*(pPLLReg) & chipcHw_REG_PLL_CLOCK_MDIV_MASK) ?
					       (*(pPLLReg) & chipcHw_REG_PLL_CLOCK_MDIV_MASK) : 256));
		}
		/* Wait for at least 200ns as per the protocol to change frequency */
		udelay(1);
		/* Do not bypass */
		reg32_modify_and(pPLLReg, ~chipcHw_REG_PLL_CLOCK_BYPASS_SELECT);
		/* Return the configured frequency */
		return freq;
	} else if (pClockCtrl) {
		uint32_t divider = 0;

		/* Divider clock should not be bypassed */
		reg32_modify_and(pClockCtrl, ~chipcHw_REG_DIV_CLOCK_BYPASS_SELECT);

		/* Identify the clock source */
		if (pDependentClock) {
			switch (dependentClockType) {
			case PLL_CLOCK:
				divider = chipcHw_divide(chipcHw_divide(desVcoHz,
									(*pDependentClock & chipcHw_REG_PLL_CLOCK_MDIV_MASK)),
							 freq);
				break;
			case NON_PLL_CLOCK:
				{
					uint32_t sourceClock = 0;

					if (pDependentClock == (uint32_t *) &pChipcHw->ACLKClock) {
						sourceClock = chipcHw_getClockFrequency(chipcHw_CLOCK_BUS);
					} else {
						uint32_t div = *pDependentClock & chipcHw_REG_DIV_CLOCK_DIV_MASK;
						sourceClock = chipcHw_divide(chipcHw_XTAL_FREQ_Hz,
									     ((div) ? div : 256));
					}
					divider = chipcHw_divide(sourceClock, freq);
				}
				break;
			}
		} else {
			divider = chipcHw_divide(chipcHw_XTAL_FREQ_Hz, freq);
		}

		if (divider) {
			REG_LOCAL_IRQ_SAVE;
			/* Set the divider to obtain the required frequency.
			 * Dividers above 256 are clamped to the DIV_256 encoding. */
			*pClockCtrl = (*pClockCtrl & (~chipcHw_REG_DIV_CLOCK_DIV_MASK)) |
			    (((divider > 256) ? chipcHw_REG_DIV_CLOCK_DIV_256 : divider) &
			     chipcHw_REG_DIV_CLOCK_DIV_MASK);
			REG_LOCAL_IRQ_RESTORE;
			return freq;
		}
	}
	return 0;
}

EXPORT_SYMBOL(chipcHw_setClockFrequency);

/****************************************************************************/
/**
*  @brief   Set VPM clock in sync with BUS clock for Chip Rev #A0
*
*  This function does the phase adjustment between VPM and BUS clock
*
*  @return >= 0 : On success (# of adjustment required)
*            -1 : On failure
*
*/
/****************************************************************************/
static int vpmPhaseAlignA0(void)
{
	uint32_t phaseControl;
	uint32_t phaseValue;
	uint32_t prevPhaseComp;
	int iter = 0;
	int adjustCount = 0;
	int count = 0;

	for (iter = 0; (iter < MAX_PHASE_ALIGN_ATTEMPTS) && (adjustCount < MAX_PHASE_ADJUST_COUNT); iter++) {
		phaseControl = (pChipcHw->VPMClock & chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK) >>
		    chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT;
		phaseValue = 0;
		prevPhaseComp = 0;

		/* Step 1: Look for falling PH_COMP transition */

		/* Read the contents of VPM Clock register */
		phaseValue = pChipcHw->VPMClock;
		do {
			/* Store previous value of phase comparator */
			prevPhaseComp = phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP;
			/* Change the value of PH_CTRL. */
			reg32_write(&pChipcHw->VPMClock,
				    (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) |
				    (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT));
			/* Wait at least 20 ns */
			udelay(1);
			/* Toggle the LOAD_CH after phase control is written. */
			pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE;
			/* Read the contents of VPM Clock register. */
			phaseValue = pChipcHw->VPMClock;

			if ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) == 0x0) {
				phaseControl = (0x3F & (phaseControl - 1));
			} else {
				/* Increment to the Phase count value for next write, if Phase is not stable. */
				phaseControl = (0x3F & (phaseControl + 1));
			}
			/* Count number of adjustment made */
			adjustCount++;
		} while (((prevPhaseComp == (phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP)) ||	/* Look for a transition */
			  ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) != 0x0)) &&	/* Look for a falling edge */
			 (adjustCount < MAX_PHASE_ADJUST_COUNT)	/* Do not exceed the limit while trying */
		    );

		if (adjustCount >= MAX_PHASE_ADJUST_COUNT) {
			/* Failed to align VPM phase after MAX_PHASE_ADJUST_COUNT tries */
			return -1;
		}

		/* Step 2: Keep moving forward to make sure falling PH_COMP transition was valid */
		for (count = 0; (count < 5) && ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) == 0); count++) {
			phaseControl = (0x3F & (phaseControl + 1));
			reg32_write(&pChipcHw->VPMClock,
				    (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) |
				    (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT));
			/* Wait at least 20 ns */
			udelay(1);
			/* Toggle the LOAD_CH after phase control is written. */
			pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE;
			phaseValue = pChipcHw->VPMClock;
			/* Count number of adjustment made */
			adjustCount++;
		}

		if (adjustCount >= MAX_PHASE_ADJUST_COUNT) {
			/* Failed to align VPM phase after MAX_PHASE_ADJUST_COUNT tries */
			return -1;
		}

		if (count != 5) {
			/* Detected false transition */
			continue;
		}

		/* Step 3: Keep moving backward to make sure falling PH_COMP transition was stable */
		for (count = 0; (count < 3) && ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) == 0); count++) {
			phaseControl = (0x3F & (phaseControl - 1));
			reg32_write(&pChipcHw->VPMClock,
				    (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) |
				    (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT));
			/* Wait at least 20 ns */
			udelay(1);
			/* Toggle the LOAD_CH after phase control is written. */
			pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE;
			phaseValue = pChipcHw->VPMClock;
			/* Count number of adjustment made */
			adjustCount++;
		}

		if (adjustCount >= MAX_PHASE_ADJUST_COUNT) {
			/* Failed to align VPM phase after MAX_PHASE_ADJUST_COUNT tries */
			return -1;
		}

		if (count != 3) {
			/* Detected noisy transition */
			continue;
		}

		/* Step 4: Keep moving backward before the original transition took place. */
		for (count = 0; (count < 5); count++) {
			phaseControl = (0x3F & (phaseControl - 1));
			reg32_write(&pChipcHw->VPMClock,
				    (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) |
				    (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT));
			/* Wait at least 20 ns */
			udelay(1);
			/* Toggle the LOAD_CH after phase control is written. */
			pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE;
			phaseValue = pChipcHw->VPMClock;
			/* Count number of adjustment made */
			adjustCount++;
		}

		if (adjustCount >= MAX_PHASE_ADJUST_COUNT) {
			/* Failed to align VPM phase after MAX_PHASE_ADJUST_COUNT tries */
			return -1;
		}

		if ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) == 0) {
			/* Detected false transition */
			continue;
		}

		/* Step 5: Re discover the valid transition */
		do {
			/* Store previous value of phase comparator */
			prevPhaseComp = phaseValue;
			/* Change the value of PH_CTRL. */
			reg32_write(&pChipcHw->VPMClock,
				    (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) |
				    (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT));
			/* Wait at least 20 ns */
			udelay(1);
			/* Toggle the LOAD_CH after phase control is written. */
			pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE;
			/* Read the contents of VPM Clock register. */
			phaseValue = pChipcHw->VPMClock;

			if ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) == 0x0) {
				phaseControl = (0x3F & (phaseControl - 1));
			} else {
				/* Increment to the Phase count value for next write, if Phase is not stable. */
				phaseControl = (0x3F & (phaseControl + 1));
			}

			/* Count number of adjustment made */
			adjustCount++;
		} while (((prevPhaseComp == (phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP)) ||
			  ((phaseValue & chipcHw_REG_PLL_CLOCK_PHASE_COMP) != 0x0)) &&
			 (adjustCount < MAX_PHASE_ADJUST_COUNT));

		if (adjustCount >= MAX_PHASE_ADJUST_COUNT) {
			/* Failed to align VPM phase after MAX_PHASE_ADJUST_COUNT tries */
			return -1;
		} else {
			/* Valid phase must have detected */
			break;
		}
	}

	/* For VPM Phase should be perfectly aligned. */
	phaseControl = (((pChipcHw->VPMClock >> chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT) - 1) & 0x3F);
	{
		REG_LOCAL_IRQ_SAVE;

		pChipcHw->VPMClock = (pChipcHw->VPMClock & ~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK) |
		    (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT);
		/* Load new phase value */
		pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE;

		REG_LOCAL_IRQ_RESTORE;
	}
	/* Return the status */
	return (int)adjustCount;
}

/****************************************************************************/
/**
*  @brief   Set VPM clock in sync with BUS clock
*
*  This function does the phase adjustment between VPM and BUS clock
*
*  @return >= 0 : On success (# of adjustment required)
*            -1 : On failure
*
*/
/****************************************************************************/
int chipcHw_vpmPhaseAlign(void)
{
	if (chipcHw_getChipRevisionNumber() == chipcHw_REV_NUMBER_A0) {
		/* Rev A0 parts lack the HW phase-align status; use the
		 * comparator-scanning algorithm instead. */
		return vpmPhaseAlignA0();
	} else {
		uint32_t phaseControl = chipcHw_getVpmPhaseControl();
		uint32_t phaseValue = 0;
		int adjustCount = 0;

		/* Disable VPM access */
		pChipcHw->Spare1 &= ~chipcHw_REG_SPARE1_VPM_BUS_ACCESS_ENABLE;
		/* Disable HW VPM phase alignment */
		chipcHw_vpmHwPhaseAlignDisable();
		/* Enable SW VPM phase alignment */
		chipcHw_vpmSwPhaseAlignEnable();
		/* Adjust VPM phase */
		while (adjustCount < MAX_PHASE_ADJUST_COUNT) {
			phaseValue = chipcHw_getVpmHwPhaseAlignStatus();

			/* Adjust phase control value; 0xF is the aligned midpoint */
			if (phaseValue > 0xF) {
				/* Increment phase control value */
				phaseControl++;
			} else if (phaseValue < 0xF) {
				/* Decrement phase control value */
				phaseControl--;
			} else {
				/* Enable VPM access */
				pChipcHw->Spare1 |= chipcHw_REG_SPARE1_VPM_BUS_ACCESS_ENABLE;
				/* Return adjust count */
				return adjustCount;
			}
			/* Change the value of PH_CTRL. */
			reg32_write(&pChipcHw->VPMClock,
				    (pChipcHw->VPMClock & (~chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_MASK)) |
				    (phaseControl << chipcHw_REG_PLL_CLOCK_PHASE_CONTROL_SHIFT));
			/* Wait at least 20 ns */
			udelay(1);
			/* Toggle the LOAD_CH after phase control is written. */
			pChipcHw->VPMClock ^= chipcHw_REG_PLL_CLOCK_PHASE_UPDATE_ENABLE;
			/* Count adjustment */
			adjustCount++;
		}
	}
	/* Disable VPM access */
	pChipcHw->Spare1 &= ~chipcHw_REG_SPARE1_VPM_BUS_ACCESS_ENABLE;
	return -1;
}

/****************************************************************************/
/**
*  @brief   Local Divide function
*
*  This function does the divide
*
*  @return divide value
*
*/
/****************************************************************************/
static int chipcHw_divide(int num, int denom)
{
	int r;
	int t = 1;

	/* Shift denom and t up to the largest value to optimize algorithm */
	/* t contains the units of each divide */
	while ((denom & 0x40000000) == 0) {	/* fails if denom=0 */
		denom = denom << 1;
		t = t << 1;
	}

	/* Initialize the result */
	r = 0;

	do {
		/* Determine if there exists a positive remainder */
		if ((num - denom) >= 0) {
			/* Accumulate t to the result and calculate a new remainder */
			num = num - denom;
			r = r + t;
		}
		/* Continue to shift denom and shift t down to 0 */
		denom = denom >> 1;
		t = t >> 1;
	} while (t != 0);
	return r;
}
gpl-2.0
darkobas/android_kernel_oneplus_msm8974
arch/ia64/kernel/module.c
8304
27595
/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */

/* relocs tested so far:

   DIR64LSB
   FPTR64LSB
   GPREL22
   LDXMOV
   LDXMOV
   LTOFF22
   LTOFF22X
   LTOFF22X
   LTOFF_FPTR22
   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
   PCREL64LSB
   SECREL32LSB
   SEGREL64LSB
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>

#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
# define inline
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL	0
#else
# define USE_BRL	1
#endif

#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */

/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)

enum reloc_target_format {
	/* direct encoded formats: */
	RF_NONE = 0,
	RF_INSN14 = 1,
	RF_INSN22 = 2,
	RF_INSN64 = 3,
	RF_32MSB = 4,
	RF_32LSB = 5,
	RF_64MSB = 6,
	RF_64LSB = 7,

	/* formats that cannot be directly decoded: */
	RF_INSN60,
	RF_INSN21B,	/* imm21 form 1 */
	RF_INSN21M,	/* imm21 form 2 */
	RF_INSN21F	/* imm21 form 3 */
};

enum reloc_value_formula {
	RV_DIRECT = 4,		/* S + A */
	RV_GPREL = 5,		/* @gprel(S + A) */
	RV_LTREL = 6,		/* @ltoff(S + A) */
	RV_PLTREL = 7,		/* @pltoff(S + A) */
	RV_FPTR = 8,		/* @fptr(S + A) */
	RV_PCREL = 9,		/* S + A - P */
	RV_LTREL_FPTR = 10,	/* @ltoff(@fptr(S + A)) */
	RV_SEGREL = 11,		/* @segrel(S + A) */
	RV_SECREL = 12,		/* @secrel(S + A) */
	RV_BDREL = 13,		/* BD + A */
	RV_LTV = 14,		/* S + A (like RV_DIRECT, except frozen at static link-time) */
	RV_PCREL2 = 15,		/* S + A - P */
	RV_SPECIAL = 16,	/* various (see below) */
	RV_RSVD17 = 17,
	RV_TPREL = 18,		/* @tprel(S + A) */
	RV_LTREL_TPREL = 19,	/* @ltoff(@tprel(S + A)) */
	RV_DTPMOD = 20,		/* @dtpmod(S + A) */
	RV_LTREL_DTPMOD = 21,	/* @ltoff(@dtpmod(S + A)) */
	RV_DTPREL = 22,		/* @dtprel(S + A) */
	RV_LTREL_DTPREL = 23,	/* @ltoff(@dtprel(S + A)) */
	RV_RSVD24 = 24,
	RV_RSVD25 = 25,
	RV_RSVD26 = 26,
	RV_RSVD27 = 27
	/* 28-31 reserved for implementation-specific purposes.  */
};

#define N(reloc)	[R_IA64_##reloc] = #reloc

/* Human-readable relocation names, indexed by ELF relocation type,
 * used for diagnostics. */
static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N

/* Opaque struct for insns, to protect against derefs. */
struct insn;

/* An "insn pointer" encodes the 16-byte-aligned bundle address in its upper
 * bits and the slot number (0-2) in its low 2 bits. */
static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
	return (uint64_t) insn & 0x3;
}

/* Patch a 64-bit immediate into an MLX bundle; the movl must be in slot 2. */
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
		       mod->name, slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}

/* Patch a 60-bit (long-branch) immediate; must be in slot 2 and in range. */
static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
		       mod->name, slot(insn));
		return 0;
	}
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
			mod->name, (long) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}

/* Patch a 22-bit immediate (e.g. addl), scattering the value into the
 * instruction's immediate fields. */
static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n",
			mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
						 | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
						 | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
						 | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

/* Patch a 21-bit branch displacement (br form). */
static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n",
			mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
						| ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

#if USE_BRL

struct plt_entry {
	/* Two instruction bundles in PLT (brl form). */
	unsigned char bundle[2][16];
};

/* brl.many is not supported on Itanium-1 (CONFIG_ITANIUM), hence USE_BRL. */
static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	     movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	     brl.many gp=TARGET_GP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}

/* Decode the brl displacement out of a patched PLT entry and reconstruct
 * the absolute target address. */
unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) << 0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}

#else /* !USE_BRL */

struct plt_entry {
	/* Three instruction bundles in PLT. */
	unsigned char bundle[3][16];
};

/* Indirect-branch form for CPUs without brl: movl target into r16/gp,
 * then branch through b6. */
static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	     movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	     movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	     mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	     br.few b6 */
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}

/* Decode the movl immediate out of a patched PLT entry to recover the
 * absolute target address. */
unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */

/* Free a module region and drop the init-section unwind table if this
 * region was the module's init region. */
void
module_free (struct module *mod, void *module_region)
{
	if (mod && mod->arch.init_unw_table &&
	    module_region == mod->module_init) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
	vfree(module_region);
}

/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}

/* Count how many GOT entries we may need */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_LTOFF22:
		case R_IA64_LTOFF22X:
		case R_IA64_LTOFF64I:
		case R_IA64_LTOFF_FPTR22:
		case R_IA64_LTOFF_FPTR64I:
		case R_IA64_LTOFF_FPTR32MSB:
		case R_IA64_LTOFF_FPTR32LSB:
		case R_IA64_LTOFF_FPTR64MSB:
		case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* Count how many PLT entries we may need */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_PCREL21B:
		case R_IA64_PLTOFF22:
		case R_IA64_PLTOFF64I:
		case R_IA64_PLTOFF64MSB:
		case R_IA64_PLTOFF64LSB:
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* We need to create a function descriptor for any internal function which
   is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not time critical.  */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_FPTR64I:
		case R_IA64_FPTR32LSB:
		case R_IA64_FPTR32MSB:
		case R_IA64_FPTR64LSB:
		case R_IA64_FPTR64MSB:
		case R_IA64_LTOFF_FPTR22:
		case R_IA64_LTOFF_FPTR32LSB:
		case R_IA64_LTOFF_FPTR32MSB:
		case R_IA64_LTOFF_FPTR64I:
		case R_IA64_LTOFF_FPTR64LSB:
		case R_IA64_LTOFF_FPTR64MSB:
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			/*
			 * Jumps to static functions sometimes go straight to their
			 * offset.  Of course, that may not be possible if the jump is
			 * from init -> core or vice. versa, so we need to generate an
			 * FDESC (and PLT etc) for that.
			 */
		case R_IA64_PCREL21B:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs and function-descriptors, we expand the .text section for
	 * core module-code and the .init.text section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;
#ifdef CONFIG_PARAVIRT
		else if (strcmp(".paravirt_bundles",
				secstrings + s->sh_name) == 0)
			mod->arch.paravirt_bundles = s;
		else if (strcmp(".paravirt_insts",
				secstrings + s->sh_name) == 0)
			mod->arch.paravirt_insts = s;
#endif

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	/* GOT and PLTs can occur in any relocated section... */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}

	/* Size the (currently empty, SHT_NOBITS) helper sections so the
	 * generic loader allocates room for the worst-case counts above. */
	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}

/* Unsigned-compare trick: also false when addr < module_init. */
static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_init < mod->init_size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_core < mod->core_size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}

/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;

	/* Not enough GOT entries? */
	BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));

	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}

/* True when VALUE is within +/- MAX_LTOFF/2 of the module's gp. */
static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}

/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	/* Pick the PLT pool that is reachable from the referencing insn. */
	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for existing PLT entry. */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __func__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}

/* Get function descriptor for VALUE.
*/ static uint64_t get_fdesc (struct module *mod, uint64_t value, int *okp) { struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr; if (!*okp) return 0; if (!value) { printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name); return 0; } if (!is_internal(mod, value)) /* * If it's not a module-local entry-point, "value" already points to a * function-descriptor. */ return value; /* Look for existing function descriptor. */ while (fdesc->ip) { if (fdesc->ip == value) return (uint64_t)fdesc; if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size) BUG(); } /* Create new one */ fdesc->ip = value; fdesc->gp = mod->arch.gp; return (uint64_t) fdesc; } static inline int do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, Elf64_Shdr *sec, void *location) { enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK; enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK; uint64_t val; int ok = 1; val = sym->st_value + addend; switch (formula) { case RV_SEGREL: /* segment base is arbitrarily chosen to be 0 for kernel modules */ case RV_DIRECT: break; case RV_GPREL: val -= mod->arch.gp; break; case RV_LTREL: val = get_ltoff(mod, val, &ok); break; case RV_PLTREL: val = get_plt(mod, location, val, &ok); break; case RV_FPTR: val = get_fdesc(mod, val, &ok); break; case RV_SECREL: val -= sec->sh_addr; break; case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break; case RV_PCREL: switch (r_type) { case R_IA64_PCREL21B: if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) || (in_core(mod, val) && in_init(mod, (uint64_t)location))) { /* * Init section may have been allocated far away from core, * if the branch won't reach, then allocate a plt for it. 
*/ uint64_t delta = ((int64_t)val - (int64_t)location) / 16; if (delta + (1 << 20) >= (1 << 21)) { val = get_fdesc(mod, val, &ok); val = get_plt(mod, location, val, &ok); } } else if (!is_internal(mod, val)) val = get_plt(mod, location, val, &ok); /* FALL THROUGH */ default: val -= bundle(location); break; case R_IA64_PCREL32MSB: case R_IA64_PCREL32LSB: case R_IA64_PCREL64MSB: case R_IA64_PCREL64LSB: val -= (uint64_t) location; break; } switch (r_type) { case R_IA64_PCREL60B: format = RF_INSN60; break; case R_IA64_PCREL21B: format = RF_INSN21B; break; case R_IA64_PCREL21M: format = RF_INSN21M; break; case R_IA64_PCREL21F: format = RF_INSN21F; break; default: break; } break; case RV_BDREL: val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); break; case RV_LTV: /* can link-time value relocs happen here? */ BUG(); break; case RV_PCREL2: if (r_type == R_IA64_PCREL21BI) { if (!is_internal(mod, val)) { printk(KERN_ERR "%s: %s reloc against " "non-local symbol (%lx)\n", __func__, reloc_name[r_type], (unsigned long)val); return -ENOEXEC; } format = RF_INSN21B; } val -= bundle(location); break; case RV_SPECIAL: switch (r_type) { case R_IA64_IPLTMSB: case R_IA64_IPLTLSB: val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok); format = RF_64LSB; if (r_type == R_IA64_IPLTMSB) format = RF_64MSB; break; case R_IA64_SUB: val = addend - sym->st_value; format = RF_INSN64; break; case R_IA64_LTOFF22X: if (gp_addressable(mod, val)) val -= mod->arch.gp; else val = get_ltoff(mod, val, &ok); format = RF_INSN22; break; case R_IA64_LDXMOV: if (gp_addressable(mod, val)) { /* turn "ld8" into "mov": */ DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location); ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL); } return 0; default: if (reloc_name[r_type]) printk(KERN_ERR "%s: special reloc %s not supported", mod->name, reloc_name[r_type]); else printk(KERN_ERR "%s: unknown special reloc %x\n", mod->name, r_type); return -ENOEXEC; } break; case 
RV_TPREL: case RV_LTREL_TPREL: case RV_DTPMOD: case RV_LTREL_DTPMOD: case RV_DTPREL: case RV_LTREL_DTPREL: printk(KERN_ERR "%s: %s reloc not supported\n", mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?"); return -ENOEXEC; default: printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type); return -ENOEXEC; } if (!ok) return -ENOEXEC; DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val, reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend); switch (format) { case RF_INSN21B: ok = apply_imm21b(mod, location, (int64_t) val / 16); break; case RF_INSN22: ok = apply_imm22(mod, location, val); break; case RF_INSN64: ok = apply_imm64(mod, location, val); break; case RF_INSN60: ok = apply_imm60(mod, location, (int64_t) val / 16); break; case RF_32LSB: put_unaligned(val, (uint32_t *) location); break; case RF_64LSB: put_unaligned(val, (uint64_t *) location); break; case RF_32MSB: /* ia64 Linux is little-endian... */ case RF_64MSB: /* ia64 Linux is little-endian... */ case RF_INSN14: /* must be within-module, i.e., resolved by "ld -r" */ case RF_INSN21M: /* must be within-module, i.e., resolved by "ld -r" */ case RF_INSN21F: /* must be within-module, i.e., resolved by "ld -r" */ printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n", mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?"); return -ENOEXEC; default: printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n", mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format); return -ENOEXEC; } return ok ? 
0 : -ENOEXEC; } int apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *mod) { unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela); Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr; Elf64_Shdr *target_sec; int ret; DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__, relsec, n, sechdrs[relsec].sh_info); target_sec = sechdrs + sechdrs[relsec].sh_info; if (target_sec->sh_entsize == ~0UL) /* * If target section wasn't allocated, we don't need to relocate it. * Happens, e.g., for debug sections. */ return 0; if (!mod->arch.gp) { /* * XXX Should have an arch-hook for running this after final section * addresses have been selected... */ uint64_t gp; if (mod->core_size > MAX_LTOFF) /* * This takes advantage of fact that SHF_ARCH_SMALL gets allocated * at the end of the module. */ gp = mod->core_size - MAX_LTOFF / 2; else gp = mod->core_size / 2; gp = (uint64_t) mod->module_core + ((gp + 7) & -8); mod->arch.gp = gp; DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); } for (i = 0; i < n; i++) { ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info), ((Elf64_Sym *) sechdrs[symindex].sh_addr + ELF64_R_SYM(rela[i].r_info)), rela[i].r_addend, target_sec, (void *) target_sec->sh_addr + rela[i].r_offset); if (ret < 0) return ret; } return 0; } /* * Modules contain a single unwind table which covers both the core and the init text * sections but since the two are not contiguous, we need to split this table up such that * we can register (and unregister) each "segment" separately. Fortunately, this sounds * more complicated than it really is. 
*/ static void register_unwind_table (struct module *mod) { struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr; struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start); struct unw_table_entry tmp, *e1, *e2, *core, *init; unsigned long num_init = 0, num_core = 0; /* First, count how many init and core unwind-table entries there are. */ for (e1 = start; e1 < end; ++e1) if (in_init(mod, e1->start_offset)) ++num_init; else ++num_core; /* * Second, sort the table such that all unwind-table entries for the init and core * text sections are nicely separated. We do this with a stupid bubble sort * (unwind tables don't get ridiculously huge). */ for (e1 = start; e1 < end; ++e1) { for (e2 = e1 + 1; e2 < end; ++e2) { if (e2->start_offset < e1->start_offset) { tmp = *e1; *e1 = *e2; *e2 = tmp; } } } /* * Third, locate the init and core segments in the unwind table: */ if (in_init(mod, start->start_offset)) { init = start; core = start + num_init; } else { core = start; init = start + num_core; } DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__, mod->name, mod->arch.gp, num_init, num_core); /* * Fourth, register both tables (if not empty). 
*/ if (num_core > 0) { mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, core, core + num_core); DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__, mod->arch.core_unw_table, core, core + num_core); } if (num_init > 0) { mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, init, init + num_init); DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__, mod->arch.init_unw_table, init, init + num_init); } } int module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { DEBUGP("%s: init: entry=%p\n", __func__, mod->init); if (mod->arch.unwind) register_unwind_table(mod); #ifdef CONFIG_PARAVIRT if (mod->arch.paravirt_bundles) { struct paravirt_patch_site_bundle *start = (struct paravirt_patch_site_bundle *) mod->arch.paravirt_bundles->sh_addr; struct paravirt_patch_site_bundle *end = (struct paravirt_patch_site_bundle *) (mod->arch.paravirt_bundles->sh_addr + mod->arch.paravirt_bundles->sh_size); paravirt_patch_apply_bundle(start, end); } if (mod->arch.paravirt_insts) { struct paravirt_patch_site_inst *start = (struct paravirt_patch_site_inst *) mod->arch.paravirt_insts->sh_addr; struct paravirt_patch_site_inst *end = (struct paravirt_patch_site_inst *) (mod->arch.paravirt_insts->sh_addr + mod->arch.paravirt_insts->sh_size); paravirt_patch_apply_inst(start, end); } #endif return 0; } void module_arch_cleanup (struct module *mod) { if (mod->arch.init_unw_table) unw_remove_unwind_table(mod->arch.init_unw_table); if (mod->arch.core_unw_table) unw_remove_unwind_table(mod->arch.core_unw_table); }
gpl-2.0
maniacx/android_kernel_htcleo-3.0_older
arch/x86/xen/suspend.c
10352
1740
#include <linux/types.h> #include <linux/clockchips.h> #include <xen/interface/xen.h> #include <xen/grant_table.h> #include <xen/events.h> #include <asm/xen/hypercall.h> #include <asm/xen/page.h> #include <asm/fixmap.h> #include "xen-ops.h" #include "mmu.h" void xen_arch_pre_suspend(void) { xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn); xen_start_info->console.domU.mfn = mfn_to_pfn(xen_start_info->console.domU.mfn); BUG_ON(!irqs_disabled()); HYPERVISOR_shared_info = &xen_dummy_shared_info; if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP), __pte_ma(0), 0)) BUG(); } void xen_arch_hvm_post_suspend(int suspend_cancelled) { #ifdef CONFIG_XEN_PVHVM int cpu; xen_hvm_init_shared_info(); xen_callback_vector(); xen_unplug_emulated_devices(); if (xen_feature(XENFEAT_hvm_safe_pvclock)) { for_each_online_cpu(cpu) { xen_setup_runstate_info(cpu); } } #endif } void xen_arch_post_suspend(int suspend_cancelled) { xen_build_mfn_list_list(); xen_setup_shared_info(); if (suspend_cancelled) { xen_start_info->store_mfn = pfn_to_mfn(xen_start_info->store_mfn); xen_start_info->console.domU.mfn = pfn_to_mfn(xen_start_info->console.domU.mfn); } else { #ifdef CONFIG_SMP BUG_ON(xen_cpu_initialized_map == NULL); cpumask_copy(xen_cpu_initialized_map, cpu_online_mask); #endif xen_vcpu_restore(); } } static void xen_vcpu_notify_restore(void *data) { unsigned long reason = (unsigned long)data; /* Boot processor notified via generic timekeeping_resume() */ if ( smp_processor_id() == 0) return; clockevents_notify(reason, NULL); } void xen_arch_resume(void) { on_each_cpu(xen_vcpu_notify_restore, (void *)CLOCK_EVT_NOTIFY_RESUME, 1); }
gpl-2.0
skelton/amlogic_common_3050
arch/x86/math-emu/fpu_aux.c
12656
4394
/*---------------------------------------------------------------------------+ | fpu_aux.c | | | | Code to implement some of the FPU auxiliary instructions. | | | | Copyright (C) 1992,1993,1994,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | | +---------------------------------------------------------------------------*/ #include "fpu_system.h" #include "exception.h" #include "fpu_emu.h" #include "status_w.h" #include "control_w.h" static void fnop(void) { } static void fclex(void) { partial_status &= ~(SW_Backward | SW_Summary | SW_Stack_Fault | SW_Precision | SW_Underflow | SW_Overflow | SW_Zero_Div | SW_Denorm_Op | SW_Invalid); no_ip_update = 1; } /* Needs to be externally visible */ void finit_soft_fpu(struct i387_soft_struct *soft) { struct address *oaddr, *iaddr; memset(soft, 0, sizeof(*soft)); soft->cwd = 0x037f; soft->swd = 0; soft->ftop = 0; /* We don't keep top in the status word internally. */ soft->twd = 0xffff; /* The behaviour is different from that detailed in Section 15.1.6 of the Intel manual */ oaddr = (struct address *)&soft->foo; oaddr->offset = 0; oaddr->selector = 0; iaddr = (struct address *)&soft->fip; iaddr->offset = 0; iaddr->selector = 0; iaddr->opcode = 0; soft->no_update = 1; } void finit(void) { finit_soft_fpu(&current->thread.fpu.state->soft); } /* * These are nops on the i387.. 
*/ #define feni fnop #define fdisi fnop #define fsetpm fnop static FUNC const finit_table[] = { feni, fdisi, fclex, finit, fsetpm, FPU_illegal, FPU_illegal, FPU_illegal }; void finit_(void) { (finit_table[FPU_rm]) (); } static void fstsw_ax(void) { *(short *)&FPU_EAX = status_word(); no_ip_update = 1; } static FUNC const fstsw_table[] = { fstsw_ax, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal }; void fstsw_(void) { (fstsw_table[FPU_rm]) (); } static FUNC const fp_nop_table[] = { fnop, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal, FPU_illegal }; void fp_nop(void) { (fp_nop_table[FPU_rm]) (); } void fld_i_(void) { FPU_REG *st_new_ptr; int i; u_char tag; if (STACK_OVERFLOW) { FPU_stack_overflow(); return; } /* fld st(i) */ i = FPU_rm; if (NOT_EMPTY(i)) { reg_copy(&st(i), st_new_ptr); tag = FPU_gettagi(i); push(); FPU_settag0(tag); } else { if (control_word & CW_Invalid) { /* The masked response */ FPU_stack_underflow(); } else EXCEPTION(EX_StackUnder); } } void fxch_i(void) { /* fxch st(i) */ FPU_REG t; int i = FPU_rm; FPU_REG *st0_ptr = &st(0), *sti_ptr = &st(i); long tag_word = fpu_tag_word; int regnr = top & 7, regnri = ((regnr + i) & 7); u_char st0_tag = (tag_word >> (regnr * 2)) & 3; u_char sti_tag = (tag_word >> (regnri * 2)) & 3; if (st0_tag == TAG_Empty) { if (sti_tag == TAG_Empty) { FPU_stack_underflow(); FPU_stack_underflow_i(i); return; } if (control_word & CW_Invalid) { /* Masked response */ FPU_copy_to_reg0(sti_ptr, sti_tag); } FPU_stack_underflow_i(i); return; } if (sti_tag == TAG_Empty) { if (control_word & CW_Invalid) { /* Masked response */ FPU_copy_to_regi(st0_ptr, st0_tag, i); } FPU_stack_underflow(); return; } clear_C1(); reg_copy(st0_ptr, &t); reg_copy(sti_ptr, st0_ptr); reg_copy(&t, sti_ptr); tag_word &= ~(3 << (regnr * 2)) & ~(3 << (regnri * 2)); tag_word |= (sti_tag << (regnr * 2)) | (st0_tag << (regnri * 2)); fpu_tag_word = tag_word; } void ffree_(void) { /* ffree 
st(i) */ FPU_settagi(FPU_rm, TAG_Empty); } void ffreep(void) { /* ffree st(i) + pop - unofficial code */ FPU_settagi(FPU_rm, TAG_Empty); FPU_pop(); } void fst_i_(void) { /* fst st(i) */ FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm); } void fstp_i(void) { /* fstp st(i) */ FPU_copy_to_regi(&st(0), FPU_gettag0(), FPU_rm); FPU_pop(); }
gpl-2.0
CyanogenMod/android_kernel_asus_moorefield
scripts/kconfig/images.c
16496
6565
/* * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org> * Released under the terms of the GNU GPL v2.0. */ static const char *xpm_load[] = { "22 22 5 1", ". c None", "# c #000000", "c c #838100", "a c #ffff00", "b c #ffffff", "......................", "......................", "......................", "............####....#.", "...........#....##.##.", "..................###.", ".................####.", ".####...........#####.", "#abab##########.......", "#babababababab#.......", "#ababababababa#.......", "#babababababab#.......", "#ababab###############", "#babab##cccccccccccc##", "#abab##cccccccccccc##.", "#bab##cccccccccccc##..", "#ab##cccccccccccc##...", "#b##cccccccccccc##....", "###cccccccccccc##.....", "##cccccccccccc##......", "###############.......", "......................"}; static const char *xpm_save[] = { "22 22 5 1", ". c None", "# c #000000", "a c #838100", "b c #c5c2c5", "c c #cdb6d5", "......................", ".####################.", ".#aa#bbbbbbbbbbbb#bb#.", ".#aa#bbbbbbbbbbbb#bb#.", ".#aa#bbbbbbbbbcbb####.", ".#aa#bbbccbbbbbbb#aa#.", ".#aa#bbbccbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aa#bbbbbbbbbbbb#aa#.", ".#aaa############aaa#.", ".#aaaaaaaaaaaaaaaaaa#.", ".#aaaaaaaaaaaaaaaaaa#.", ".#aaa#############aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", ".#aaa#########bbb#aa#.", "..##################..", "......................"}; static const char *xpm_back[] = { "22 22 3 1", ". 
c None", "# c #000083", "a c #838183", "......................", "......................", "......................", "......................", "......................", "...........######a....", "..#......##########...", "..##...####......##a..", "..###.###.........##..", "..######..........##..", "..#####...........##..", "..######..........##..", "..#######.........##..", "..########.......##a..", "...............a###...", "...............###....", "......................", "......................", "......................", "......................", "......................", "......................"}; static const char *xpm_tree_view[] = { "22 22 2 1", ". c None", "# c #000000", "......................", "......................", "......#...............", "......#...............", "......#...............", "......#...............", "......#...............", "......########........", "......#...............", "......#...............", "......#...............", "......#...............", "......#...............", "......########........", "......#...............", "......#...............", "......#...............", "......#...............", "......#...............", "......########........", "......................", "......................"}; static const char *xpm_single_view[] = { "22 22 2 1", ". c None", "# c #000000", "......................", "......................", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "..........#...........", "......................", "......................"}; static const char *xpm_split_view[] = { "22 22 2 1", ". 
c None", "# c #000000", "......................", "......................", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......#......#........", "......................", "......................"}; static const char *xpm_symbol_no[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " .......... ", " "}; static const char *xpm_symbol_mod[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . . ", " . .. . ", " . .... . ", " . .... . ", " . .. . ", " . . ", " . . ", " .......... ", " "}; static const char *xpm_symbol_yes[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . . ", " . . . ", " . .. . ", " . . .. . ", " . .... . ", " . .. . ", " . . ", " .......... ", " "}; static const char *xpm_choice_no[] = { "12 12 2 1", " c white", ". c black", " ", " .... ", " .. .. ", " . . ", " . . ", " . . ", " . . ", " . . ", " . . ", " .. .. ", " .... ", " "}; static const char *xpm_choice_yes[] = { "12 12 2 1", " c white", ". c black", " ", " .... ", " .. .. ", " . . ", " . .. . ", " . .... . ", " . .... . ", " . .. . ", " . . ", " .. .. ", " .... ", " "}; static const char *xpm_menu[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . .. . ", " . .... . ", " . ...... . ", " . ...... . ", " . .... . ", " . .. . ", " . . ", " .......... ", " "}; static const char *xpm_menu_inv[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " .......... ", " .. ...... ", " .. .... ", " .. .. ", " .. .. ", " .. .... ", " .. ...... 
", " .......... ", " .......... ", " "}; static const char *xpm_menuback[] = { "12 12 2 1", " c white", ". c black", " ", " .......... ", " . . ", " . .. . ", " . .... . ", " . ...... . ", " . ...... . ", " . .... . ", " . .. . ", " . . ", " .......... ", " "}; static const char *xpm_void[] = { "12 12 2 1", " c white", ". c black", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "};
gpl-2.0
TheTypoMaster/linux_kernel_2.6.32.67
net/netfilter/nf_log.c
113
7259
#include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/netfilter.h> #include <linux/seq_file.h> #include <net/protocol.h> #include <net/netfilter/nf_log.h> #include "nf_internals.h" /* Internal logging interface, which relies on the real LOG target modules */ #define NF_LOG_PREFIXLEN 128 #define NFLOGGER_NAME_LEN 64 static const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO] __read_mostly; static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly; static DEFINE_MUTEX(nf_log_mutex); static struct nf_logger *__find_logger(int pf, const char *str_logger) { struct nf_logger *t; list_for_each_entry(t, &nf_loggers_l[pf], list[pf]) { if (!strnicmp(str_logger, t->name, strlen(t->name))) return t; } return NULL; } /* return EEXIST if the same logger is registred, 0 on success. */ int nf_log_register(u_int8_t pf, struct nf_logger *logger) { const struct nf_logger *llog; int i; if (pf >= ARRAY_SIZE(nf_loggers)) return -EINVAL; for (i = 0; i < ARRAY_SIZE(logger->list); i++) INIT_LIST_HEAD(&logger->list[i]); mutex_lock(&nf_log_mutex); if (pf == NFPROTO_UNSPEC) { for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) list_add_tail(&(logger->list[i]), &(nf_loggers_l[i])); } else { /* register at end of list to honor first register win */ list_add_tail(&logger->list[pf], &nf_loggers_l[pf]); llog = rcu_dereference(nf_loggers[pf]); if (llog == NULL) rcu_assign_pointer(nf_loggers[pf], logger); } mutex_unlock(&nf_log_mutex); return 0; } EXPORT_SYMBOL(nf_log_register); void nf_log_unregister(struct nf_logger *logger) { const struct nf_logger *c_logger; int i; mutex_lock(&nf_log_mutex); for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) { c_logger = rcu_dereference(nf_loggers[i]); if (c_logger == logger) rcu_assign_pointer(nf_loggers[i], NULL); list_del(&logger->list[i]); } mutex_unlock(&nf_log_mutex); synchronize_rcu(); } EXPORT_SYMBOL(nf_log_unregister); int nf_log_bind_pf(u_int8_t pf, const 
struct nf_logger *logger) { if (pf >= ARRAY_SIZE(nf_loggers)) return -EINVAL; mutex_lock(&nf_log_mutex); if (__find_logger(pf, logger->name) == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(nf_loggers[pf], logger); mutex_unlock(&nf_log_mutex); return 0; } EXPORT_SYMBOL(nf_log_bind_pf); void nf_log_unbind_pf(u_int8_t pf) { if (pf >= ARRAY_SIZE(nf_loggers)) return; mutex_lock(&nf_log_mutex); rcu_assign_pointer(nf_loggers[pf], NULL); mutex_unlock(&nf_log_mutex); } EXPORT_SYMBOL(nf_log_unbind_pf); void nf_log_packet(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *fmt, ...) { va_list args; char prefix[NF_LOG_PREFIXLEN]; const struct nf_logger *logger; rcu_read_lock(); logger = rcu_dereference(nf_loggers[pf]); if (logger) { va_start(args, fmt); vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix); } rcu_read_unlock(); } EXPORT_SYMBOL(nf_log_packet); #ifdef CONFIG_PROC_FS static void *seq_start(struct seq_file *seq, loff_t *pos) { mutex_lock(&nf_log_mutex); if (*pos >= ARRAY_SIZE(nf_loggers)) return NULL; return pos; } static void *seq_next(struct seq_file *s, void *v, loff_t *pos) { (*pos)++; if (*pos >= ARRAY_SIZE(nf_loggers)) return NULL; return pos; } static void seq_stop(struct seq_file *s, void *v) { mutex_unlock(&nf_log_mutex); } static int seq_show(struct seq_file *s, void *v) { loff_t *pos = v; const struct nf_logger *logger; struct nf_logger *t; int ret; logger = nf_loggers[*pos]; if (!logger) ret = seq_printf(s, "%2lld NONE (", *pos); else ret = seq_printf(s, "%2lld %s (", *pos, logger->name); if (ret < 0) return ret; list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) { ret = seq_printf(s, "%s", t->name); if (ret < 0) return ret; if (&t->list[*pos] != nf_loggers_l[*pos].prev) { ret = seq_printf(s, ","); if (ret < 0) return ret; } } return 
seq_printf(s, ")\n"); } static const struct seq_operations nflog_seq_ops = { .start = seq_start, .next = seq_next, .stop = seq_stop, .show = seq_show, }; static int nflog_open(struct inode *inode, struct file *file) { return seq_open(file, &nflog_seq_ops); } static const struct file_operations nflog_file_ops = { .owner = THIS_MODULE, .open = nflog_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* PROC_FS */ #ifdef CONFIG_SYSCTL static struct ctl_path nf_log_sysctl_path[] = { { .procname = "net", .ctl_name = CTL_NET, }, { .procname = "netfilter", .ctl_name = NET_NETFILTER, }, { .procname = "nf_log", .ctl_name = CTL_UNNUMBERED, }, { } }; static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; static struct ctl_table_header *nf_log_dir_header; static int nf_log_proc_dostring(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { const struct nf_logger *logger; char buf[NFLOGGER_NAME_LEN]; size_t size = *lenp; int r = 0; int tindex = (unsigned long)table->extra1; if (write) { if (size > sizeof(buf)) size = sizeof(buf); if (copy_from_user(buf, buffer, size)) return -EFAULT; if (!strcmp(buf, "NONE")) { nf_log_unbind_pf(tindex); return 0; } mutex_lock(&nf_log_mutex); logger = __find_logger(tindex, buf); if (logger == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(nf_loggers[tindex], logger); mutex_unlock(&nf_log_mutex); } else { mutex_lock(&nf_log_mutex); logger = nf_loggers[tindex]; if (!logger) table->data = "NONE"; else table->data = logger->name; r = proc_dostring(table, write, buffer, lenp, ppos); mutex_unlock(&nf_log_mutex); } return r; } static __init int netfilter_log_sysctl_init(void) { int i; for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { snprintf(nf_log_sysctl_fnames[i-NFPROTO_UNSPEC], 3, "%d", i); nf_log_sysctl_table[i].ctl_name = CTL_UNNUMBERED; nf_log_sysctl_table[i].procname = 
nf_log_sysctl_fnames[i-NFPROTO_UNSPEC]; nf_log_sysctl_table[i].data = NULL; nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN * sizeof(char); nf_log_sysctl_table[i].mode = 0644; nf_log_sysctl_table[i].proc_handler = nf_log_proc_dostring; nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; } nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path, nf_log_sysctl_table); if (!nf_log_dir_header) return -ENOMEM; return 0; } #else static __init int netfilter_log_sysctl_init(void) { return 0; } #endif /* CONFIG_SYSCTL */ int __init netfilter_log_init(void) { int i, r; #ifdef CONFIG_PROC_FS if (!proc_create("nf_log", S_IRUGO, proc_net_netfilter, &nflog_file_ops)) return -1; #endif /* Errors will trigger panic, unroll on error is unnecessary. */ r = netfilter_log_sysctl_init(); if (r < 0) return r; for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) INIT_LIST_HEAD(&(nf_loggers_l[i])); return 0; }
gpl-2.0
TeamBliss-Devices/android_kernel_samsung_smdk4412
net/decnet/netfilter/dn_rtmsg.c
113
3784
/* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket * interface as the means of communication with the user level. * * DECnet Routing Message Grabulator * * (C) 2000 ChyGwyn Limited - http://www.chygwyn.com/ * This code may be copied under the GPL v.2 or at your option * any later version. * * Author: Steven Whitehouse <steve@chygwyn.com> * */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/netfilter.h> #include <linux/spinlock.h> #include <linux/netlink.h> #include <linux/netfilter_decnet.h> #include <net/sock.h> #include <net/flow.h> #include <net/dn.h> #include <net/dn_route.h> static struct sock *dnrmg = NULL; static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp) { struct sk_buff *skb = NULL; size_t size; sk_buff_data_t old_tail; struct nlmsghdr *nlh; unsigned char *ptr; struct nf_dn_rtmsg *rtm; size = NLMSG_SPACE(rt_skb->len); size += NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg)); skb = alloc_skb(size, GFP_ATOMIC); if (!skb) goto nlmsg_failure; old_tail = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, 0, size - sizeof(*nlh)); rtm = (struct nf_dn_rtmsg *)NLMSG_DATA(nlh); rtm->nfdn_ifindex = rt_skb->dev->ifindex; ptr = NFDN_RTMSG(rtm); skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len); nlh->nlmsg_len = skb->tail - old_tail; return skb; nlmsg_failure: if (skb) kfree_skb(skb); *errp = -ENOMEM; if (net_ratelimit()) printk(KERN_ERR "dn_rtmsg: error creating netlink message\n"); return NULL; } static void dnrmg_send_peer(struct sk_buff *skb) { struct sk_buff *skb2; int status = 0; int group = 0; unsigned char flags = *skb->data; switch(flags & DN_RT_CNTL_MSK) { case DN_RT_PKT_L1RT: group = DNRNG_NLGRP_L1; break; case DN_RT_PKT_L2RT: group = DNRNG_NLGRP_L2; break; default: return; } skb2 = dnrmg_build_message(skb, &status); if (skb2 == NULL) return; NETLINK_CB(skb2).dst_group = 
group; netlink_broadcast(dnrmg, skb2, 0, group, GFP_ATOMIC); } static unsigned int dnrmg_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { dnrmg_send_peer(skb); return NF_ACCEPT; } #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) static inline void dnrmg_receive_user_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) return; if (!capable(CAP_NET_ADMIN)) RCV_SKB_FAIL(-EPERM); /* Eventually we might send routing messages too */ RCV_SKB_FAIL(-EINVAL); } static struct nf_hook_ops dnrmg_ops __read_mostly = { .hook = dnrmg_hook, .pf = PF_DECnet, .hooknum = NF_DN_ROUTE, .priority = NF_DN_PRI_DNRTMSG, }; static int __init dn_rtmsg_init(void) { int rv = 0; dnrmg = netlink_kernel_create(&init_net, NETLINK_DNRTMSG, DNRNG_NLGRP_MAX, dnrmg_receive_user_skb, NULL, THIS_MODULE); if (dnrmg == NULL) { printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket"); return -ENOMEM; } rv = nf_register_hook(&dnrmg_ops); if (rv) { netlink_kernel_release(dnrmg); } return rv; } static void __exit dn_rtmsg_fini(void) { nf_unregister_hook(&dnrmg_ops); netlink_kernel_release(dnrmg); } MODULE_DESCRIPTION("DECnet Routing Message Grabulator"); MODULE_AUTHOR("Steven Whitehouse <steve@chygwyn.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_DNRTMSG); module_init(dn_rtmsg_init); module_exit(dn_rtmsg_fini);
gpl-2.0
albertghtoun/gcc-libitm
gcc/testsuite/gcc.c-torture/execute/20040703-1.c
113
3342
/* PR 16341 */

/* Declares abort() and exit(); the original relied on implicit function
   declarations, which are invalid since C99 and a hard error in C23.  */
#include <stdlib.h>

#define PART_PRECISION (sizeof (cpp_num_part) * 8)

typedef unsigned int cpp_num_part;
typedef struct cpp_num cpp_num;
struct cpp_num
{
  cpp_num_part high;
  cpp_num_part low;
  int unsignedp;  /* True if value should be treated as unsigned.  */
  int overflow;   /* True if the most recent calculation overflowed.  */
};

/* Nonzero if NUM, of width PRECISION bits, is non-negative (or unsigned
   with its top bit clear).  */
static int
num_positive (cpp_num num, unsigned int precision)
{
  if (precision > PART_PRECISION)
    {
      precision -= PART_PRECISION;
      return (num.high & (cpp_num_part) 1 << (precision - 1)) == 0;
    }

  return (num.low & (cpp_num_part) 1 << (precision - 1)) == 0;
}

/* Mask NUM down to PRECISION significant bits.  */
static cpp_num
num_trim (cpp_num num, unsigned int precision)
{
  if (precision > PART_PRECISION)
    {
      precision -= PART_PRECISION;
      if (precision < PART_PRECISION)
	num.high &= ((cpp_num_part) 1 << precision) - 1;
    }
  else
    {
      if (precision < PART_PRECISION)
	num.low &= ((cpp_num_part) 1 << precision) - 1;
      num.high = 0;
    }

  return num;
}

/* Shift NUM, of width PRECISION, right by N bits.  Arithmetic shift for
   signed values (sign_mask propagates the sign bit), logical otherwise.  */
static cpp_num
num_rshift (cpp_num num, unsigned int precision, unsigned int n)
{
  cpp_num_part sign_mask;
  int x = num_positive (num, precision);

  if (num.unsignedp || x)
    sign_mask = 0;
  else
    sign_mask = ~(cpp_num_part) 0;

  if (n >= precision)
    num.high = num.low = sign_mask;
  else
    {
      /* Sign-extend.  */
      if (precision < PART_PRECISION)
	num.high = sign_mask, num.low |= sign_mask << precision;
      else if (precision < 2 * PART_PRECISION)
	num.high |= sign_mask << (precision - PART_PRECISION);

      if (n >= PART_PRECISION)
	{
	  n -= PART_PRECISION;
	  num.low = num.high;
	  num.high = sign_mask;
	}

      if (n)
	{
	  num.low = (num.low >> n) | (num.high << (PART_PRECISION - n));
	  num.high = (num.high >> n) | (sign_mask << (PART_PRECISION - n));
	}
    }

  num = num_trim (num, precision);
  num.overflow = 0;
  return num;
}

#define num_zerop(num) ((num.low | num.high) == 0)
#define num_eq(num1, num2) (num1.low == num2.low && num1.high == num2.high)

/* Shift NUM, of width PRECISION, left by N bits.  Overflow is detected for
   signed values by shifting back right and comparing with the original.  */
cpp_num
num_lshift (cpp_num num, unsigned int precision, unsigned int n)
{
  if (n >= precision)
    {
      num.overflow = !num.unsignedp && !num_zerop (num);
      num.high = num.low = 0;
    }
  else
    {
      cpp_num orig;
      unsigned int m = n;

      orig = num;
      if (m >= PART_PRECISION)
	{
	  m -= PART_PRECISION;
	  num.high = num.low;
	  num.low = 0;
	}
      if (m)
	{
	  num.high = (num.high << m) | (num.low >> (PART_PRECISION - m));
	  num.low <<= m;
	}
      num = num_trim (num, precision);

      if (num.unsignedp)
	num.overflow = 0;
      else
	{
	  cpp_num maybe_orig = num_rshift (num, precision, n);
	  num.overflow = !num_eq (orig, maybe_orig);
	}
    }

  return num;
}

/* Test fixture: 3 << 16 within a 64-bit precision must give 0x30000 with no
   overflow (PR 16341 miscompiled this).  */
unsigned int precision = 64;
unsigned int n = 16;

cpp_num num = { 0, 3, 0, 0 };

int main()
{
  cpp_num res = num_lshift (num, 64, n);

  if (res.low != 0x30000)
    abort ();
  if (res.high != 0)
    abort ();
  if (res.overflow != 0)
    abort ();

  exit (0);
}
gpl-2.0
Mrcl1450/f2fs
arch/powerpc/platforms/powermac/setup.c
881
15404
/* * Powermac setup and early boot code plus other random bits. * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Adapted for Power Macintosh by Paul Mackerras * Copyright (C) 1996 Paul Mackerras (paulus@samba.org) * * Derived from "arch/alpha/kernel/setup.c" * Copyright (C) 1995 Linus Torvalds * * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ /* * bootup setup stuff.. */ #include <linux/init.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/export.h> #include <linux/user.h> #include <linux/tty.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/major.h> #include <linux/initrd.h> #include <linux/vt_kern.h> #include <linux/console.h> #include <linux/pci.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/pmu.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/bitops.h> #include <linux/suspend.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/memblock.h> #include <asm/reg.h> #include <asm/sections.h> #include <asm/prom.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/pci-bridge.h> #include <asm/ohare.h> #include <asm/mediabay.h> #include <asm/machdep.h> #include <asm/dma.h> #include <asm/cputable.h> #include <asm/btext.h> #include <asm/pmac_feature.h> #include <asm/time.h> #include <asm/mmu_context.h> #include <asm/iommu.h> #include <asm/smu.h> #include <asm/pmc.h> #include <asm/udbg.h> #include "pmac.h" #undef SHOW_GATWICK_IRQS int ppc_override_l2cr = 0; int ppc_override_l2cr_value; 
int has_l2cache = 0; int pmac_newworld; static int current_root_goodness = -1; extern struct machdep_calls pmac_md; #define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */ #ifdef CONFIG_PPC64 int sccdbg; #endif sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN; EXPORT_SYMBOL(sys_ctrler); #ifdef CONFIG_PMAC_SMU unsigned long smu_cmdbuf_abs; EXPORT_SYMBOL(smu_cmdbuf_abs); #endif static void pmac_show_cpuinfo(struct seq_file *m) { struct device_node *np; const char *pp; int plen; int mbmodel; unsigned int mbflags; char* mbname; mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_MODEL, 0); mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_FLAGS, 0); if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME, (long) &mbname) != 0) mbname = "Unknown"; /* find motherboard type */ seq_printf(m, "machine\t\t: "); np = of_find_node_by_path("/"); if (np != NULL) { pp = of_get_property(np, "model", NULL); if (pp != NULL) seq_printf(m, "%s\n", pp); else seq_printf(m, "PowerMac\n"); pp = of_get_property(np, "compatible", &plen); if (pp != NULL) { seq_printf(m, "motherboard\t:"); while (plen > 0) { int l = strlen(pp) + 1; seq_printf(m, " %s", pp); plen -= l; pp += l; } seq_printf(m, "\n"); } of_node_put(np); } else seq_printf(m, "PowerMac\n"); /* print parsed model */ seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname); seq_printf(m, "pmac flags\t: %08x\n", mbflags); /* find l2 cache info */ np = of_find_node_by_name(NULL, "l2-cache"); if (np == NULL) np = of_find_node_by_type(NULL, "cache"); if (np != NULL) { const unsigned int *ic = of_get_property(np, "i-cache-size", NULL); const unsigned int *dc = of_get_property(np, "d-cache-size", NULL); seq_printf(m, "L2 cache\t:"); has_l2cache = 1; if (of_get_property(np, "cache-unified", NULL) != 0 && dc) { seq_printf(m, " %dK unified", *dc / 1024); } else { if (ic) seq_printf(m, " %dK instruction", *ic / 1024); if (dc) seq_printf(m, "%s %dK data", (ic? 
" +": ""), *dc / 1024); } pp = of_get_property(np, "ram-type", NULL); if (pp) seq_printf(m, " %s", pp); seq_printf(m, "\n"); of_node_put(np); } /* Indicate newworld/oldworld */ seq_printf(m, "pmac-generation\t: %s\n", pmac_newworld ? "NewWorld" : "OldWorld"); } #ifndef CONFIG_ADB_CUDA int find_via_cuda(void) { struct device_node *dn = of_find_node_by_name(NULL, "via-cuda"); if (!dn) return 0; of_node_put(dn); printk("WARNING ! Your machine is CUDA-based but your kernel\n"); printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n"); return 0; } #endif #ifndef CONFIG_ADB_PMU int find_via_pmu(void) { struct device_node *dn = of_find_node_by_name(NULL, "via-pmu"); if (!dn) return 0; of_node_put(dn); printk("WARNING ! Your machine is PMU-based but your kernel\n"); printk(" wasn't compiled with CONFIG_ADB_PMU option !\n"); return 0; } #endif #ifndef CONFIG_PMAC_SMU int smu_init(void) { /* should check and warn if SMU is present */ return 0; } #endif #ifdef CONFIG_PPC32 static volatile u32 *sysctrl_regs; static void __init ohare_init(void) { struct device_node *dn; /* this area has the CPU identification register and some registers used by smp boards */ sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000); /* * Turn on the L2 cache. * We assume that we have a PSX memory controller iff * we have an ohare I/O controller. 
*/ dn = of_find_node_by_name(NULL, "ohare"); if (dn) { of_node_put(dn); if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) { if (sysctrl_regs[4] & 0x10) sysctrl_regs[4] |= 0x04000020; else sysctrl_regs[4] |= 0x04000000; if(has_l2cache) printk(KERN_INFO "Level 2 cache enabled\n"); } } } static void __init l2cr_init(void) { /* Checks "l2cr-value" property in the registry */ if (cpu_has_feature(CPU_FTR_L2CR)) { struct device_node *np = of_find_node_by_name(NULL, "cpus"); if (np == 0) np = of_find_node_by_type(NULL, "cpu"); if (np != 0) { const unsigned int *l2cr = of_get_property(np, "l2cr-value", NULL); if (l2cr != 0) { ppc_override_l2cr = 1; ppc_override_l2cr_value = *l2cr; _set_L2CR(0); _set_L2CR(ppc_override_l2cr_value); } of_node_put(np); } } if (ppc_override_l2cr) printk(KERN_INFO "L2CR overridden (0x%x), " "backside cache is %s\n", ppc_override_l2cr_value, (ppc_override_l2cr_value & 0x80000000) ? "enabled" : "disabled"); } #endif static void __init pmac_setup_arch(void) { struct device_node *cpu, *ic; const int *fp; unsigned long pvr; pvr = PVR_VER(mfspr(SPRN_PVR)); /* Set loops_per_jiffy to a half-way reasonable value, for use until calibrate_delay gets called. */ loops_per_jiffy = 50000000 / HZ; cpu = of_find_node_by_type(NULL, "cpu"); if (cpu != NULL) { fp = of_get_property(cpu, "clock-frequency", NULL); if (fp != NULL) { if (pvr >= 0x30 && pvr < 0x80) /* PPC970 etc. */ loops_per_jiffy = *fp / (3 * HZ); else if (pvr == 4 || pvr >= 8) /* 604, G3, G4 etc. */ loops_per_jiffy = *fp / HZ; else /* 601, 603, etc. 
*/ loops_per_jiffy = *fp / (2 * HZ); } of_node_put(cpu); } /* See if newworld or oldworld */ ic = of_find_node_with_property(NULL, "interrupt-controller"); if (ic) { pmac_newworld = 1; of_node_put(ic); } /* Lookup PCI hosts */ pmac_pci_init(); #ifdef CONFIG_PPC32 ohare_init(); l2cr_init(); #endif /* CONFIG_PPC32 */ find_via_cuda(); find_via_pmu(); smu_init(); #if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \ defined(CONFIG_PPC64) pmac_nvram_init(); #endif #ifdef CONFIG_PPC32 #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) ROOT_DEV = Root_RAM0; else #endif ROOT_DEV = DEFAULT_ROOT_DEVICE; #endif #ifdef CONFIG_ADB if (strstr(boot_command_line, "adb_sync")) { extern int __adb_probe_sync; __adb_probe_sync = 1; } #endif /* CONFIG_ADB */ } #ifdef CONFIG_SCSI void note_scsi_host(struct device_node *node, void *host) { } EXPORT_SYMBOL(note_scsi_host); #endif static int initializing = 1; static int pmac_late_init(void) { initializing = 0; return 0; } machine_late_initcall(powermac, pmac_late_init); /* * This is __init_refok because we check for "initializing" before * touching any of the __init sensitive things and "initializing" * will be false after __init time. This can't be __init because it * can be called whenever a disk is first accessed. 
*/ void __init_refok note_bootable_part(dev_t dev, int part, int goodness) { char *p; if (!initializing) return; if ((goodness <= current_root_goodness) && ROOT_DEV != DEFAULT_ROOT_DEVICE) return; p = strstr(boot_command_line, "root="); if (p != NULL && (p == boot_command_line || p[-1] == ' ')) return; ROOT_DEV = dev + part; current_root_goodness = goodness; } #ifdef CONFIG_ADB_CUDA static void cuda_restart(void) { struct adb_request req; cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM); for (;;) cuda_poll(); } static void cuda_shutdown(void) { struct adb_request req; cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN); for (;;) cuda_poll(); } #else #define cuda_restart() #define cuda_shutdown() #endif #ifndef CONFIG_ADB_PMU #define pmu_restart() #define pmu_shutdown() #endif #ifndef CONFIG_PMAC_SMU #define smu_restart() #define smu_shutdown() #endif static void pmac_restart(char *cmd) { switch (sys_ctrler) { case SYS_CTRLER_CUDA: cuda_restart(); break; case SYS_CTRLER_PMU: pmu_restart(); break; case SYS_CTRLER_SMU: smu_restart(); break; default: ; } } static void pmac_power_off(void) { switch (sys_ctrler) { case SYS_CTRLER_CUDA: cuda_shutdown(); break; case SYS_CTRLER_PMU: pmu_shutdown(); break; case SYS_CTRLER_SMU: smu_shutdown(); break; default: ; } } static void pmac_halt(void) { pmac_power_off(); } /* * Early initialization. */ static void __init pmac_init_early(void) { /* Enable early btext debug if requested */ if (strstr(boot_command_line, "btextdbg")) { udbg_adb_init_early(); register_early_udbg_console(); } /* Probe motherboard chipset */ pmac_feature_init(); /* Initialize debug stuff */ udbg_scc_init(!!strstr(boot_command_line, "sccdbg")); udbg_adb_init(!!strstr(boot_command_line, "btextdbg")); #ifdef CONFIG_PPC64 iommu_init_early_dart(&pmac_pci_controller_ops); #endif /* SMP Init has to be done early as we need to patch up * cpu_possible_mask before interrupt stacks are allocated * or kaboom... 
*/ #ifdef CONFIG_SMP pmac_setup_smp(); #endif } static int __init pmac_declare_of_platform_devices(void) { struct device_node *np; if (machine_is(chrp)) return -1; np = of_find_node_by_name(NULL, "valkyrie"); if (np) { of_platform_device_create(np, "valkyrie", NULL); of_node_put(np); } np = of_find_node_by_name(NULL, "platinum"); if (np) { of_platform_device_create(np, "platinum", NULL); of_node_put(np); } np = of_find_node_by_type(NULL, "smu"); if (np) { of_platform_device_create(np, "smu", NULL); of_node_put(np); } np = of_find_node_by_type(NULL, "fcu"); if (np == NULL) { /* Some machines have strangely broken device-tree */ np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e"); } if (np) { of_platform_device_create(np, "temperature", NULL); of_node_put(np); } return 0; } machine_device_initcall(powermac, pmac_declare_of_platform_devices); #ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE /* * This is called very early, as part of console_init() (typically just after * time_init()). This function is respondible for trying to find a good * default console on serial ports. It tries to match the open firmware * default output with one of the available serial console drivers. */ static int __init check_pmac_serial_console(void) { struct device_node *prom_stdout = NULL; int offset = 0; const char *name; #ifdef CONFIG_SERIAL_PMACZILOG_TTYS char *devname = "ttyS"; #else char *devname = "ttyPZ"; #endif pr_debug(" -> check_pmac_serial_console()\n"); /* The user has requested a console so this is already set up. */ if (strstr(boot_command_line, "console=")) { pr_debug(" console was specified !\n"); return -EBUSY; } if (!of_chosen) { pr_debug(" of_chosen is NULL !\n"); return -ENODEV; } /* We are getting a weird phandle from OF ... */ /* ... 
So use the full path instead */ name = of_get_property(of_chosen, "linux,stdout-path", NULL); if (name == NULL) { pr_debug(" no linux,stdout-path !\n"); return -ENODEV; } prom_stdout = of_find_node_by_path(name); if (!prom_stdout) { pr_debug(" can't find stdout package %s !\n", name); return -ENODEV; } pr_debug("stdout is %s\n", prom_stdout->full_name); name = of_get_property(prom_stdout, "name", NULL); if (!name) { pr_debug(" stdout package has no name !\n"); goto not_found; } if (strcmp(name, "ch-a") == 0) offset = 0; else if (strcmp(name, "ch-b") == 0) offset = 1; else goto not_found; of_node_put(prom_stdout); pr_debug("Found serial console at %s%d\n", devname, offset); return add_preferred_console(devname, offset, NULL); not_found: pr_debug("No preferred console found !\n"); of_node_put(prom_stdout); return -ENODEV; } console_initcall(check_pmac_serial_console); #endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init pmac_probe(void) { unsigned long root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(root, "Power Macintosh") && !of_flat_dt_is_compatible(root, "MacRISC")) return 0; #ifdef CONFIG_PPC64 /* * On U3, the DART (iommu) must be allocated now since it * has an impact on htab_initialize (due to the large page it * occupies having to be broken up so the DART itself is not * part of the cacheable linar mapping */ alloc_dart_table(); hpte_init_native(); #endif #ifdef CONFIG_PPC32 /* isa_io_base gets set in pmac_pci_init */ ISA_DMA_THRESHOLD = ~0L; DMA_MODE_READ = 1; DMA_MODE_WRITE = 2; #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PMAC_SMU /* * SMU based G5s need some memory below 2Gb, at least the current * driver needs that. We have to allocate it now. We allocate 4k * (1 small page) for now. 
*/ smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL); #endif /* CONFIG_PMAC_SMU */ pm_power_off = pmac_power_off; return 1; } define_machine(powermac) { .name = "PowerMac", .probe = pmac_probe, .setup_arch = pmac_setup_arch, .init_early = pmac_init_early, .show_cpuinfo = pmac_show_cpuinfo, .init_IRQ = pmac_pic_init, .get_irq = NULL, /* changed later */ .pci_irq_fixup = pmac_pci_irq_fixup, .restart = pmac_restart, .halt = pmac_halt, .time_init = pmac_time_init, .get_boot_time = pmac_get_boot_time, .set_rtc_time = pmac_set_rtc_time, .get_rtc_time = pmac_get_rtc_time, .calibrate_decr = pmac_calibrate_decr, .feature_call = pmac_do_feature_call, .progress = udbg_progress, #ifdef CONFIG_PPC64 .power_save = power4_idle, .enable_pmcs = power4_enable_pmcs, #endif /* CONFIG_PPC64 */ #ifdef CONFIG_PPC32 .pcibios_after_init = pmac_pcibios_after_init, .phys_mem_access_prot = pci_phys_mem_access_prot, #endif };
gpl-2.0
gui2dev/android_kernel_motorola_tinboost
drivers/staging/usbip/vhci_sysfs.c
1137
6131
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 */

#include "usbip_common.h"
#include "vhci.h"

#include <linux/in.h>

/* TODO: refine locking ?*/

/* Sysfs entry to show port status */
static ssize_t show_status(struct device *dev, struct device_attribute *attr,
			   char *out)
{
	char *s = out;
	int i = 0;

	BUG_ON(!the_controller || !out);

	spin_lock(&the_controller->lock);

	/*
	 * output example:
	 * prt sta spd dev socket local_busid
	 * 000 004 000 000 c5a7bb80 1-2.3
	 * 001 004 000 000 d8cee980 2-3.4
	 *
	 * IP address can be retrieved from a socket pointer address by looking
	 * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
	 * port number and its peer IP address.
	 */
	out += sprintf(out, "prt sta spd bus dev socket "
		       "local_busid\n");

	for (i = 0; i < VHCI_NPORTS; i++) {
		struct vhci_device *vdev = port_to_vdev(i);

		/* per-port state is protected by the device's own lock */
		spin_lock(&vdev->ud.lock);

		out += sprintf(out, "%03u %03u ", i, vdev->ud.status);

		if (vdev->ud.status == VDEV_ST_USED) {
			out += sprintf(out, "%03u %08x ",
				       vdev->speed, vdev->devid);
			out += sprintf(out, "%16p ", vdev->ud.tcp_socket);
			out += sprintf(out, "%s", dev_name(&vdev->udev->dev));
		} else
			out += sprintf(out, "000 000 000 0000000000000000 0-0");

		out += sprintf(out, "\n");

		spin_unlock(&vdev->ud.lock);
	}

	spin_unlock(&the_controller->lock);

	return out - s;
}
static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);

/* Sysfs entry to shutdown a virtual connection */
static int vhci_port_disconnect(__u32 rhport)
{
	struct vhci_device *vdev;

	usbip_dbg_vhci_sysfs("enter\n");

	/* lock */
	spin_lock(&the_controller->lock);

	vdev = port_to_vdev(rhport);

	spin_lock(&vdev->ud.lock);
	if (vdev->ud.status == VDEV_ST_NULL) {
		usbip_uerr("not connected %d\n", vdev->ud.status);

		/* unlock */
		spin_unlock(&vdev->ud.lock);
		spin_unlock(&the_controller->lock);

		return -EINVAL;
	}

	/* unlock */
	spin_unlock(&vdev->ud.lock);
	spin_unlock(&the_controller->lock);

	usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);

	return 0;
}

static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int err;
	__u32 rhport = 0;

	sscanf(buf, "%u", &rhport);

	/* check rhport */
	if (rhport >= VHCI_NPORTS) {
		usbip_uerr("invalid port %u\n", rhport);
		return -EINVAL;
	}

	err = vhci_port_disconnect(rhport);
	if (err < 0)
		return -EINVAL;

	usbip_dbg_vhci_sysfs("Leave\n");

	return count;
}
static DEVICE_ATTR(detach, S_IWUSR, NULL, store_detach);

/* Sysfs entry to establish a virtual connection */
static int valid_args(__u32 rhport, enum usb_device_speed speed)
{
	/*
	 * check rhport
	 *
	 * rhport is unsigned, so the former (rhport < 0) test was always
	 * false (dead code); the upper-bound check alone is sufficient.
	 */
	if (rhport >= VHCI_NPORTS) {
		usbip_uerr("port %u\n", rhport);
		return -EINVAL;
	}

	/* check speed */
	switch (speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_WIRELESS:
		break;
	default:
		usbip_uerr("speed %d\n", speed);
		return -EINVAL;
	}

	return 0;
}

/*
 * To start a new USB/IP attachment, a userland program needs to setup a TCP
 * connection and then write its socket descriptor with remote device
 * information into this sysfs file.
 *
 * A remote device is virtually attached to the root-hub port of @rhport with
 * @speed. @devid is embedded into a request to specify the remote device in a
 * server host.
 *
 * write() returns 0 on success, else negative errno.
 */
static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct vhci_device *vdev;
	struct socket *socket;
	int sockfd = 0;
	__u32 rhport = 0, devid = 0, speed = 0;

	/*
	 * @rhport: port number of vhci_hcd
	 * @sockfd: socket descriptor of an established TCP connection
	 * @devid: unique device identifier in a remote host
	 * @speed: usb device speed in a remote host
	 */
	sscanf(buf, "%u %u %u %u", &rhport, &sockfd, &devid, &speed);

	usbip_dbg_vhci_sysfs("rhport(%u) sockfd(%u) devid(%u) speed(%u)\n",
			     rhport, sockfd, devid, speed);

	/* check received parameters */
	if (valid_args(rhport, speed) < 0)
		return -EINVAL;

	/* check sockfd */
	socket = sockfd_to_socket(sockfd);
	if (!socket)
		return -EINVAL;

	/* now need lock until setting vdev status as used */

	/* begin a lock */
	spin_lock(&the_controller->lock);
	vdev = port_to_vdev(rhport);
	spin_lock(&vdev->ud.lock);

	if (vdev->ud.status != VDEV_ST_NULL) {
		/* end of the lock */
		spin_unlock(&vdev->ud.lock);
		spin_unlock(&the_controller->lock);

		usbip_uerr("port %d already used\n", rhport);
		return -EINVAL;
	}

	usbip_uinfo("rhport(%u) sockfd(%d) devid(%u) speed(%u)\n",
		    rhport, sockfd, devid, speed);

	vdev->devid = devid;
	vdev->speed = speed;
	vdev->ud.tcp_socket = socket;
	vdev->ud.status = VDEV_ST_NOTASSIGNED;

	spin_unlock(&vdev->ud.lock);
	spin_unlock(&the_controller->lock);
	/* end the lock */

	/*
	 * this function will sleep, so should be out of the lock. but, it's ok
	 * because we already marked vdev as being used. really?
	 */
	usbip_start_threads(&vdev->ud);

	rh_port_connect(rhport, speed);

	return count;
}
static DEVICE_ATTR(attach, S_IWUSR, NULL, store_attach);

static struct attribute *dev_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_detach.attr,
	&dev_attr_attach.attr,
	&dev_attr_usbip_debug.attr,
	NULL,
};

struct attribute_group dev_attr_group = {
	.attrs = dev_attrs,
};
gpl-2.0
ffolkes/plasmakernel_note4_tw_lp511
arch/arm/mach-s5pv210/clock.c
2161
34192
/* linux/arch/arm/mach-s5pv210/clock.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S5PV210 - Clock support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/io.h> #include <mach/map.h> #include <plat/cpu-freq.h> #include <mach/regs-clock.h> #include <plat/clock.h> #include <plat/cpu.h> #include <plat/pll.h> #include <plat/s5p-clock.h> #include <plat/clock-clksrc.h> #include "common.h" static unsigned long xtal; static struct clksrc_clk clk_mout_apll = { .clk = { .name = "mout_apll", }, .sources = &clk_src_apll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 }, }; static struct clksrc_clk clk_mout_epll = { .clk = { .name = "mout_epll", }, .sources = &clk_src_epll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 }, }; static struct clksrc_clk clk_mout_mpll = { .clk = { .name = "mout_mpll", }, .sources = &clk_src_mpll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 }, }; static struct clk *clkset_armclk_list[] = { [0] = &clk_mout_apll.clk, [1] = &clk_mout_mpll.clk, }; static struct clksrc_sources clkset_armclk = { .sources = clkset_armclk_list, .nr_sources = ARRAY_SIZE(clkset_armclk_list), }; static struct clksrc_clk clk_armclk = { .clk = { .name = "armclk", }, .sources = &clkset_armclk, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 16, .size = 1 }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 3 }, }; static struct clksrc_clk clk_hclk_msys = { .clk = { .name = "hclk_msys", .parent = &clk_armclk.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 8, .size = 3 }, }; static struct clksrc_clk clk_pclk_msys = { .clk = { .name = "pclk_msys", .parent = 
&clk_hclk_msys.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 12, .size = 3 }, }; static struct clksrc_clk clk_sclk_a2m = { .clk = { .name = "sclk_a2m", .parent = &clk_mout_apll.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 }, }; static struct clk *clkset_hclk_sys_list[] = { [0] = &clk_mout_mpll.clk, [1] = &clk_sclk_a2m.clk, }; static struct clksrc_sources clkset_hclk_sys = { .sources = clkset_hclk_sys_list, .nr_sources = ARRAY_SIZE(clkset_hclk_sys_list), }; static struct clksrc_clk clk_hclk_dsys = { .clk = { .name = "hclk_dsys", }, .sources = &clkset_hclk_sys, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 20, .size = 1 }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 16, .size = 4 }, }; static struct clksrc_clk clk_pclk_dsys = { .clk = { .name = "pclk_dsys", .parent = &clk_hclk_dsys.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 20, .size = 3 }, }; static struct clksrc_clk clk_hclk_psys = { .clk = { .name = "hclk_psys", }, .sources = &clkset_hclk_sys, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 24, .size = 1 }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 24, .size = 4 }, }; static struct clksrc_clk clk_pclk_psys = { .clk = { .name = "pclk_psys", .parent = &clk_hclk_psys.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 28, .size = 3 }, }; static int s5pv210_clk_ip0_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP0, clk, enable); } static int s5pv210_clk_ip1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP1, clk, enable); } static int s5pv210_clk_ip2_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP2, clk, enable); } static int s5pv210_clk_ip3_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable); } static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable); } static int s5pv210_clk_mask1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLK_SRC_MASK1, clk, enable); } static int 
/* arch/arm/mach-s5pv210/clock.c fragment (whitespace-collapsed dump; one chunk per physical line, tokens preserved). Defines the S5PV210 clock tree: PHY gate callbacks, fixed/root source clocks, mux+divider (clksrc) clocks, gated peripheral clock tables, EPLL/VPLL rate ops, and the setup/registration entry points. Fix applied: the DAC PHY gate callback was misnamed exynos4_clk_dac_ctrl in this s5pv210 file; renamed to s5pv210_clk_dac_ctrl (matching mainline and every sibling s5pv210_* callback) at its definition and its single use in init_clocks_off[]. */
s5pv210_clk_hdmiphy_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable); } /* gates the DAC PHY; renamed from the misleading exynos4_clk_dac_ctrl */ static int s5pv210_clk_dac_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_DAC_PHY_CONTROL, clk, enable); } static struct clk clk_sclk_hdmi27m = { .name = "sclk_hdmi27m", .rate = 27000000, }; static struct clk clk_sclk_hdmiphy = { .name = "sclk_hdmiphy", }; static struct clk clk_sclk_usbphy0 = { .name = "sclk_usbphy0", }; static struct clk clk_sclk_usbphy1 = { .name = "sclk_usbphy1", }; static struct clk clk_pcmcdclk0 = { .name = "pcmcdclk", }; static struct clk clk_pcmcdclk1 = { .name = "pcmcdclk", }; static struct clk clk_pcmcdclk2 = { .name = "pcmcdclk", }; static struct clk *clkset_vpllsrc_list[] = { [0] = &clk_fin_vpll, [1] = &clk_sclk_hdmi27m, }; static struct clksrc_sources clkset_vpllsrc = { .sources = clkset_vpllsrc_list, .nr_sources = ARRAY_SIZE(clkset_vpllsrc_list), }; static struct clksrc_clk clk_vpllsrc = { .clk = { .name = "vpll_src", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 7), }, .sources = &clkset_vpllsrc, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 28, .size = 1 }, }; static struct clk *clkset_sclk_vpll_list[] = { [0] = &clk_vpllsrc.clk, [1] = &clk_fout_vpll, }; static struct clksrc_sources clkset_sclk_vpll = { .sources = clkset_sclk_vpll_list, .nr_sources = ARRAY_SIZE(clkset_sclk_vpll_list), }; static struct clksrc_clk clk_sclk_vpll = { .clk = { .name = "sclk_vpll", }, .sources = &clkset_sclk_vpll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 12, .size = 1 }, }; static struct clk *clkset_moutdmc0src_list[] = { [0] = &clk_sclk_a2m.clk, [1] = &clk_mout_mpll.clk, [2] = NULL, [3] = NULL, }; static struct clksrc_sources clkset_moutdmc0src = { .sources = clkset_moutdmc0src_list, .nr_sources = ARRAY_SIZE(clkset_moutdmc0src_list), }; static struct clksrc_clk clk_mout_dmc0 = { .clk = { .name = "mout_dmc0", }, .sources = &clkset_moutdmc0src, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 24, .size = 2 }, }; static struct 
/* sclk_dmc0 divider, IMEM/APLL rate-op helpers, then the table of gated clocks that boot disabled-by-default. */
clksrc_clk clk_sclk_dmc0 = { .clk = { .name = "sclk_dmc0", .parent = &clk_mout_dmc0.clk, }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 28, .size = 4 }, }; static unsigned long s5pv210_clk_imem_get_rate(struct clk *clk) { return clk_get_rate(clk->parent) / 2; } static struct clk_ops clk_hclk_imem_ops = { .get_rate = s5pv210_clk_imem_get_rate, }; static unsigned long s5pv210_clk_fout_apll_get_rate(struct clk *clk) { return s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508); } static struct clk_ops clk_fout_apll_ops = { .get_rate = s5pv210_clk_fout_apll_get_rate, }; static struct clk init_clocks_off[] = { { .name = "rot", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1<<29), }, { .name = "fimc", .devname = "s5pv210-fimc.0", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 24), }, { .name = "fimc", .devname = "s5pv210-fimc.1", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 25), }, { .name = "fimc", .devname = "s5pv210-fimc.2", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 26), }, { .name = "jpeg", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 28), }, { .name = "mfc", .devname = "s5p-mfc", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 16), }, { .name = "dac", .devname = "s5p-sdo", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1 << 10), }, { .name = "mixer", .devname = "s5p-mixer", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1 << 9), }, { .name = "vp", .devname = "s5p-mixer", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1 << 8), }, { .name = "hdmi", .devname = "s5pv210-hdmi", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1 << 11), }, { .name = "hdmiphy", .devname = "s5pv210-hdmi", .enable = s5pv210_clk_hdmiphy_ctrl, .ctrlbit = (1 << 0), }, { .name = "dacphy", 
/* init_clocks_off continued: DAC PHY (renamed callback), OTG/USB-host/LCD/CF, system timer, watchdog, RTC, I2C, SPI, PWM timers, ADC, keypad gates. */
.devname = "s5p-sdo", .enable = s5pv210_clk_dac_ctrl, .ctrlbit = (1 << 0), }, { .name = "otg", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1<<16), }, { .name = "usb-host", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1<<17), }, { .name = "lcd", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1<<0), }, { .name = "cfcon", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1<<25), }, { .name = "systimer", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<16), }, { .name = "watchdog", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<22), }, { .name = "rtc", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<15), }, { .name = "i2c", .devname = "s3c2440-i2c.0", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<7), }, { .name = "i2c", .devname = "s3c2440-i2c.1", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 10), }, { .name = "i2c", .devname = "s3c2440-i2c.2", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<9), }, { .name = "i2c", .devname = "s3c2440-hdmiphy-i2c", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 11), }, { .name = "spi", .devname = "s5pv210-spi.0", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<12), }, { .name = "spi", .devname = "s5pv210-spi.1", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<13), }, { .name = "spi", .devname = "s5pv210-spi.2", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<14), }, { .name = "timers", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<23), }, { .name = "adc", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<24), }, { .name = "keypad", .parent = &clk_pclk_psys.clk, .enable = 
/* init_clocks_off tail (keypad, I2S, SPDIF), always-on init_clocks[] (IMEM, UARTs, SROMC), and per-controller HSMMC bus-clock gates. */
s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<21), }, { .name = "iis", .devname = "samsung-i2s.0", .parent = &clk_p, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1<<4), }, { .name = "iis", .devname = "samsung-i2s.1", .parent = &clk_p, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 5), }, { .name = "iis", .devname = "samsung-i2s.2", .parent = &clk_p, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 6), }, { .name = "spdif", .parent = &clk_p, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 0), }, }; static struct clk init_clocks[] = { { .name = "hclk_imem", .parent = &clk_hclk_msys.clk, .ctrlbit = (1 << 5), .enable = s5pv210_clk_ip0_ctrl, .ops = &clk_hclk_imem_ops, }, { .name = "uart", .devname = "s5pv210-uart.0", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 17), }, { .name = "uart", .devname = "s5pv210-uart.1", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 18), }, { .name = "uart", .devname = "s5pv210-uart.2", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 19), }, { .name = "uart", .devname = "s5pv210-uart.3", .parent = &clk_pclk_psys.clk, .enable = s5pv210_clk_ip3_ctrl, .ctrlbit = (1 << 20), }, { .name = "sromc", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip1_ctrl, .ctrlbit = (1 << 26), }, }; static struct clk clk_hsmmc0 = { .name = "hsmmc", .devname = "s3c-sdhci.0", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip2_ctrl, .ctrlbit = (1<<16), }; static struct clk clk_hsmmc1 = { .name = "hsmmc", .devname = "s3c-sdhci.1", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip2_ctrl, .ctrlbit = (1<<17), }; static struct clk clk_hsmmc2 = { .name = "hsmmc", .devname = "s3c-sdhci.2", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip2_ctrl, .ctrlbit = (1<<18), }; static struct clk clk_hsmmc3 = { .name = "hsmmc", .devname = "s3c-sdhci.3", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip2_ctrl, .ctrlbit = (1<<19), }; static struct clk clk_pdma0 = { .name = 
/* PDMA gates, mux source tables (UART, group1, onenand, DAC) and the TV-path sclk chain (dac -> pixel -> hdmi). */
"pdma0", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 3), }; static struct clk clk_pdma1 = { .name = "pdma1", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 4), }; static struct clk *clkset_uart_list[] = { [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, }; static struct clksrc_sources clkset_uart = { .sources = clkset_uart_list, .nr_sources = ARRAY_SIZE(clkset_uart_list), }; static struct clk *clkset_group1_list[] = { [0] = &clk_sclk_a2m.clk, [1] = &clk_mout_mpll.clk, [2] = &clk_mout_epll.clk, [3] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_group1 = { .sources = clkset_group1_list, .nr_sources = ARRAY_SIZE(clkset_group1_list), }; static struct clk *clkset_sclk_onenand_list[] = { [0] = &clk_hclk_psys.clk, [1] = &clk_hclk_dsys.clk, }; static struct clksrc_sources clkset_sclk_onenand = { .sources = clkset_sclk_onenand_list, .nr_sources = ARRAY_SIZE(clkset_sclk_onenand_list), }; static struct clk *clkset_sclk_dac_list[] = { [0] = &clk_sclk_vpll.clk, [1] = &clk_sclk_hdmiphy, }; static struct clksrc_sources clkset_sclk_dac = { .sources = clkset_sclk_dac_list, .nr_sources = ARRAY_SIZE(clkset_sclk_dac_list), }; static struct clksrc_clk clk_sclk_dac = { .clk = { .name = "sclk_dac", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 2), }, .sources = &clkset_sclk_dac, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 8, .size = 1 }, }; static struct clksrc_clk clk_sclk_pixel = { .clk = { .name = "sclk_pixel", .parent = &clk_sclk_vpll.clk, }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 0, .size = 4}, }; static struct clk *clkset_sclk_hdmi_list[] = { [0] = &clk_sclk_pixel.clk, [1] = &clk_sclk_hdmiphy, }; static struct clksrc_sources clkset_sclk_hdmi = { .sources = clkset_sclk_hdmi_list, .nr_sources = ARRAY_SIZE(clkset_sclk_hdmi_list), }; static struct clksrc_clk clk_sclk_hdmi = { .clk = { .name = "sclk_hdmi", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 0), }, .sources = &clkset_sclk_hdmi, 
/* sclk_mixer mux, TV clksrc registration list, and the three audio-bus clksrc clocks (audio0/1/2 share the same 9-way source set). */
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 0, .size = 1 }, }; static struct clk *clkset_sclk_mixer_list[] = { [0] = &clk_sclk_dac.clk, [1] = &clk_sclk_hdmi.clk, }; static struct clksrc_sources clkset_sclk_mixer = { .sources = clkset_sclk_mixer_list, .nr_sources = ARRAY_SIZE(clkset_sclk_mixer_list), }; static struct clksrc_clk clk_sclk_mixer = { .clk = { .name = "sclk_mixer", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 1), }, .sources = &clkset_sclk_mixer, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 1 }, }; static struct clksrc_clk *sclk_tv[] = { &clk_sclk_dac, &clk_sclk_pixel, &clk_sclk_hdmi, &clk_sclk_mixer, }; static struct clk *clkset_sclk_audio0_list[] = { [0] = &clk_ext_xtal_mux, [1] = &clk_pcmcdclk0, [2] = &clk_sclk_hdmi27m, [3] = &clk_sclk_usbphy0, [4] = &clk_sclk_usbphy1, [5] = &clk_sclk_hdmiphy, [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, [8] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_sclk_audio0 = { .sources = clkset_sclk_audio0_list, .nr_sources = ARRAY_SIZE(clkset_sclk_audio0_list), }; static struct clksrc_clk clk_sclk_audio0 = { .clk = { .name = "sclk_audio", .devname = "soc-audio.0", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 24), }, .sources = &clkset_sclk_audio0, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 0, .size = 4 }, }; static struct clk *clkset_sclk_audio1_list[] = { [0] = &clk_ext_xtal_mux, [1] = &clk_pcmcdclk1, [2] = &clk_sclk_hdmi27m, [3] = &clk_sclk_usbphy0, [4] = &clk_sclk_usbphy1, [5] = &clk_sclk_hdmiphy, [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, [8] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_sclk_audio1 = { .sources = clkset_sclk_audio1_list, .nr_sources = ARRAY_SIZE(clkset_sclk_audio1_list), }; static struct clksrc_clk clk_sclk_audio1 = { .clk = { .name = "sclk_audio", .devname = "soc-audio.1", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 25), }, .sources = &clkset_sclk_audio1, .reg_src = { .reg 
/* audio2, SPDIF mux (source = one of the three audio clocks), the general-purpose group2 source set, and the main clksrcs[] mux+divider table. */
= S5P_CLK_SRC6, .shift = 4, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 4, .size = 4 }, }; static struct clk *clkset_sclk_audio2_list[] = { [0] = &clk_ext_xtal_mux, [1] = &clk_pcmcdclk0, [2] = &clk_sclk_hdmi27m, [3] = &clk_sclk_usbphy0, [4] = &clk_sclk_usbphy1, [5] = &clk_sclk_hdmiphy, [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, [8] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_sclk_audio2 = { .sources = clkset_sclk_audio2_list, .nr_sources = ARRAY_SIZE(clkset_sclk_audio2_list), }; static struct clksrc_clk clk_sclk_audio2 = { .clk = { .name = "sclk_audio", .devname = "soc-audio.2", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 26), }, .sources = &clkset_sclk_audio2, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 8, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 8, .size = 4 }, }; static struct clk *clkset_sclk_spdif_list[] = { [0] = &clk_sclk_audio0.clk, [1] = &clk_sclk_audio1.clk, [2] = &clk_sclk_audio2.clk, }; static struct clksrc_sources clkset_sclk_spdif = { .sources = clkset_sclk_spdif_list, .nr_sources = ARRAY_SIZE(clkset_sclk_spdif_list), }; static struct clksrc_clk clk_sclk_spdif = { .clk = { .name = "sclk_spdif", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 27), .ops = &s5p_sclk_spdif_ops, }, .sources = &clkset_sclk_spdif, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 12, .size = 2 }, }; static struct clk *clkset_group2_list[] = { [0] = &clk_ext_xtal_mux, [1] = &clk_xusbxti, [2] = &clk_sclk_hdmi27m, [3] = &clk_sclk_usbphy0, [4] = &clk_sclk_usbphy1, [5] = &clk_sclk_hdmiphy, [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, [8] = &clk_sclk_vpll.clk, }; static struct clksrc_sources clkset_group2 = { .sources = clkset_group2_list, .nr_sources = ARRAY_SIZE(clkset_group2_list), }; static struct clksrc_clk clksrcs[] = { { .clk = { .name = "sclk_dmc", }, .sources = &clkset_group1, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 24, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 28, .size = 4 }, }, { .clk = { 
/* clksrcs[] continued: onenand, three FIMC capture interfaces, CAM0/1, FIMD, MFC entries. */
.name = "sclk_onenand", }, .sources = &clkset_sclk_onenand, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 28, .size = 1 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 12, .size = 3 }, }, { .clk = { .name = "sclk_fimc", .devname = "s5pv210-fimc.0", .enable = s5pv210_clk_mask1_ctrl, .ctrlbit = (1 << 2), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 12, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .devname = "s5pv210-fimc.1", .enable = s5pv210_clk_mask1_ctrl, .ctrlbit = (1 << 3), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 16, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 16, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .devname = "s5pv210-fimc.2", .enable = s5pv210_clk_mask1_ctrl, .ctrlbit = (1 << 4), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 20, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 }, }, { .clk = { .name = "sclk_cam0", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 3), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 }, }, { .clk = { .name = "sclk_cam1", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 4), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 16, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 16, .size = 4 }, }, { .clk = { .name = "sclk_fimd", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 5), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 20, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 20, .size = 4 }, }, { .clk = { .name = "sclk_mfc", .devname = "s5p-mfc", .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 16), }, .sources = &clkset_group1, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 4, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 4, .size = 4 }, }, { .clk = { .name = "sclk_g2d", 
/* clksrcs[] tail (G2D, G3D, CSIS, PWI, PWM) and the per-UART baud clock sources. */
.enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 12), }, .sources = &clkset_group1, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 8, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 8, .size = 4 }, }, { .clk = { .name = "sclk_g3d", .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 8), }, .sources = &clkset_group1, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 0, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_csis", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 6), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 24, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 28, .size = 4 }, }, { .clk = { .name = "sclk_pwi", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 29), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC6, .shift = 20, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV6, .shift = 24, .size = 4 }, }, { .clk = { .name = "sclk_pwm", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 19), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC5, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV5, .shift = 12, .size = 4 }, }, }; static struct clksrc_clk clk_sclk_uart0 = { .clk = { .name = "uclk1", .devname = "s5pv210-uart.0", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 12), }, .sources = &clkset_uart, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 16, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 16, .size = 4 }, }; static struct clksrc_clk clk_sclk_uart1 = { .clk = { .name = "uclk1", .devname = "s5pv210-uart.1", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 13), }, .sources = &clkset_uart, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 20, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 20, .size = 4 }, }; static struct clksrc_clk clk_sclk_uart2 = { .clk = { .name = "uclk1", .devname = "s5pv210-uart.2", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 14), }, .sources = &clkset_uart, .reg_src = { .reg = S5P_CLK_SRC4, .shift 
/* UART3 and the per-MMC / per-SPI clksrc clocks, each with its own SRC4/SRC5 mux field and DIV4/DIV5 divider. */
= 24, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 24, .size = 4 }, }; static struct clksrc_clk clk_sclk_uart3 = { .clk = { .name = "uclk1", .devname = "s5pv210-uart.3", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 15), }, .sources = &clkset_uart, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 28, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 28, .size = 4 }, }; static struct clksrc_clk clk_sclk_mmc0 = { .clk = { .name = "sclk_mmc", .devname = "s3c-sdhci.0", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 8), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 0, .size = 4 }, }; static struct clksrc_clk clk_sclk_mmc1 = { .clk = { .name = "sclk_mmc", .devname = "s3c-sdhci.1", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 9), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 4, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 4, .size = 4 }, }; static struct clksrc_clk clk_sclk_mmc2 = { .clk = { .name = "sclk_mmc", .devname = "s3c-sdhci.2", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 10), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 8, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 8, .size = 4 }, }; static struct clksrc_clk clk_sclk_mmc3 = { .clk = { .name = "sclk_mmc", .devname = "s3c-sdhci.3", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 11), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC4, .shift = 12, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 12, .size = 4 }, }; static struct clksrc_clk clk_sclk_spi0 = { .clk = { .name = "sclk_spi", .devname = "s5pv210-spi.0", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 16), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC5, .shift = 0, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV5, .shift = 0, .size = 4 }, }; static struct clksrc_clk clk_sclk_spi1 = { .clk = { .name = "sclk_spi", .devname = 
/* spi1 tail; registration helper arrays (clksrc_cdev/clk_cdev/sysclks); EPLL divider table (rate, V, M, P, S, K) and s5pv210_epll_set_rate. */
"s5pv210-spi.1", .enable = s5pv210_clk_mask0_ctrl, .ctrlbit = (1 << 17), }, .sources = &clkset_group2, .reg_src = { .reg = S5P_CLK_SRC5, .shift = 4, .size = 4 }, .reg_div = { .reg = S5P_CLK_DIV5, .shift = 4, .size = 4 }, }; static struct clksrc_clk *clksrc_cdev[] = { &clk_sclk_uart0, &clk_sclk_uart1, &clk_sclk_uart2, &clk_sclk_uart3, &clk_sclk_mmc0, &clk_sclk_mmc1, &clk_sclk_mmc2, &clk_sclk_mmc3, &clk_sclk_spi0, &clk_sclk_spi1, }; static struct clk *clk_cdev[] = { &clk_hsmmc0, &clk_hsmmc1, &clk_hsmmc2, &clk_hsmmc3, &clk_pdma0, &clk_pdma1, }; /* Clock initialisation code */ static struct clksrc_clk *sysclks[] = { &clk_mout_apll, &clk_mout_epll, &clk_mout_mpll, &clk_armclk, &clk_hclk_msys, &clk_sclk_a2m, &clk_hclk_dsys, &clk_hclk_psys, &clk_pclk_msys, &clk_pclk_dsys, &clk_pclk_psys, &clk_vpllsrc, &clk_sclk_vpll, &clk_mout_dmc0, &clk_sclk_dmc0, &clk_sclk_audio0, &clk_sclk_audio1, &clk_sclk_audio2, &clk_sclk_spdif, }; static u32 epll_div[][6] = { { 48000000, 0, 48, 3, 3, 0 }, { 96000000, 0, 48, 3, 2, 0 }, { 144000000, 1, 72, 3, 2, 0 }, { 192000000, 0, 48, 3, 1, 0 }, { 288000000, 1, 72, 3, 1, 0 }, { 32750000, 1, 65, 3, 4, 35127 }, { 32768000, 1, 65, 3, 4, 35127 }, { 45158400, 0, 45, 3, 3, 10355 }, { 45000000, 0, 45, 3, 3, 10355 }, { 45158000, 0, 45, 3, 3, 10355 }, { 49125000, 0, 49, 3, 3, 9961 }, { 49152000, 0, 49, 3, 3, 9961 }, { 67737600, 1, 67, 3, 3, 48366 }, { 67738000, 1, 67, 3, 3, 48366 }, { 73800000, 1, 73, 3, 3, 47710 }, { 73728000, 1, 73, 3, 3, 47710 }, { 36000000, 1, 32, 3, 4, 0 }, { 60000000, 1, 60, 3, 3, 0 }, { 72000000, 1, 72, 3, 3, 0 }, { 80000000, 1, 80, 3, 3, 0 }, { 84000000, 0, 42, 3, 2, 0 }, { 50000000, 0, 50, 3, 3, 0 }, }; static int s5pv210_epll_set_rate(struct clk *clk, unsigned long rate) { unsigned int epll_con, epll_con_k; unsigned int i; /* Return if nothing changed */ if (clk->rate == rate) return 0; epll_con = __raw_readl(S5P_EPLL_CON); epll_con_k = __raw_readl(S5P_EPLL_CON1); epll_con_k &= ~PLL46XX_KDIV_MASK; epll_con &= ~(1 << 27 | 
/* EPLL set_rate tail, EPLL ops, VPLL table and get/set ops. NOTE(review): s5pv210_vpll_set_rate spins on the LOCKED bit with no timeout — confirm the PLL is guaranteed to lock, else this can hang the CPU. */
PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT | PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT | PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT); for (i = 0; i < ARRAY_SIZE(epll_div); i++) { if (epll_div[i][0] == rate) { epll_con_k |= epll_div[i][5] << 0; epll_con |= (epll_div[i][1] << 27 | epll_div[i][2] << PLL46XX_MDIV_SHIFT | epll_div[i][3] << PLL46XX_PDIV_SHIFT | epll_div[i][4] << PLL46XX_SDIV_SHIFT); break; } } if (i == ARRAY_SIZE(epll_div)) { printk(KERN_ERR "%s: Invalid Clock EPLL Frequency\n", __func__); return -EINVAL; } __raw_writel(epll_con, S5P_EPLL_CON); __raw_writel(epll_con_k, S5P_EPLL_CON1); printk(KERN_WARNING "EPLL Rate changes from %lu to %lu\n", clk->rate, rate); clk->rate = rate; return 0; } static struct clk_ops s5pv210_epll_ops = { .set_rate = s5pv210_epll_set_rate, .get_rate = s5p_epll_get_rate, }; static u32 vpll_div[][5] = { { 54000000, 3, 53, 3, 0 }, { 108000000, 3, 53, 2, 0 }, }; static unsigned long s5pv210_vpll_get_rate(struct clk *clk) { return clk->rate; } static int s5pv210_vpll_set_rate(struct clk *clk, unsigned long rate) { unsigned int vpll_con; unsigned int i; /* Return if nothing changed */ if (clk->rate == rate) return 0; vpll_con = __raw_readl(S5P_VPLL_CON); vpll_con &= ~(0x1 << 27 | \ PLL90XX_MDIV_MASK << PLL90XX_MDIV_SHIFT | \ PLL90XX_PDIV_MASK << PLL90XX_PDIV_SHIFT | \ PLL90XX_SDIV_MASK << PLL90XX_SDIV_SHIFT); for (i = 0; i < ARRAY_SIZE(vpll_div); i++) { if (vpll_div[i][0] == rate) { vpll_con |= vpll_div[i][1] << PLL90XX_PDIV_SHIFT; vpll_con |= vpll_div[i][2] << PLL90XX_MDIV_SHIFT; vpll_con |= vpll_div[i][3] << PLL90XX_SDIV_SHIFT; vpll_con |= vpll_div[i][4] << 27; break; } } if (i == ARRAY_SIZE(vpll_div)) { printk(KERN_ERR "%s: Invalid Clock VPLL Frequency\n", __func__); return -EINVAL; } __raw_writel(vpll_con, S5P_VPLL_CON); /* Wait for VPLL lock */ while (!(__raw_readl(S5P_VPLL_CON) & (1 << PLL90XX_LOCKED_SHIFT))) continue; clk->rate = rate; return 0; } static struct clk_ops s5pv210_vpll_ops = { .get_rate = s5pv210_vpll_get_rate, .set_rate 
/* s5pv210_setup_clocks: install PLL ops, read divider/PLL registers, compute and report the PLL and bus rates. */
= s5pv210_vpll_set_rate, }; void __init_or_cpufreq s5pv210_setup_clocks(void) { struct clk *xtal_clk; unsigned long vpllsrc; unsigned long armclk; unsigned long hclk_msys; unsigned long hclk_dsys; unsigned long hclk_psys; unsigned long pclk_msys; unsigned long pclk_dsys; unsigned long pclk_psys; unsigned long apll; unsigned long mpll; unsigned long epll; unsigned long vpll; unsigned int ptr; u32 clkdiv0, clkdiv1; /* Set functions for clk_fout_epll */ clk_fout_epll.enable = s5p_epll_enable; clk_fout_epll.ops = &s5pv210_epll_ops; printk(KERN_DEBUG "%s: registering clocks\n", __func__); clkdiv0 = __raw_readl(S5P_CLK_DIV0); clkdiv1 = __raw_readl(S5P_CLK_DIV1); printk(KERN_DEBUG "%s: clkdiv0 = %08x, clkdiv1 = %08x\n", __func__, clkdiv0, clkdiv1); xtal_clk = clk_get(NULL, "xtal"); BUG_ON(IS_ERR(xtal_clk)); xtal = clk_get_rate(xtal_clk); clk_put(xtal_clk); printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal); apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508); mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON), pll_4502); epll = s5p_get_pll46xx(xtal, __raw_readl(S5P_EPLL_CON), __raw_readl(S5P_EPLL_CON1), pll_4600); vpllsrc = clk_get_rate(&clk_vpllsrc.clk); vpll = s5p_get_pll45xx(vpllsrc, __raw_readl(S5P_VPLL_CON), pll_4502); clk_fout_apll.ops = &clk_fout_apll_ops; clk_fout_mpll.rate = mpll; clk_fout_epll.rate = epll; clk_fout_vpll.ops = &s5pv210_vpll_ops; clk_fout_vpll.rate = vpll; printk(KERN_INFO "S5PV210: PLL settings, A=%ld, M=%ld, E=%ld V=%ld", apll, mpll, epll, vpll); armclk = clk_get_rate(&clk_armclk.clk); hclk_msys = clk_get_rate(&clk_hclk_msys.clk); hclk_dsys = clk_get_rate(&clk_hclk_dsys.clk); hclk_psys = clk_get_rate(&clk_hclk_psys.clk); pclk_msys = clk_get_rate(&clk_pclk_msys.clk); pclk_dsys = clk_get_rate(&clk_pclk_dsys.clk); pclk_psys = clk_get_rate(&clk_pclk_psys.clk); printk(KERN_INFO "S5PV210: ARMCLK=%ld, HCLKM=%ld, HCLKD=%ld\n" "HCLKP=%ld, PCLKM=%ld, PCLKD=%ld, PCLKP=%ld\n", armclk, hclk_msys, hclk_dsys, hclk_psys, pclk_msys, 
/* setup tail: seed the generic clk_f/clk_h/clk_p rates, resync clksrcs; clkdev lookup table; s5pv210_register_clocks entry point. */
pclk_dsys, pclk_psys); clk_f.rate = armclk; clk_h.rate = hclk_psys; clk_p.rate = pclk_psys; for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++) s3c_set_clksrc(&clksrcs[ptr], true); } static struct clk *clks[] __initdata = { &clk_sclk_hdmi27m, &clk_sclk_hdmiphy, &clk_sclk_usbphy0, &clk_sclk_usbphy1, &clk_pcmcdclk0, &clk_pcmcdclk1, &clk_pcmcdclk2, }; static struct clk_lookup s5pv210_clk_lookup[] = { CLKDEV_INIT(NULL, "clk_uart_baud0", &clk_p), CLKDEV_INIT("s5pv210-uart.0", "clk_uart_baud1", &clk_sclk_uart0.clk), CLKDEV_INIT("s5pv210-uart.1", "clk_uart_baud1", &clk_sclk_uart1.clk), CLKDEV_INIT("s5pv210-uart.2", "clk_uart_baud1", &clk_sclk_uart2.clk), CLKDEV_INIT("s5pv210-uart.3", "clk_uart_baud1", &clk_sclk_uart3.clk), CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &clk_hsmmc0), CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &clk_hsmmc1), CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.0", &clk_hsmmc2), CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.0", &clk_hsmmc3), CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk), CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk), CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk), CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &clk_sclk_mmc3.clk), CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0), CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1), }; void __init s5pv210_register_clocks(void) { int ptr; s3c24xx_register_clocks(clks, ARRAY_SIZE(clks)); for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++) s3c_register_clksrc(sysclks[ptr], 1); for (ptr = 0; ptr < ARRAY_SIZE(sclk_tv); ptr++) s3c_register_clksrc(sclk_tv[ptr], 1); for (ptr = 0; ptr < ARRAY_SIZE(clksrc_cdev); ptr++) s3c_register_clksrc(clksrc_cdev[ptr], 1); s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs)); s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks)); s3c_register_clocks(init_clocks_off, 
/* registration tail: register then disable the off-by-default gate tables, add clkdev lookups, register/disable console-device clocks, init the PWM clock. */
ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); clkdev_add_table(s5pv210_clk_lookup, ARRAY_SIZE(s5pv210_clk_lookup)); s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev)); for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++) s3c_disable_clocks(clk_cdev[ptr], 1); s3c_pwmclk_init(); }
gpl-2.0
lanniaoershi/android_kernel_oneplus_msm8994
fs/afs/flock.c
2161
16137
/* fs/afs/flock.c fragment (whitespace-collapsed dump; one chunk per physical line, all code tokens preserved byte-for-byte). AFS whole-file advisory locking: a single-threaded workqueue ("kafs_lockd") acquires, extends and releases the server-side lock on behalf of local waiters; per-vnode pending_locks/granted_locks lists bridge the worker to the VFS posix-lock layer, and fl_u.afs.state carries AFS_LOCK_PENDING -> AFS_LOCK_GRANTED (or a negative errno) back to sleepers on fl_wait. Server locks expire after AFS_LOCKWAIT (5 min), hence the half-interval extension scheduling. NOTE(review): the trailing afs_do_getlk() is truncated at this chunk boundary and is left exactly as found. */
/* AFS file locking support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include "internal.h" #define AFS_LOCK_GRANTED 0 #define AFS_LOCK_PENDING 1 static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl); static void afs_fl_release_private(struct file_lock *fl); static struct workqueue_struct *afs_lock_manager; static DEFINE_MUTEX(afs_lock_manager_mutex); static const struct file_lock_operations afs_lock_ops = { .fl_copy_lock = afs_fl_copy_lock, .fl_release_private = afs_fl_release_private, }; /* * initialise the lock manager thread if it isn't already running */ static int afs_init_lock_manager(void) { int ret; ret = 0; if (!afs_lock_manager) { mutex_lock(&afs_lock_manager_mutex); if (!afs_lock_manager) { afs_lock_manager = create_singlethread_workqueue("kafs_lockd"); if (!afs_lock_manager) ret = -ENOMEM; } mutex_unlock(&afs_lock_manager_mutex); } return ret; } /* * destroy the lock manager thread if it's running */ void __exit afs_kill_lock_manager(void) { if (afs_lock_manager) destroy_workqueue(afs_lock_manager); } /* * if the callback is broken on this vnode, then the lock may now be available */ void afs_lock_may_be_available(struct afs_vnode *vnode) { _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode); queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0); } /* * the lock will time out in 5 minutes unless we extend it, so schedule * extension in a bit less than that time */ static void afs_schedule_lock_extension(struct afs_vnode *vnode) { queue_delayed_work(afs_lock_manager, &vnode->lock_work, AFS_LOCKWAIT * HZ / 2); } /* * grant one or more locks (readlocks are allowed to jump the queue if the * first lock in the queue is 
itself a readlock) * - the caller must hold the vnode lock */ static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl) { struct file_lock *p, *_p; list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); if (fl->fl_type == F_RDLCK) { list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { if (p->fl_type == F_RDLCK) { p->fl_u.afs.state = AFS_LOCK_GRANTED; list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks); wake_up(&p->fl_wait); } } } } /* * do work for a lock, including: * - probing for a lock we're waiting on but didn't get immediately * - extending a lock that's close to timing out */ void afs_lock_work(struct work_struct *work) { struct afs_vnode *vnode = container_of(work, struct afs_vnode, lock_work.work); struct file_lock *fl; afs_lock_type_t type; struct key *key; int ret; _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode); spin_lock(&vnode->lock); if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) { _debug("unlock"); spin_unlock(&vnode->lock); /* attempt to release the server lock; if it fails, we just * wait 5 minutes and it'll time out anyway */ ret = afs_vnode_release_lock(vnode, vnode->unlock_key); if (ret < 0) printk(KERN_WARNING "AFS:" " Failed to release lock on {%x:%x} error %d\n", vnode->fid.vid, vnode->fid.vnode, ret); spin_lock(&vnode->lock); key_put(vnode->unlock_key); vnode->unlock_key = NULL; clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags); } /* if we've got a lock, then it must be time to extend that lock as AFS * locks time out after 5 minutes */ if (!list_empty(&vnode->granted_locks)) { _debug("extend"); if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags)) BUG(); fl = list_entry(vnode->granted_locks.next, struct file_lock, fl_u.afs.link); key = key_get(fl->fl_file->private_data); spin_unlock(&vnode->lock); ret = afs_vnode_extend_lock(vnode, key); clear_bit(AFS_VNODE_LOCKING, &vnode->flags); key_put(key); switch (ret) { case 0: afs_schedule_lock_extension(vnode); break; default: /* ummm... 
we failed to extend the lock - retry * extension shortly */ printk(KERN_WARNING "AFS:" " Failed to extend lock on {%x:%x} error %d\n", vnode->fid.vid, vnode->fid.vnode, ret); queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 10); break; } _leave(" [extend]"); return; } /* if we don't have a granted lock, then we must've been called back by * the server, and so if might be possible to get a lock we're * currently waiting for */ if (!list_empty(&vnode->pending_locks)) { _debug("get"); if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags)) BUG(); fl = list_entry(vnode->pending_locks.next, struct file_lock, fl_u.afs.link); key = key_get(fl->fl_file->private_data); type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; spin_unlock(&vnode->lock); ret = afs_vnode_set_lock(vnode, key, type); clear_bit(AFS_VNODE_LOCKING, &vnode->flags); switch (ret) { case -EWOULDBLOCK: _debug("blocked"); break; case 0: _debug("acquired"); if (type == AFS_LOCK_READ) set_bit(AFS_VNODE_READLOCKED, &vnode->flags); else set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags); ret = AFS_LOCK_GRANTED; /* fall through: deliver ret (grant or errno) to the waiter below */ default: spin_lock(&vnode->lock); /* the pending lock may have been withdrawn due to a * signal */ if (list_entry(vnode->pending_locks.next, struct file_lock, fl_u.afs.link) == fl) { fl->fl_u.afs.state = ret; if (ret == AFS_LOCK_GRANTED) afs_grant_locks(vnode, fl); else list_del_init(&fl->fl_u.afs.link); wake_up(&fl->fl_wait); spin_unlock(&vnode->lock); } else { _debug("withdrawn"); clear_bit(AFS_VNODE_READLOCKED, &vnode->flags); clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags); spin_unlock(&vnode->lock); afs_vnode_release_lock(vnode, key); if (!list_empty(&vnode->pending_locks)) afs_lock_may_be_available(vnode); } break; } key_put(key); _leave(" [pend]"); return; } /* looks like the lock request was withdrawn on a signal */ spin_unlock(&vnode->lock); _leave(" [no locks]"); } /* * pass responsibility for the unlocking of a vnode on the server to the * manager thread, lest a pending 
signal in the calling thread interrupt * AF_RXRPC * - the caller must hold the vnode lock */ static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key) { cancel_delayed_work(&vnode->lock_work); if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) && !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags)) BUG(); if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) BUG(); vnode->unlock_key = key_get(key); afs_lock_may_be_available(vnode); } /* * request a lock on a file on the server */ static int afs_do_setlk(struct file *file, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host); afs_lock_type_t type; struct key *key = file->private_data; int ret; _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); /* only whole-file locks are supported */ if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX) return -EINVAL; ret = afs_init_lock_manager(); if (ret < 0) return ret; fl->fl_ops = &afs_lock_ops; INIT_LIST_HEAD(&fl->fl_u.afs.link); fl->fl_u.afs.state = AFS_LOCK_PENDING; type = (fl->fl_type == F_RDLCK) ? 
/* afs_do_setlk continued: under lock_flocks(), refresh the vnode status, then either piggyback on an existing server readlock, ask the server for a new lock (queueing on -EWOULDBLOCK), or wait behind other local holders; interruptible sleep and abort/grant paths follow. */
AFS_LOCK_READ : AFS_LOCK_WRITE; lock_flocks(); /* make sure we've got a callback on this file and that our view of the * data version is up to date */ ret = afs_vnode_fetch_status(vnode, NULL, key); if (ret < 0) goto error; if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) { ret = -EAGAIN; goto error; } spin_lock(&vnode->lock); /* if we've already got a readlock on the server then we can instantly * grant another readlock, irrespective of whether there are any * pending writelocks */ if (type == AFS_LOCK_READ && vnode->flags & (1 << AFS_VNODE_READLOCKED)) { _debug("instant readlock"); ASSERTCMP(vnode->flags & ((1 << AFS_VNODE_LOCKING) | (1 << AFS_VNODE_WRITELOCKED)), ==, 0); ASSERT(!list_empty(&vnode->granted_locks)); goto sharing_existing_lock; } /* if there's no-one else with a lock on this vnode, then we need to * ask the server for a lock */ if (list_empty(&vnode->pending_locks) && list_empty(&vnode->granted_locks)) { _debug("not locked"); ASSERTCMP(vnode->flags & ((1 << AFS_VNODE_LOCKING) | (1 << AFS_VNODE_READLOCKED) | (1 << AFS_VNODE_WRITELOCKED)), ==, 0); list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); set_bit(AFS_VNODE_LOCKING, &vnode->flags); spin_unlock(&vnode->lock); ret = afs_vnode_set_lock(vnode, key, type); clear_bit(AFS_VNODE_LOCKING, &vnode->flags); switch (ret) { case 0: _debug("acquired"); goto acquired_server_lock; case -EWOULDBLOCK: _debug("would block"); spin_lock(&vnode->lock); ASSERT(list_empty(&vnode->granted_locks)); ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link); goto wait; default: spin_lock(&vnode->lock); list_del_init(&fl->fl_u.afs.link); spin_unlock(&vnode->lock); goto error; } } /* otherwise, we need to wait for a local lock to become available */ _debug("wait local"); list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); wait: if (!(fl->fl_flags & FL_SLEEP)) { _debug("noblock"); ret = -EAGAIN; goto abort_attempt; } spin_unlock(&vnode->lock); /* now we need to sleep and wait for the lock 
manager thread to get the * lock from the server */ _debug("sleep"); ret = wait_event_interruptible(fl->fl_wait, fl->fl_u.afs.state <= AFS_LOCK_GRANTED); if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) { ret = fl->fl_u.afs.state; if (ret < 0) goto error; spin_lock(&vnode->lock); goto given_lock; } /* we were interrupted, but someone may still be in the throes of * giving us the lock */ _debug("intr"); ASSERTCMP(ret, ==, -ERESTARTSYS); spin_lock(&vnode->lock); if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) { ret = fl->fl_u.afs.state; if (ret < 0) { spin_unlock(&vnode->lock); goto error; } goto given_lock; } abort_attempt: /* we aren't going to get the lock, either because we're unwilling to * wait, or because some signal happened */ _debug("abort"); if (list_empty(&vnode->granted_locks) && vnode->pending_locks.next == &fl->fl_u.afs.link) { if (vnode->pending_locks.prev != &fl->fl_u.afs.link) { /* kick the next pending lock into having a go */ list_del_init(&fl->fl_u.afs.link); afs_lock_may_be_available(vnode); } } else { list_del_init(&fl->fl_u.afs.link); } spin_unlock(&vnode->lock); goto error; acquired_server_lock: /* we've acquired a server lock, but it needs to be renewed after 5 * mins */ spin_lock(&vnode->lock); afs_schedule_lock_extension(vnode); if (type == AFS_LOCK_READ) set_bit(AFS_VNODE_READLOCKED, &vnode->flags); else set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags); sharing_existing_lock: /* the lock has been granted as far as we're concerned... */ fl->fl_u.afs.state = AFS_LOCK_GRANTED; list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); given_lock: /* ... 
but we do still need to get the VFS's blessing */ ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING))); ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) | (1 << AFS_VNODE_WRITELOCKED))) != 0); ret = posix_lock_file(file, fl, NULL); if (ret < 0) goto vfs_rejected_lock; spin_unlock(&vnode->lock); /* again, make sure we've got a callback on this file and, again, make * sure that our view of the data version is up to date (we ignore * errors incurred here and deal with the consequences elsewhere) */ afs_vnode_fetch_status(vnode, NULL, key); error: unlock_flocks(); _leave(" = %d", ret); return ret; vfs_rejected_lock: /* the VFS rejected the lock we just obtained, so we have to discard * what we just got */ _debug("vfs refused %d", ret); list_del_init(&fl->fl_u.afs.link); if (list_empty(&vnode->granted_locks)) afs_defer_unlock(vnode, key); goto abort_attempt; } /* * unlock on a file on the server */ static int afs_do_unlk(struct file *file, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host); struct key *key = file->private_data; int ret; _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); /* only whole-file unlocks are supported */ if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX) return -EINVAL; fl->fl_ops = &afs_lock_ops; INIT_LIST_HEAD(&fl->fl_u.afs.link); fl->fl_u.afs.state = AFS_LOCK_PENDING; spin_lock(&vnode->lock); ret = posix_lock_file(file, fl, NULL); if (ret < 0) { spin_unlock(&vnode->lock); _leave(" = %d [vfs]", ret); return ret; } /* discard the server lock only if all granted locks are gone */ if (list_empty(&vnode->granted_locks)) afs_defer_unlock(vnode, key); spin_unlock(&vnode->lock); _leave(" = 0"); return 0; } /* * return information about a lock we currently hold, if indeed we hold one */ static int afs_do_getlk(struct file *file, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host); struct key *key = file->private_data; int ret, lock_count; _enter(""); fl->fl_type = 
F_UNLCK; mutex_lock(&vnode->vfs_inode.i_mutex); /* check local lock records first */ ret = 0; posix_test_lock(file, fl); if (fl->fl_type == F_UNLCK) { /* no local locks; consult the server */ ret = afs_vnode_fetch_status(vnode, NULL, key); if (ret < 0) goto error; lock_count = vnode->status.lock_count; if (lock_count) { if (lock_count > 0) fl->fl_type = F_RDLCK; else fl->fl_type = F_WRLCK; fl->fl_start = 0; fl->fl_end = OFFSET_MAX; } } error: mutex_unlock(&vnode->vfs_inode.i_mutex); _leave(" = %d [%hd]", ret, fl->fl_type); return ret; } /* * manage POSIX locks on a file */ int afs_lock(struct file *file, int cmd, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); _enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}", vnode->fid.vid, vnode->fid.vnode, cmd, fl->fl_type, fl->fl_flags, (long long) fl->fl_start, (long long) fl->fl_end); /* AFS doesn't support mandatory locks */ if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK) return -ENOLCK; if (IS_GETLK(cmd)) return afs_do_getlk(file, fl); if (fl->fl_type == F_UNLCK) return afs_do_unlk(file, fl); return afs_do_setlk(file, fl); } /* * manage FLOCK locks on a file */ int afs_flock(struct file *file, int cmd, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); _enter("{%x:%u},%d,{t=%x,fl=%x}", vnode->fid.vid, vnode->fid.vnode, cmd, fl->fl_type, fl->fl_flags); /* * No BSD flocks over NFS allowed. * Note: we could try to fake a POSIX lock request here by * using ((u32) filp | 0x80000000) or some such as the pid. * Not sure whether that would be unique, though, or whether * that would break in other places. 
*/ if (!(fl->fl_flags & FL_FLOCK)) return -ENOLCK; /* we're simulating flock() locks using posix locks on the server */ fl->fl_owner = (fl_owner_t) file; fl->fl_start = 0; fl->fl_end = OFFSET_MAX; if (fl->fl_type == F_UNLCK) return afs_do_unlk(file, fl); return afs_do_setlk(file, fl); } /* * the POSIX lock management core VFS code copies the lock record and adds the * copy into its own list, so we need to add that copy to the vnode's lock * queue in the same place as the original (which will be deleted shortly * after) */ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl) { _enter(""); list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link); } /* * need to remove this lock from the vnode queue when it's removed from the * VFS's list */ static void afs_fl_release_private(struct file_lock *fl) { _enter(""); list_del_init(&fl->fl_u.afs.link); }
gpl-2.0
leilihh/linux
drivers/hwmon/adm9240.c
2673
23911
/* * adm9240.c Part of lm_sensors, Linux kernel modules for hardware * monitoring * * Copyright (C) 1999 Frodo Looijaard <frodol@dds.nl> * Philip Edelbrock <phil@netroedge.com> * Copyright (C) 2003 Michiel Rook <michiel@grendelproject.nl> * Copyright (C) 2005 Grant Coady <gcoady.lk@gmail.com> with valuable * guidance from Jean Delvare * * Driver supports Analog Devices ADM9240 * Dallas Semiconductor DS1780 * National Semiconductor LM81 * * ADM9240 is the reference, DS1780 and LM81 are register compatibles * * Voltage Six inputs are scaled by chip, VID also reported * Temperature Chip temperature to 0.5'C, maximum and max_hysteris * Fans 2 fans, low speed alarm, automatic fan clock divider * Alarms 16-bit map of active alarms * Analog Out 0..1250 mV output * * Chassis Intrusion: clear CI latch with 'echo 0 > intrusion0_alarm' * * Test hardware: Intel SE440BX-2 desktop motherboard --Grant * * LM81 extended temp reading not implemented * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/jiffies.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; enum chips { adm9240, ds1780, lm81 }; /* ADM9240 registers */ #define ADM9240_REG_MAN_ID 0x3e #define ADM9240_REG_DIE_REV 0x3f #define ADM9240_REG_CONFIG 0x40 #define ADM9240_REG_IN(nr) (0x20 + (nr)) /* 0..5 */ #define ADM9240_REG_IN_MAX(nr) (0x2b + (nr) * 2) #define ADM9240_REG_IN_MIN(nr) (0x2c + (nr) * 2) #define ADM9240_REG_FAN(nr) (0x28 + (nr)) /* 0..1 */ #define ADM9240_REG_FAN_MIN(nr) (0x3b + (nr)) #define ADM9240_REG_INT(nr) (0x41 + (nr)) #define ADM9240_REG_INT_MASK(nr) (0x43 + (nr)) #define ADM9240_REG_TEMP 0x27 #define ADM9240_REG_TEMP_MAX(nr) (0x39 + (nr)) /* 0, 1 = high, hyst */ #define ADM9240_REG_ANALOG_OUT 0x19 #define ADM9240_REG_CHASSIS_CLEAR 0x46 #define ADM9240_REG_VID_FAN_DIV 0x47 #define ADM9240_REG_I2C_ADDR 0x48 #define ADM9240_REG_VID4 0x49 #define ADM9240_REG_TEMP_CONF 0x4b /* generalised scaling with integer rounding */ static inline int SCALE(long val, int mul, int div) { if (val < 0) return (val * mul - div / 2) / div; else return (val * mul + div / 2) / div; } /* adm9240 internally scales voltage measurements */ static const u16 nom_mv[] = { 2500, 2700, 3300, 5000, 12000, 2700 }; static inline unsigned int IN_FROM_REG(u8 reg, int n) { return SCALE(reg, nom_mv[n], 192); } static inline u8 IN_TO_REG(unsigned long val, int n) { return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255); } /* temperature range: -40..125, 127 disables temperature alarm */ static inline s8 TEMP_TO_REG(long val) { return clamp_val(SCALE(val, 1, 1000), -40, 127); } /* two fans, each with low fan speed limit */ static inline unsigned int FAN_FROM_REG(u8 reg, u8 div) { if (!reg) /* error */ return -1; if 
(reg == 255) return 0; return SCALE(1350000, 1, reg * div); } /* analog out 0..1250mV */ static inline u8 AOUT_TO_REG(unsigned long val) { return clamp_val(SCALE(val, 255, 1250), 0, 255); } static inline unsigned int AOUT_FROM_REG(u8 reg) { return SCALE(reg, 1250, 255); } static int adm9240_probe(struct i2c_client *client, const struct i2c_device_id *id); static int adm9240_detect(struct i2c_client *client, struct i2c_board_info *info); static void adm9240_init_client(struct i2c_client *client); static int adm9240_remove(struct i2c_client *client); static struct adm9240_data *adm9240_update_device(struct device *dev); /* driver data */ static const struct i2c_device_id adm9240_id[] = { { "adm9240", adm9240 }, { "ds1780", ds1780 }, { "lm81", lm81 }, { } }; MODULE_DEVICE_TABLE(i2c, adm9240_id); static struct i2c_driver adm9240_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "adm9240", }, .probe = adm9240_probe, .remove = adm9240_remove, .id_table = adm9240_id, .detect = adm9240_detect, .address_list = normal_i2c, }; /* per client data */ struct adm9240_data { struct device *hwmon_dev; struct mutex update_lock; char valid; unsigned long last_updated_measure; unsigned long last_updated_config; u8 in[6]; /* ro in0_input */ u8 in_max[6]; /* rw in0_max */ u8 in_min[6]; /* rw in0_min */ u8 fan[2]; /* ro fan1_input */ u8 fan_min[2]; /* rw fan1_min */ u8 fan_div[2]; /* rw fan1_div, read-only accessor */ s16 temp; /* ro temp1_input, 9-bit sign-extended */ s8 temp_max[2]; /* rw 0 -> temp_max, 1 -> temp_max_hyst */ u16 alarms; /* ro alarms */ u8 aout; /* rw aout_output */ u8 vid; /* ro vid */ u8 vrm; /* -- vrm set on startup, no accessor */ }; /*** sysfs accessors ***/ /* temperature */ static ssize_t show_temp(struct device *dev, struct device_attribute *dummy, char *buf) { struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%d\n", data->temp * 500); /* 9-bit value */ } static ssize_t show_max(struct device *dev, struct device_attribute 
*devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%d\n", data->temp_max[attr->index] * 1000); } static ssize_t set_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i2c_client *client = to_i2c_client(dev); struct adm9240_data *data = i2c_get_clientdata(client); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_max[attr->index] = TEMP_TO_REG(val); i2c_smbus_write_byte_data(client, ADM9240_REG_TEMP_MAX(attr->index), data->temp_max[attr->index]); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL); static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 0); static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_max, set_max, 1); /* voltage */ static ssize_t show_in(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%d\n", IN_FROM_REG(data->in[attr->index], attr->index)); } static ssize_t show_in_min(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[attr->index], attr->index)); } static ssize_t show_in_max(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[attr->index], attr->index)); } static ssize_t set_in_min(struct device *dev, struct device_attribute 
*devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i2c_client *client = to_i2c_client(dev); struct adm9240_data *data = i2c_get_clientdata(client); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[attr->index] = IN_TO_REG(val, attr->index); i2c_smbus_write_byte_data(client, ADM9240_REG_IN_MIN(attr->index), data->in_min[attr->index]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i2c_client *client = to_i2c_client(dev); struct adm9240_data *data = i2c_get_clientdata(client); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[attr->index] = IN_TO_REG(val, attr->index); i2c_smbus_write_byte_data(client, ADM9240_REG_IN_MAX(attr->index), data->in_max[attr->index]); mutex_unlock(&data->update_lock); return count; } #define vin(nr) \ static SENSOR_DEVICE_ATTR(in##nr##_input, S_IRUGO, \ show_in, NULL, nr); \ static SENSOR_DEVICE_ATTR(in##nr##_min, S_IRUGO | S_IWUSR, \ show_in_min, set_in_min, nr); \ static SENSOR_DEVICE_ATTR(in##nr##_max, S_IRUGO | S_IWUSR, \ show_in_max, set_in_max, nr); vin(0); vin(1); vin(2); vin(3); vin(4); vin(5); /* fans */ static ssize_t show_fan(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index], 1 << data->fan_div[attr->index])); } static ssize_t show_fan_min(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct adm9240_data *data = adm9240_update_device(dev); 
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[attr->index], 1 << data->fan_div[attr->index])); } static ssize_t show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%d\n", 1 << data->fan_div[attr->index]); } /* write new fan div, callers must hold data->update_lock */ static void adm9240_write_fan_div(struct i2c_client *client, int nr, u8 fan_div) { u8 reg, old, shift = (nr + 2) * 2; reg = i2c_smbus_read_byte_data(client, ADM9240_REG_VID_FAN_DIV); old = (reg >> shift) & 3; reg &= ~(3 << shift); reg |= (fan_div << shift); i2c_smbus_write_byte_data(client, ADM9240_REG_VID_FAN_DIV, reg); dev_dbg(&client->dev, "fan%d clock divider changed from %u to %u\n", nr + 1, 1 << old, 1 << fan_div); } /* * set fan speed low limit: * * - value is zero: disable fan speed low limit alarm * * - value is below fan speed measurement range: enable fan speed low * limit alarm to be asserted while fan speed too slow to measure * * - otherwise: select fan clock divider to suit fan speed low limit, * measurement code may adjust registers to ensure fan speed reading */ static ssize_t set_fan_min(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i2c_client *client = to_i2c_client(dev); struct adm9240_data *data = i2c_get_clientdata(client); int nr = attr->index; u8 new_div; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); if (!val) { data->fan_min[nr] = 255; new_div = data->fan_div[nr]; dev_dbg(&client->dev, "fan%u low limit set disabled\n", nr + 1); } else if (val < 1350000 / (8 * 254)) { new_div = 3; data->fan_min[nr] = 254; dev_dbg(&client->dev, "fan%u low limit set minimum %u\n", nr + 1, FAN_FROM_REG(254, 1 << new_div)); } else { unsigned 
int new_min = 1350000 / val; new_div = 0; while (new_min > 192 && new_div < 3) { new_div++; new_min /= 2; } if (!new_min) /* keep > 0 */ new_min++; data->fan_min[nr] = new_min; dev_dbg(&client->dev, "fan%u low limit set fan speed %u\n", nr + 1, FAN_FROM_REG(new_min, 1 << new_div)); } if (new_div != data->fan_div[nr]) { data->fan_div[nr] = new_div; adm9240_write_fan_div(client, nr, new_div); } i2c_smbus_write_byte_data(client, ADM9240_REG_FAN_MIN(nr), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } #define fan(nr) \ static SENSOR_DEVICE_ATTR(fan##nr##_input, S_IRUGO, \ show_fan, NULL, nr - 1); \ static SENSOR_DEVICE_ATTR(fan##nr##_div, S_IRUGO, \ show_fan_div, NULL, nr - 1); \ static SENSOR_DEVICE_ATTR(fan##nr##_min, S_IRUGO | S_IWUSR, \ show_fan_min, set_fan_min, nr - 1); fan(1); fan(2); /* alarms */ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf) { struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%u\n", data->alarms); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9); static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7); /* vid */ static ssize_t show_vid(struct device 
*dev, struct device_attribute *attr, char *buf) { struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); } static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL); /* analog output */ static ssize_t show_aout(struct device *dev, struct device_attribute *attr, char *buf) { struct adm9240_data *data = adm9240_update_device(dev); return sprintf(buf, "%d\n", AOUT_FROM_REG(data->aout)); } static ssize_t set_aout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct adm9240_data *data = i2c_get_clientdata(client); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->aout = AOUT_TO_REG(val); i2c_smbus_write_byte_data(client, ADM9240_REG_ANALOG_OUT, data->aout); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout); static ssize_t chassis_clear(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct adm9240_data *data = i2c_get_clientdata(client); unsigned long val; if (kstrtoul(buf, 10, &val) || val != 0) return -EINVAL; mutex_lock(&data->update_lock); i2c_smbus_write_byte_data(client, ADM9240_REG_CHASSIS_CLEAR, 0x80); data->valid = 0; /* Force cache refresh */ mutex_unlock(&data->update_lock); dev_dbg(&client->dev, "chassis intrusion latch cleared\n"); return count; } static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, show_alarm, chassis_clear, 12); static struct attribute *adm9240_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, 
&sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, &dev_attr_temp1_input.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan2_div.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &dev_attr_alarms.attr, &dev_attr_aout_output.attr, &sensor_dev_attr_intrusion0_alarm.dev_attr.attr, &dev_attr_cpu0_vid.attr, NULL }; static const struct attribute_group adm9240_group = { .attrs = adm9240_attributes, }; /*** sensor chip detect and driver install ***/ /* Return 0 if detection is successful, -ENODEV otherwise */ static int adm9240_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; const char *name = ""; int address = new_client->addr; u8 man_id, die_rev; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* verify chip: reg address should match i2c address */ if (i2c_smbus_read_byte_data(new_client, ADM9240_REG_I2C_ADDR) != address) { dev_err(&adapter->dev, "detect fail: address 
match, 0x%02x\n", address); return -ENODEV; } /* check known chip manufacturer */ man_id = i2c_smbus_read_byte_data(new_client, ADM9240_REG_MAN_ID); if (man_id == 0x23) { name = "adm9240"; } else if (man_id == 0xda) { name = "ds1780"; } else if (man_id == 0x01) { name = "lm81"; } else { dev_err(&adapter->dev, "detect fail: unknown manuf, 0x%02x\n", man_id); return -ENODEV; } /* successful detect, print chip info */ die_rev = i2c_smbus_read_byte_data(new_client, ADM9240_REG_DIE_REV); dev_info(&adapter->dev, "found %s revision %u\n", man_id == 0x23 ? "ADM9240" : man_id == 0xda ? "DS1780" : "LM81", die_rev); strlcpy(info->type, name, I2C_NAME_SIZE); return 0; } static int adm9240_probe(struct i2c_client *new_client, const struct i2c_device_id *id) { struct adm9240_data *data; int err; data = devm_kzalloc(&new_client->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(new_client, data); mutex_init(&data->update_lock); adm9240_init_client(new_client); /* populate sysfs filesystem */ err = sysfs_create_group(&new_client->dev.kobj, &adm9240_group); if (err) return err; data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove; } return 0; exit_remove: sysfs_remove_group(&new_client->dev.kobj, &adm9240_group); return err; } static int adm9240_remove(struct i2c_client *client) { struct adm9240_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &adm9240_group); return 0; } static void adm9240_init_client(struct i2c_client *client) { struct adm9240_data *data = i2c_get_clientdata(client); u8 conf = i2c_smbus_read_byte_data(client, ADM9240_REG_CONFIG); u8 mode = i2c_smbus_read_byte_data(client, ADM9240_REG_TEMP_CONF) & 3; data->vrm = vid_which_vrm(); /* need this to report vid as mV */ dev_info(&client->dev, "Using VRM: %d.%d\n", data->vrm / 10, data->vrm % 10); if (conf & 1) { /* measurement cycle 
running: report state */ dev_info(&client->dev, "status: config 0x%02x mode %u\n", conf, mode); } else { /* cold start: open limits before starting chip */ int i; for (i = 0; i < 6; i++) { i2c_smbus_write_byte_data(client, ADM9240_REG_IN_MIN(i), 0); i2c_smbus_write_byte_data(client, ADM9240_REG_IN_MAX(i), 255); } i2c_smbus_write_byte_data(client, ADM9240_REG_FAN_MIN(0), 255); i2c_smbus_write_byte_data(client, ADM9240_REG_FAN_MIN(1), 255); i2c_smbus_write_byte_data(client, ADM9240_REG_TEMP_MAX(0), 127); i2c_smbus_write_byte_data(client, ADM9240_REG_TEMP_MAX(1), 127); /* start measurement cycle */ i2c_smbus_write_byte_data(client, ADM9240_REG_CONFIG, 1); dev_info(&client->dev, "cold start: config was 0x%02x mode %u\n", conf, mode); } } static struct adm9240_data *adm9240_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct adm9240_data *data = i2c_get_clientdata(client); int i; mutex_lock(&data->update_lock); /* minimum measurement cycle: 1.75 seconds */ if (time_after(jiffies, data->last_updated_measure + (HZ * 7 / 4)) || !data->valid) { for (i = 0; i < 6; i++) { /* read voltages */ data->in[i] = i2c_smbus_read_byte_data(client, ADM9240_REG_IN(i)); } data->alarms = i2c_smbus_read_byte_data(client, ADM9240_REG_INT(0)) | i2c_smbus_read_byte_data(client, ADM9240_REG_INT(1)) << 8; /* * read temperature: assume temperature changes less than * 0.5'C per two measurement cycles thus ignore possible * but unlikely aliasing error on lsb reading. 
--Grant */ data->temp = ((i2c_smbus_read_byte_data(client, ADM9240_REG_TEMP) << 8) | i2c_smbus_read_byte_data(client, ADM9240_REG_TEMP_CONF)) / 128; for (i = 0; i < 2; i++) { /* read fans */ data->fan[i] = i2c_smbus_read_byte_data(client, ADM9240_REG_FAN(i)); /* adjust fan clock divider on overflow */ if (data->valid && data->fan[i] == 255 && data->fan_div[i] < 3) { adm9240_write_fan_div(client, i, ++data->fan_div[i]); /* adjust fan_min if active, but not to 0 */ if (data->fan_min[i] < 255 && data->fan_min[i] >= 2) data->fan_min[i] /= 2; } } data->last_updated_measure = jiffies; } /* minimum config reading cycle: 300 seconds */ if (time_after(jiffies, data->last_updated_config + (HZ * 300)) || !data->valid) { for (i = 0; i < 6; i++) { data->in_min[i] = i2c_smbus_read_byte_data(client, ADM9240_REG_IN_MIN(i)); data->in_max[i] = i2c_smbus_read_byte_data(client, ADM9240_REG_IN_MAX(i)); } for (i = 0; i < 2; i++) { data->fan_min[i] = i2c_smbus_read_byte_data(client, ADM9240_REG_FAN_MIN(i)); } data->temp_max[0] = i2c_smbus_read_byte_data(client, ADM9240_REG_TEMP_MAX(0)); data->temp_max[1] = i2c_smbus_read_byte_data(client, ADM9240_REG_TEMP_MAX(1)); /* read fan divs and 5-bit VID */ i = i2c_smbus_read_byte_data(client, ADM9240_REG_VID_FAN_DIV); data->fan_div[0] = (i >> 4) & 3; data->fan_div[1] = (i >> 6) & 3; data->vid = i & 0x0f; data->vid |= (i2c_smbus_read_byte_data(client, ADM9240_REG_VID4) & 1) << 4; /* read analog out */ data->aout = i2c_smbus_read_byte_data(client, ADM9240_REG_ANALOG_OUT); data->last_updated_config = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } module_i2c_driver(adm9240_driver); MODULE_AUTHOR("Michiel Rook <michiel@grendelproject.nl>, " "Grant Coady <gcoady.lk@gmail.com> and others"); MODULE_DESCRIPTION("ADM9240/DS1780/LM81 driver"); MODULE_LICENSE("GPL");
gpl-2.0
KangBangKreations/KangBanged-Kernel
drivers/s390/block/dasd_fba.c
2929
17919
/* * File...........: linux/drivers/s390/block/dasd_fba.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> * Copyright IBM Corp. 1999, 2009 */ #define KMSG_COMPONENT "dasd-fba" #include <linux/stddef.h> #include <linux/kernel.h> #include <asm/debug.h> #include <linux/slab.h> #include <linux/hdreg.h> /* HDIO_GETGEO */ #include <linux/bio.h> #include <linux/module.h> #include <linux/init.h> #include <asm/idals.h> #include <asm/ebcdic.h> #include <asm/io.h> #include <asm/ccwdev.h> #include "dasd_int.h" #include "dasd_fba.h" #ifdef PRINTK_HEADER #undef PRINTK_HEADER #endif /* PRINTK_HEADER */ #define PRINTK_HEADER "dasd(fba):" #define DASD_FBA_CCW_WRITE 0x41 #define DASD_FBA_CCW_READ 0x42 #define DASD_FBA_CCW_LOCATE 0x43 #define DASD_FBA_CCW_DEFINE_EXTENT 0x63 MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_fba_discipline; struct dasd_fba_private { struct dasd_fba_characteristics rdc_data; }; static struct ccw_device_id dasd_fba_ids[] = { { CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1}, { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2}, { /* end of list */ }, }; MODULE_DEVICE_TABLE(ccw, dasd_fba_ids); static struct ccw_driver dasd_fba_driver; /* see below */ static int dasd_fba_probe(struct ccw_device *cdev) { return dasd_generic_probe(cdev, &dasd_fba_discipline); } static int dasd_fba_set_online(struct ccw_device *cdev) { return dasd_generic_set_online(cdev, &dasd_fba_discipline); } static struct ccw_driver dasd_fba_driver = { .driver = { .name = "dasd-fba", .owner = THIS_MODULE, }, .ids = dasd_fba_ids, .probe = dasd_fba_probe, .remove = dasd_generic_remove, .set_offline = dasd_generic_set_offline, .set_online = dasd_fba_set_online, .notify = dasd_generic_notify, .path_event = dasd_generic_path_event, .freeze = dasd_generic_pm_freeze, .thaw = dasd_generic_restore_device, .restore = dasd_generic_restore_device, }; static void define_extent(struct ccw1 * ccw, struct 
DE_fba_data *data, int rw, int blksize, int beg, int nr) { ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT; ccw->flags = 0; ccw->count = 16; ccw->cda = (__u32) __pa(data); memset(data, 0, sizeof (struct DE_fba_data)); if (rw == WRITE) (data->mask).perm = 0x0; else if (rw == READ) (data->mask).perm = 0x1; else data->mask.perm = 0x2; data->blk_size = blksize; data->ext_loc = beg; data->ext_end = nr - 1; } static void locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, int block_nr, int block_ct) { ccw->cmd_code = DASD_FBA_CCW_LOCATE; ccw->flags = 0; ccw->count = 8; ccw->cda = (__u32) __pa(data); memset(data, 0, sizeof (struct LO_fba_data)); if (rw == WRITE) data->operation.cmd = 0x5; else if (rw == READ) data->operation.cmd = 0x6; else data->operation.cmd = 0x8; data->blk_nr = block_nr; data->blk_ct = block_ct; } static int dasd_fba_check_characteristics(struct dasd_device *device) { struct dasd_block *block; struct dasd_fba_private *private; struct ccw_device *cdev = device->cdev; int rc; int readonly; private = (struct dasd_fba_private *) device->private; if (!private) { private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); if (!private) { dev_warn(&device->cdev->dev, "Allocating memory for private DASD " "data failed\n"); return -ENOMEM; } device->private = (void *) private; } else { memset(private, 0, sizeof(*private)); } block = dasd_alloc_block(); if (IS_ERR(block)) { DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate " "dasd block structure"); device->private = NULL; kfree(private); return PTR_ERR(block); } device->block = block; block->base = device; /* Read Device Characteristics */ rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC, &private->rdc_data, 32); if (rc) { DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device " "characteristics returned error %d", rc); device->block = NULL; dasd_free_block(block); device->private = NULL; kfree(private); return rc; } device->default_expires = DASD_EXPIRES; device->path_data.opm = 
LPM_ANYPATH; readonly = dasd_device_is_ro(device); if (readonly) set_bit(DASD_FLAG_DEVICE_RO, &device->flags); dev_info(&device->cdev->dev, "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB " "and %d B/blk%s\n", cdev->id.dev_type, cdev->id.dev_model, cdev->id.cu_type, cdev->id.cu_model, ((private->rdc_data.blk_bdsa * (private->rdc_data.blk_size >> 9)) >> 11), private->rdc_data.blk_size, readonly ? ", read-only device" : ""); return 0; } static int dasd_fba_do_analysis(struct dasd_block *block) { struct dasd_fba_private *private; int sb, rc; private = (struct dasd_fba_private *) block->base->private; rc = dasd_check_blocksize(private->rdc_data.blk_size); if (rc) { DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d", private->rdc_data.blk_size); return rc; } block->blocks = private->rdc_data.blk_bdsa; block->bp_block = private->rdc_data.blk_size; block->s2b_shift = 0; /* bits to shift 512 to get a block */ for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1) block->s2b_shift++; return 0; } static int dasd_fba_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) { if (dasd_check_blocksize(block->bp_block) != 0) return -EINVAL; geo->cylinders = (block->blocks << block->s2b_shift) >> 10; geo->heads = 16; geo->sectors = 128 >> block->s2b_shift; return 0; } static dasd_erp_fn_t dasd_fba_erp_action(struct dasd_ccw_req * cqr) { return dasd_default_erp_action; } static dasd_erp_fn_t dasd_fba_erp_postaction(struct dasd_ccw_req * cqr) { if (cqr->function == dasd_default_erp_action) return dasd_default_erp_postaction; DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p", cqr->function); return NULL; } static void dasd_fba_check_for_device_change(struct dasd_device *device, struct dasd_ccw_req *cqr, struct irb *irb) { char mask; /* first of all check for state change pending interrupt */ mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; if ((irb->scsw.cmd.dstat & mask) == mask) 
dasd_generic_handle_state_change(device); }; static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, struct dasd_block *block, struct request *req) { struct dasd_fba_private *private; unsigned long *idaws; struct LO_fba_data *LO_data; struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; struct bio_vec *bv; char *dst; int count, cidaw, cplength, datasize; sector_t recid, first_rec, last_rec; unsigned int blksize, off; unsigned char cmd; private = (struct dasd_fba_private *) block->base->private; if (rq_data_dir(req) == READ) { cmd = DASD_FBA_CCW_READ; } else if (rq_data_dir(req) == WRITE) { cmd = DASD_FBA_CCW_WRITE; } else return ERR_PTR(-EINVAL); blksize = block->bp_block; /* Calculate record id of first and last block. */ first_rec = blk_rq_pos(req) >> block->s2b_shift; last_rec = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; /* Check struct bio and count the number of blocks for the request. */ count = 0; cidaw = 0; rq_for_each_segment(bv, req, iter) { if (bv->bv_len & (blksize - 1)) /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); count += bv->bv_len >> (block->s2b_shift + 9); #if defined(CONFIG_64BIT) if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) cidaw += bv->bv_len / blksize; #endif } /* Paranoia. */ if (count != last_rec - first_rec + 1) return ERR_PTR(-EINVAL); /* 1x define extent + 1x locate record + number of blocks */ cplength = 2 + count; /* 1x define extent + 1x locate record */ datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) + cidaw * sizeof(unsigned long); /* * Find out number of additional locate record ccws if the device * can't do data chaining. */ if (private->rdc_data.mode.bits.data_chain == 0) { cplength += count - 1; datasize += (count - 1)*sizeof(struct LO_fba_data); } /* Allocate the ccw request. 
*/ cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; /* First ccw is define extent. */ define_extent(ccw++, cqr->data, rq_data_dir(req), block->bp_block, blk_rq_pos(req), blk_rq_sectors(req)); /* Build locate_record + read/write ccws. */ idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); LO_data = (struct LO_fba_data *) (idaws + cidaw); /* Locate record for all blocks for smart devices. */ if (private->rdc_data.mode.bits.data_chain != 0) { ccw[-1].flags |= CCW_FLAG_CC; locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count); } recid = first_rec; rq_for_each_segment(bv, req, iter) { dst = page_address(bv->bv_page) + bv->bv_offset; if (dasd_page_cache) { char *copy = kmem_cache_alloc(dasd_page_cache, GFP_DMA | __GFP_NOWARN); if (copy && rq_data_dir(req) == WRITE) memcpy(copy + bv->bv_offset, dst, bv->bv_len); if (copy) dst = copy + bv->bv_offset; } for (off = 0; off < bv->bv_len; off += blksize) { /* Locate record for stupid devices. 
*/ if (private->rdc_data.mode.bits.data_chain == 0) { ccw[-1].flags |= CCW_FLAG_CC; locate_record(ccw, LO_data++, rq_data_dir(req), recid - first_rec, 1); ccw->flags = CCW_FLAG_CC; ccw++; } else { if (recid > first_rec) ccw[-1].flags |= CCW_FLAG_DC; else ccw[-1].flags |= CCW_FLAG_CC; } ccw->cmd_code = cmd; ccw->count = block->bp_block; if (idal_is_needed(dst, blksize)) { ccw->cda = (__u32)(addr_t) idaws; ccw->flags = CCW_FLAG_IDA; idaws = idal_create_words(idaws, dst, blksize); } else { ccw->cda = (__u32)(addr_t) dst; ccw->flags = 0; } ccw++; dst += blksize; recid++; } } if (blk_noretry_request(req) || block->base->features & DASD_FEATURE_FAILFAST) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->startdev = memdev; cqr->memdev = memdev; cqr->block = block; cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */ cqr->retries = 32; cqr->buildclk = get_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } static int dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) { struct dasd_fba_private *private; struct ccw1 *ccw; struct req_iterator iter; struct bio_vec *bv; char *dst, *cda; unsigned int blksize, off; int status; if (!dasd_page_cache) goto out; private = (struct dasd_fba_private *) cqr->block->base->private; blksize = cqr->block->bp_block; ccw = cqr->cpaddr; /* Skip over define extent & locate record. */ ccw++; if (private->rdc_data.mode.bits.data_chain != 0) ccw++; rq_for_each_segment(bv, req, iter) { dst = page_address(bv->bv_page) + bv->bv_offset; for (off = 0; off < bv->bv_len; off += blksize) { /* Skip locate record. 
*/ if (private->rdc_data.mode.bits.data_chain == 0) ccw++; if (dst) { if (ccw->flags & CCW_FLAG_IDA) cda = *((char **)((addr_t) ccw->cda)); else cda = (char *)((addr_t) ccw->cda); if (dst != cda) { if (rq_data_dir(req) == READ) memcpy(dst, cda, bv->bv_len); kmem_cache_free(dasd_page_cache, (void *)((addr_t)cda & PAGE_MASK)); } dst = NULL; } ccw++; } } out: status = cqr->status == DASD_CQR_DONE; dasd_sfree_request(cqr, cqr->memdev); return status; } static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr) { cqr->status = DASD_CQR_FILLED; }; static int dasd_fba_fill_info(struct dasd_device * device, struct dasd_information2_t * info) { info->label_block = 1; info->FBA_layout = 1; info->format = DASD_FORMAT_LDL; info->characteristics_size = sizeof(struct dasd_fba_characteristics); memcpy(info->characteristics, &((struct dasd_fba_private *) device->private)->rdc_data, sizeof (struct dasd_fba_characteristics)); info->confdata_size = 0; return 0; } static void dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb, char *reason) { u64 *sense; sense = (u64 *) dasd_get_sense(irb); if (sense) { DBF_DEV_EVENT(DBF_EMERG, device, "%s: %s %02x%02x%02x %016llx %016llx %016llx " "%016llx", reason, scsw_is_tm(&irb->scsw) ? 
"t" : "c", scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), sense[0], sense[1], sense[2], sense[3]); } else { DBF_DEV_EVENT(DBF_EMERG, device, "%s", "SORRY - NO VALID SENSE AVAILABLE\n"); } } static void dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, struct irb *irb) { char *page; struct ccw1 *act, *end, *last; int len, sl, sct, count; page = (char *) get_zeroed_page(GFP_ATOMIC); if (page == NULL) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "No memory to dump sense data"); return; } len = sprintf(page, KERN_ERR PRINTK_HEADER " I/O status report for device %s:\n", dev_name(&device->cdev->dev)); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " in req: %p CS: 0x%02X DS: 0x%02X\n", req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); len += sprintf(page + len, KERN_ERR PRINTK_HEADER " device %s: Failing CCW: %p\n", dev_name(&device->cdev->dev), (void *) (addr_t) irb->scsw.cmd.cpa); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { len += sprintf(page + len, " %02x", irb->ecw[8 * sl + sct]); } len += sprintf(page + len, "\n"); } } else { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " SORRY - NO VALID SENSE AVAILABLE\n"); } printk(KERN_ERR "%s", page); /* dump the Channel Program */ /* print first CCWs (maximum 8) */ act = req->cpaddr; for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); end = min(act + 8, last); len = sprintf(page, KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req); while (act <= end) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " CCW %p: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) len += sprintf(page + len, " %08X", ((int *) (addr_t) act->cda) [(count>>2)]); len += sprintf(page + len, "\n"); act++; } printk(KERN_ERR "%s", page); /* print failing CCW 
area */ len = 0; if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) { act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2; len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); } end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last); while (act <= end) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " CCW %p: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) len += sprintf(page + len, " %08X", ((int *) (addr_t) act->cda) [(count>>2)]); len += sprintf(page + len, "\n"); act++; } /* print last CCWs */ if (act < last - 2) { act = last - 2; len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); } while (act <= last) { len += sprintf(page + len, KERN_ERR PRINTK_HEADER " CCW %p: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) len += sprintf(page + len, " %08X", ((int *) (addr_t) act->cda) [(count>>2)]); len += sprintf(page + len, "\n"); act++; } if (len > 0) printk(KERN_ERR "%s", page); free_page((unsigned long) page); } /* * max_blocks is dependent on the amount of storage that is available * in the static io buffer for each device. Currently each device has * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In * addition we have one define extent ccw + 16 bytes of data and a * locate record ccw for each block (stupid devices!) + 16 bytes of data. * That makes: * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum. * We want to fit two into the available memory so that we can immediately * start the next request if one finishes off. That makes 100.1 blocks * for one request. Give a little safety and the result is 96. 
*/ static struct dasd_discipline dasd_fba_discipline = { .owner = THIS_MODULE, .name = "FBA ", .ebcname = "FBA ", .max_blocks = 96, .check_device = dasd_fba_check_characteristics, .do_analysis = dasd_fba_do_analysis, .verify_path = dasd_generic_verify_path, .fill_geometry = dasd_fba_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, .handle_terminated_request = dasd_fba_handle_terminated_request, .erp_action = dasd_fba_erp_action, .erp_postaction = dasd_fba_erp_postaction, .check_for_device_change = dasd_fba_check_for_device_change, .build_cp = dasd_fba_build_cp, .free_cp = dasd_fba_free_cp, .dump_sense = dasd_fba_dump_sense, .dump_sense_dbf = dasd_fba_dump_sense_dbf, .fill_info = dasd_fba_fill_info, }; static int __init dasd_fba_init(void) { int ret; ASCEBC(dasd_fba_discipline.ebcname, 4); ret = ccw_driver_register(&dasd_fba_driver); if (!ret) wait_for_device_probe(); return ret; } static void __exit dasd_fba_cleanup(void) { ccw_driver_unregister(&dasd_fba_driver); } module_init(dasd_fba_init); module_exit(dasd_fba_cleanup);
gpl-2.0
hiikezoe/android_kernel_fujitsu_f12nad
drivers/input/misc/adxl34x.c
2929
23440
/* * ADXL345/346 Three-Axis Digital Accelerometers * * Enter bugs at http://blackfin.uclinux.org/ * * Copyright (C) 2009 Michael Hennerich, Analog Devices Inc. * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/input/adxl34x.h> #include "adxl34x.h" /* ADXL345/6 Register Map */ #define DEVID 0x00 /* R Device ID */ #define THRESH_TAP 0x1D /* R/W Tap threshold */ #define OFSX 0x1E /* R/W X-axis offset */ #define OFSY 0x1F /* R/W Y-axis offset */ #define OFSZ 0x20 /* R/W Z-axis offset */ #define DUR 0x21 /* R/W Tap duration */ #define LATENT 0x22 /* R/W Tap latency */ #define WINDOW 0x23 /* R/W Tap window */ #define THRESH_ACT 0x24 /* R/W Activity threshold */ #define THRESH_INACT 0x25 /* R/W Inactivity threshold */ #define TIME_INACT 0x26 /* R/W Inactivity time */ #define ACT_INACT_CTL 0x27 /* R/W Axis enable control for activity and */ /* inactivity detection */ #define THRESH_FF 0x28 /* R/W Free-fall threshold */ #define TIME_FF 0x29 /* R/W Free-fall time */ #define TAP_AXES 0x2A /* R/W Axis control for tap/double tap */ #define ACT_TAP_STATUS 0x2B /* R Source of tap/double tap */ #define BW_RATE 0x2C /* R/W Data rate and power mode control */ #define POWER_CTL 0x2D /* R/W Power saving features control */ #define INT_ENABLE 0x2E /* R/W Interrupt enable control */ #define INT_MAP 0x2F /* R/W Interrupt mapping control */ #define INT_SOURCE 0x30 /* R Source of interrupts */ #define DATA_FORMAT 0x31 /* R/W Data format control */ #define DATAX0 0x32 /* R X-Axis Data 0 */ #define DATAX1 0x33 /* R X-Axis Data 1 */ #define DATAY0 0x34 /* R Y-Axis Data 0 */ #define DATAY1 0x35 /* R Y-Axis Data 1 */ #define DATAZ0 0x36 /* R Z-Axis Data 0 */ #define DATAZ1 0x37 /* R Z-Axis Data 1 */ #define FIFO_CTL 0x38 /* R/W FIFO control */ #define FIFO_STATUS 0x39 /* R FIFO status */ 
#define TAP_SIGN 0x3A /* R Sign and source for tap/double tap */ /* Orientation ADXL346 only */ #define ORIENT_CONF 0x3B /* R/W Orientation configuration */ #define ORIENT 0x3C /* R Orientation status */ /* DEVIDs */ #define ID_ADXL345 0xE5 #define ID_ADXL346 0xE6 /* INT_ENABLE/INT_MAP/INT_SOURCE Bits */ #define DATA_READY (1 << 7) #define SINGLE_TAP (1 << 6) #define DOUBLE_TAP (1 << 5) #define ACTIVITY (1 << 4) #define INACTIVITY (1 << 3) #define FREE_FALL (1 << 2) #define WATERMARK (1 << 1) #define OVERRUN (1 << 0) /* ACT_INACT_CONTROL Bits */ #define ACT_ACDC (1 << 7) #define ACT_X_EN (1 << 6) #define ACT_Y_EN (1 << 5) #define ACT_Z_EN (1 << 4) #define INACT_ACDC (1 << 3) #define INACT_X_EN (1 << 2) #define INACT_Y_EN (1 << 1) #define INACT_Z_EN (1 << 0) /* TAP_AXES Bits */ #define SUPPRESS (1 << 3) #define TAP_X_EN (1 << 2) #define TAP_Y_EN (1 << 1) #define TAP_Z_EN (1 << 0) /* ACT_TAP_STATUS Bits */ #define ACT_X_SRC (1 << 6) #define ACT_Y_SRC (1 << 5) #define ACT_Z_SRC (1 << 4) #define ASLEEP (1 << 3) #define TAP_X_SRC (1 << 2) #define TAP_Y_SRC (1 << 1) #define TAP_Z_SRC (1 << 0) /* BW_RATE Bits */ #define LOW_POWER (1 << 4) #define RATE(x) ((x) & 0xF) /* POWER_CTL Bits */ #define PCTL_LINK (1 << 5) #define PCTL_AUTO_SLEEP (1 << 4) #define PCTL_MEASURE (1 << 3) #define PCTL_SLEEP (1 << 2) #define PCTL_WAKEUP(x) ((x) & 0x3) /* DATA_FORMAT Bits */ #define SELF_TEST (1 << 7) #define SPI (1 << 6) #define INT_INVERT (1 << 5) #define FULL_RES (1 << 3) #define JUSTIFY (1 << 2) #define RANGE(x) ((x) & 0x3) #define RANGE_PM_2g 0 #define RANGE_PM_4g 1 #define RANGE_PM_8g 2 #define RANGE_PM_16g 3 /* * Maximum value our axis may get in full res mode for the input device * (signed 13 bits) */ #define ADXL_FULLRES_MAX_VAL 4096 /* * Maximum value our axis may get in fixed res mode for the input device * (signed 10 bits) */ #define ADXL_FIXEDRES_MAX_VAL 512 /* FIFO_CTL Bits */ #define FIFO_MODE(x) (((x) & 0x3) << 6) #define FIFO_BYPASS 0 #define FIFO_FIFO 1 #define 
FIFO_STREAM 2 #define FIFO_TRIGGER 3 #define TRIGGER (1 << 5) #define SAMPLES(x) ((x) & 0x1F) /* FIFO_STATUS Bits */ #define FIFO_TRIG (1 << 7) #define ENTRIES(x) ((x) & 0x3F) /* TAP_SIGN Bits ADXL346 only */ #define XSIGN (1 << 6) #define YSIGN (1 << 5) #define ZSIGN (1 << 4) #define XTAP (1 << 3) #define YTAP (1 << 2) #define ZTAP (1 << 1) /* ORIENT_CONF ADXL346 only */ #define ORIENT_DEADZONE(x) (((x) & 0x7) << 4) #define ORIENT_DIVISOR(x) ((x) & 0x7) /* ORIENT ADXL346 only */ #define ADXL346_2D_VALID (1 << 6) #define ADXL346_2D_ORIENT(x) (((x) & 0x3) >> 4) #define ADXL346_3D_VALID (1 << 3) #define ADXL346_3D_ORIENT(x) ((x) & 0x7) #define ADXL346_2D_PORTRAIT_POS 0 /* +X */ #define ADXL346_2D_PORTRAIT_NEG 1 /* -X */ #define ADXL346_2D_LANDSCAPE_POS 2 /* +Y */ #define ADXL346_2D_LANDSCAPE_NEG 3 /* -Y */ #define ADXL346_3D_FRONT 3 /* +X */ #define ADXL346_3D_BACK 4 /* -X */ #define ADXL346_3D_RIGHT 2 /* +Y */ #define ADXL346_3D_LEFT 5 /* -Y */ #define ADXL346_3D_TOP 1 /* +Z */ #define ADXL346_3D_BOTTOM 6 /* -Z */ #undef ADXL_DEBUG #define ADXL_X_AXIS 0 #define ADXL_Y_AXIS 1 #define ADXL_Z_AXIS 2 #define AC_READ(ac, reg) ((ac)->bops->read((ac)->dev, reg)) #define AC_WRITE(ac, reg, val) ((ac)->bops->write((ac)->dev, reg, val)) struct axis_triple { int x; int y; int z; }; struct adxl34x { struct device *dev; struct input_dev *input; struct mutex mutex; /* reentrant protection for struct */ struct adxl34x_platform_data pdata; struct axis_triple swcal; struct axis_triple hwcal; struct axis_triple saved; char phys[32]; unsigned orient2d_saved; unsigned orient3d_saved; bool disabled; /* P: mutex */ bool opened; /* P: mutex */ bool suspended; /* P: mutex */ bool fifo_delay; int irq; unsigned model; unsigned int_mask; const struct adxl34x_bus_ops *bops; }; static const struct adxl34x_platform_data adxl34x_default_init = { .tap_threshold = 35, .tap_duration = 3, .tap_latency = 20, .tap_window = 20, .tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN, 
.act_axis_control = 0xFF, .activity_threshold = 6, .inactivity_threshold = 4, .inactivity_time = 3, .free_fall_threshold = 8, .free_fall_time = 0x20, .data_rate = 8, .data_range = ADXL_FULL_RES, .ev_type = EV_ABS, .ev_code_x = ABS_X, /* EV_REL */ .ev_code_y = ABS_Y, /* EV_REL */ .ev_code_z = ABS_Z, /* EV_REL */ .ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY {x,y,z} */ .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK, .fifo_mode = FIFO_STREAM, .watermark = 0, }; static void adxl34x_get_triple(struct adxl34x *ac, struct axis_triple *axis) { short buf[3]; ac->bops->read_block(ac->dev, DATAX0, DATAZ1 - DATAX0 + 1, buf); mutex_lock(&ac->mutex); ac->saved.x = (s16) le16_to_cpu(buf[0]); axis->x = ac->saved.x; ac->saved.y = (s16) le16_to_cpu(buf[1]); axis->y = ac->saved.y; ac->saved.z = (s16) le16_to_cpu(buf[2]); axis->z = ac->saved.z; mutex_unlock(&ac->mutex); } static void adxl34x_service_ev_fifo(struct adxl34x *ac) { struct adxl34x_platform_data *pdata = &ac->pdata; struct axis_triple axis; adxl34x_get_triple(ac, &axis); input_event(ac->input, pdata->ev_type, pdata->ev_code_x, axis.x - ac->swcal.x); input_event(ac->input, pdata->ev_type, pdata->ev_code_y, axis.y - ac->swcal.y); input_event(ac->input, pdata->ev_type, pdata->ev_code_z, axis.z - ac->swcal.z); } static void adxl34x_report_key_single(struct input_dev *input, int key) { input_report_key(input, key, true); input_sync(input); input_report_key(input, key, false); } static void adxl34x_send_key_events(struct adxl34x *ac, struct adxl34x_platform_data *pdata, int status, int press) { int i; for (i = ADXL_X_AXIS; i <= ADXL_Z_AXIS; i++) { if (status & (1 << (ADXL_Z_AXIS - i))) input_report_key(ac->input, pdata->ev_code_tap[i], press); } } static void adxl34x_do_tap(struct adxl34x *ac, struct adxl34x_platform_data *pdata, int status) { adxl34x_send_key_events(ac, pdata, status, true); input_sync(ac->input); adxl34x_send_key_events(ac, pdata, status, false); } static irqreturn_t adxl34x_irq(int irq, void *handle) 
{ struct adxl34x *ac = handle; struct adxl34x_platform_data *pdata = &ac->pdata; int int_stat, tap_stat, samples, orient, orient_code; /* * ACT_TAP_STATUS should be read before clearing the interrupt * Avoid reading ACT_TAP_STATUS in case TAP detection is disabled */ if (pdata->tap_axis_control & (TAP_X_EN | TAP_Y_EN | TAP_Z_EN)) tap_stat = AC_READ(ac, ACT_TAP_STATUS); else tap_stat = 0; int_stat = AC_READ(ac, INT_SOURCE); if (int_stat & FREE_FALL) adxl34x_report_key_single(ac->input, pdata->ev_code_ff); if (int_stat & OVERRUN) dev_dbg(ac->dev, "OVERRUN\n"); if (int_stat & (SINGLE_TAP | DOUBLE_TAP)) { adxl34x_do_tap(ac, pdata, tap_stat); if (int_stat & DOUBLE_TAP) adxl34x_do_tap(ac, pdata, tap_stat); } if (pdata->ev_code_act_inactivity) { if (int_stat & ACTIVITY) input_report_key(ac->input, pdata->ev_code_act_inactivity, 1); if (int_stat & INACTIVITY) input_report_key(ac->input, pdata->ev_code_act_inactivity, 0); } /* * ORIENTATION SENSING ADXL346 only */ if (pdata->orientation_enable) { orient = AC_READ(ac, ORIENT); if ((pdata->orientation_enable & ADXL_EN_ORIENTATION_2D) && (orient & ADXL346_2D_VALID)) { orient_code = ADXL346_2D_ORIENT(orient); /* Report orientation only when it changes */ if (ac->orient2d_saved != orient_code) { ac->orient2d_saved = orient_code; adxl34x_report_key_single(ac->input, pdata->ev_codes_orient_2d[orient_code]); } } if ((pdata->orientation_enable & ADXL_EN_ORIENTATION_3D) && (orient & ADXL346_3D_VALID)) { orient_code = ADXL346_3D_ORIENT(orient) - 1; /* Report orientation only when it changes */ if (ac->orient3d_saved != orient_code) { ac->orient3d_saved = orient_code; adxl34x_report_key_single(ac->input, pdata->ev_codes_orient_3d[orient_code]); } } } if (int_stat & (DATA_READY | WATERMARK)) { if (pdata->fifo_mode) samples = ENTRIES(AC_READ(ac, FIFO_STATUS)) + 1; else samples = 1; for (; samples > 0; samples--) { adxl34x_service_ev_fifo(ac); /* * To ensure that the FIFO has * completely popped, there must be at least 5 us between * the 
end of reading the data registers, signified by the * transition to register 0x38 from 0x37 or the CS pin * going high, and the start of new reads of the FIFO or * reading the FIFO_STATUS register. For SPI operation at * 1.5 MHz or lower, the register addressing portion of the * transmission is sufficient delay to ensure the FIFO has * completely popped. It is necessary for SPI operation * greater than 1.5 MHz to de-assert the CS pin to ensure a * total of 5 us, which is at most 3.4 us at 5 MHz * operation. */ if (ac->fifo_delay && (samples > 1)) udelay(3); } } input_sync(ac->input); return IRQ_HANDLED; } static void __adxl34x_disable(struct adxl34x *ac) { /* * A '0' places the ADXL34x into standby mode * with minimum power consumption. */ AC_WRITE(ac, POWER_CTL, 0); } static void __adxl34x_enable(struct adxl34x *ac) { AC_WRITE(ac, POWER_CTL, ac->pdata.power_mode | PCTL_MEASURE); } void adxl34x_suspend(struct adxl34x *ac) { mutex_lock(&ac->mutex); if (!ac->suspended && !ac->disabled && ac->opened) __adxl34x_disable(ac); ac->suspended = true; mutex_unlock(&ac->mutex); } EXPORT_SYMBOL_GPL(adxl34x_suspend); void adxl34x_resume(struct adxl34x *ac) { mutex_lock(&ac->mutex); if (ac->suspended && !ac->disabled && ac->opened) __adxl34x_enable(ac); ac->suspended = false; mutex_unlock(&ac->mutex); } EXPORT_SYMBOL_GPL(adxl34x_resume); static ssize_t adxl34x_disable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adxl34x *ac = dev_get_drvdata(dev); return sprintf(buf, "%u\n", ac->disabled); } static ssize_t adxl34x_disable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adxl34x *ac = dev_get_drvdata(dev); unsigned long val; int error; error = strict_strtoul(buf, 10, &val); if (error) return error; mutex_lock(&ac->mutex); if (!ac->suspended && ac->opened) { if (val) { if (!ac->disabled) __adxl34x_disable(ac); } else { if (ac->disabled) __adxl34x_enable(ac); } } ac->disabled = !!val; 
mutex_unlock(&ac->mutex); return count; } static DEVICE_ATTR(disable, 0664, adxl34x_disable_show, adxl34x_disable_store); static ssize_t adxl34x_calibrate_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adxl34x *ac = dev_get_drvdata(dev); ssize_t count; mutex_lock(&ac->mutex); count = sprintf(buf, "%d,%d,%d\n", ac->hwcal.x * 4 + ac->swcal.x, ac->hwcal.y * 4 + ac->swcal.y, ac->hwcal.z * 4 + ac->swcal.z); mutex_unlock(&ac->mutex); return count; } static ssize_t adxl34x_calibrate_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adxl34x *ac = dev_get_drvdata(dev); /* * Hardware offset calibration has a resolution of 15.6 mg/LSB. * We use HW calibration and handle the remaining bits in SW. (4mg/LSB) */ mutex_lock(&ac->mutex); ac->hwcal.x -= (ac->saved.x / 4); ac->swcal.x = ac->saved.x % 4; ac->hwcal.y -= (ac->saved.y / 4); ac->swcal.y = ac->saved.y % 4; ac->hwcal.z -= (ac->saved.z / 4); ac->swcal.z = ac->saved.z % 4; AC_WRITE(ac, OFSX, (s8) ac->hwcal.x); AC_WRITE(ac, OFSY, (s8) ac->hwcal.y); AC_WRITE(ac, OFSZ, (s8) ac->hwcal.z); mutex_unlock(&ac->mutex); return count; } static DEVICE_ATTR(calibrate, 0664, adxl34x_calibrate_show, adxl34x_calibrate_store); static ssize_t adxl34x_rate_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adxl34x *ac = dev_get_drvdata(dev); return sprintf(buf, "%u\n", RATE(ac->pdata.data_rate)); } static ssize_t adxl34x_rate_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adxl34x *ac = dev_get_drvdata(dev); unsigned long val; int error; error = strict_strtoul(buf, 10, &val); if (error) return error; mutex_lock(&ac->mutex); ac->pdata.data_rate = RATE(val); AC_WRITE(ac, BW_RATE, ac->pdata.data_rate | (ac->pdata.low_power_mode ? 
LOW_POWER : 0)); mutex_unlock(&ac->mutex); return count; } static DEVICE_ATTR(rate, 0664, adxl34x_rate_show, adxl34x_rate_store); static ssize_t adxl34x_autosleep_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adxl34x *ac = dev_get_drvdata(dev); return sprintf(buf, "%u\n", ac->pdata.power_mode & (PCTL_AUTO_SLEEP | PCTL_LINK) ? 1 : 0); } static ssize_t adxl34x_autosleep_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adxl34x *ac = dev_get_drvdata(dev); unsigned long val; int error; error = strict_strtoul(buf, 10, &val); if (error) return error; mutex_lock(&ac->mutex); if (val) ac->pdata.power_mode |= (PCTL_AUTO_SLEEP | PCTL_LINK); else ac->pdata.power_mode &= ~(PCTL_AUTO_SLEEP | PCTL_LINK); if (!ac->disabled && !ac->suspended && ac->opened) AC_WRITE(ac, POWER_CTL, ac->pdata.power_mode | PCTL_MEASURE); mutex_unlock(&ac->mutex); return count; } static DEVICE_ATTR(autosleep, 0664, adxl34x_autosleep_show, adxl34x_autosleep_store); static ssize_t adxl34x_position_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adxl34x *ac = dev_get_drvdata(dev); ssize_t count; mutex_lock(&ac->mutex); count = sprintf(buf, "(%d, %d, %d)\n", ac->saved.x, ac->saved.y, ac->saved.z); mutex_unlock(&ac->mutex); return count; } static DEVICE_ATTR(position, S_IRUGO, adxl34x_position_show, NULL); #ifdef ADXL_DEBUG static ssize_t adxl34x_write_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adxl34x *ac = dev_get_drvdata(dev); unsigned long val; int error; /* * This allows basic ADXL register write access for debug purposes. 
*/ error = strict_strtoul(buf, 16, &val); if (error) return error; mutex_lock(&ac->mutex); AC_WRITE(ac, val >> 8, val & 0xFF); mutex_unlock(&ac->mutex); return count; } static DEVICE_ATTR(write, 0664, NULL, adxl34x_write_store); #endif static struct attribute *adxl34x_attributes[] = { &dev_attr_disable.attr, &dev_attr_calibrate.attr, &dev_attr_rate.attr, &dev_attr_autosleep.attr, &dev_attr_position.attr, #ifdef ADXL_DEBUG &dev_attr_write.attr, #endif NULL }; static const struct attribute_group adxl34x_attr_group = { .attrs = adxl34x_attributes, }; static int adxl34x_input_open(struct input_dev *input) { struct adxl34x *ac = input_get_drvdata(input); mutex_lock(&ac->mutex); if (!ac->suspended && !ac->disabled) __adxl34x_enable(ac); ac->opened = true; mutex_unlock(&ac->mutex); return 0; } static void adxl34x_input_close(struct input_dev *input) { struct adxl34x *ac = input_get_drvdata(input); mutex_lock(&ac->mutex); if (!ac->suspended && !ac->disabled) __adxl34x_disable(ac); ac->opened = false; mutex_unlock(&ac->mutex); } struct adxl34x *adxl34x_probe(struct device *dev, int irq, bool fifo_delay_default, const struct adxl34x_bus_ops *bops) { struct adxl34x *ac; struct input_dev *input_dev; const struct adxl34x_platform_data *pdata; int err, range, i; unsigned char revid; if (!irq) { dev_err(dev, "no IRQ?\n"); err = -ENODEV; goto err_out; } ac = kzalloc(sizeof(*ac), GFP_KERNEL); input_dev = input_allocate_device(); if (!ac || !input_dev) { err = -ENOMEM; goto err_free_mem; } ac->fifo_delay = fifo_delay_default; pdata = dev->platform_data; if (!pdata) { dev_dbg(dev, "No platform data: Using default initialization\n"); pdata = &adxl34x_default_init; } ac->pdata = *pdata; pdata = &ac->pdata; ac->input = input_dev; ac->dev = dev; ac->irq = irq; ac->bops = bops; mutex_init(&ac->mutex); input_dev->name = "ADXL34x accelerometer"; revid = ac->bops->read(dev, DEVID); switch (revid) { case ID_ADXL345: ac->model = 345; break; case ID_ADXL346: ac->model = 346; break; default: 
dev_err(dev, "Failed to probe %s\n", input_dev->name); err = -ENODEV; goto err_free_mem; } snprintf(ac->phys, sizeof(ac->phys), "%s/input0", dev_name(dev)); input_dev->phys = ac->phys; input_dev->dev.parent = dev; input_dev->id.product = ac->model; input_dev->id.bustype = bops->bustype; input_dev->open = adxl34x_input_open; input_dev->close = adxl34x_input_close; input_set_drvdata(input_dev, ac); __set_bit(ac->pdata.ev_type, input_dev->evbit); if (ac->pdata.ev_type == EV_REL) { __set_bit(REL_X, input_dev->relbit); __set_bit(REL_Y, input_dev->relbit); __set_bit(REL_Z, input_dev->relbit); } else { /* EV_ABS */ __set_bit(ABS_X, input_dev->absbit); __set_bit(ABS_Y, input_dev->absbit); __set_bit(ABS_Z, input_dev->absbit); if (pdata->data_range & FULL_RES) range = ADXL_FULLRES_MAX_VAL; /* Signed 13-bit */ else range = ADXL_FIXEDRES_MAX_VAL; /* Signed 10-bit */ input_set_abs_params(input_dev, ABS_X, -range, range, 3, 3); input_set_abs_params(input_dev, ABS_Y, -range, range, 3, 3); input_set_abs_params(input_dev, ABS_Z, -range, range, 3, 3); } __set_bit(EV_KEY, input_dev->evbit); __set_bit(pdata->ev_code_tap[ADXL_X_AXIS], input_dev->keybit); __set_bit(pdata->ev_code_tap[ADXL_Y_AXIS], input_dev->keybit); __set_bit(pdata->ev_code_tap[ADXL_Z_AXIS], input_dev->keybit); if (pdata->ev_code_ff) { ac->int_mask = FREE_FALL; __set_bit(pdata->ev_code_ff, input_dev->keybit); } if (pdata->ev_code_act_inactivity) __set_bit(pdata->ev_code_act_inactivity, input_dev->keybit); ac->int_mask |= ACTIVITY | INACTIVITY; if (pdata->watermark) { ac->int_mask |= WATERMARK; if (!FIFO_MODE(pdata->fifo_mode)) ac->pdata.fifo_mode |= FIFO_STREAM; } else { ac->int_mask |= DATA_READY; } if (pdata->tap_axis_control & (TAP_X_EN | TAP_Y_EN | TAP_Z_EN)) ac->int_mask |= SINGLE_TAP | DOUBLE_TAP; if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS) ac->fifo_delay = false; ac->bops->write(dev, POWER_CTL, 0); err = request_threaded_irq(ac->irq, NULL, adxl34x_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, dev_name(dev), ac); if 
(err) { dev_err(dev, "irq %d busy?\n", ac->irq); goto err_free_mem; } err = sysfs_create_group(&dev->kobj, &adxl34x_attr_group); if (err) goto err_free_irq; err = input_register_device(input_dev); if (err) goto err_remove_attr; AC_WRITE(ac, THRESH_TAP, pdata->tap_threshold); AC_WRITE(ac, OFSX, pdata->x_axis_offset); ac->hwcal.x = pdata->x_axis_offset; AC_WRITE(ac, OFSY, pdata->y_axis_offset); ac->hwcal.y = pdata->y_axis_offset; AC_WRITE(ac, OFSZ, pdata->z_axis_offset); ac->hwcal.z = pdata->z_axis_offset; AC_WRITE(ac, THRESH_TAP, pdata->tap_threshold); AC_WRITE(ac, DUR, pdata->tap_duration); AC_WRITE(ac, LATENT, pdata->tap_latency); AC_WRITE(ac, WINDOW, pdata->tap_window); AC_WRITE(ac, THRESH_ACT, pdata->activity_threshold); AC_WRITE(ac, THRESH_INACT, pdata->inactivity_threshold); AC_WRITE(ac, TIME_INACT, pdata->inactivity_time); AC_WRITE(ac, THRESH_FF, pdata->free_fall_threshold); AC_WRITE(ac, TIME_FF, pdata->free_fall_time); AC_WRITE(ac, TAP_AXES, pdata->tap_axis_control); AC_WRITE(ac, ACT_INACT_CTL, pdata->act_axis_control); AC_WRITE(ac, BW_RATE, RATE(ac->pdata.data_rate) | (pdata->low_power_mode ? 
LOW_POWER : 0)); AC_WRITE(ac, DATA_FORMAT, pdata->data_range); AC_WRITE(ac, FIFO_CTL, FIFO_MODE(pdata->fifo_mode) | SAMPLES(pdata->watermark)); if (pdata->use_int2) { /* Map all INTs to INT2 */ AC_WRITE(ac, INT_MAP, ac->int_mask | OVERRUN); } else { /* Map all INTs to INT1 */ AC_WRITE(ac, INT_MAP, 0); } if (ac->model == 346 && ac->pdata.orientation_enable) { AC_WRITE(ac, ORIENT_CONF, ORIENT_DEADZONE(ac->pdata.deadzone_angle) | ORIENT_DIVISOR(ac->pdata.divisor_length)); ac->orient2d_saved = 1234; ac->orient3d_saved = 1234; if (pdata->orientation_enable & ADXL_EN_ORIENTATION_3D) for (i = 0; i < ARRAY_SIZE(pdata->ev_codes_orient_3d); i++) __set_bit(pdata->ev_codes_orient_3d[i], input_dev->keybit); if (pdata->orientation_enable & ADXL_EN_ORIENTATION_2D) for (i = 0; i < ARRAY_SIZE(pdata->ev_codes_orient_2d); i++) __set_bit(pdata->ev_codes_orient_2d[i], input_dev->keybit); } else { ac->pdata.orientation_enable = 0; } AC_WRITE(ac, INT_ENABLE, ac->int_mask | OVERRUN); ac->pdata.power_mode &= (PCTL_AUTO_SLEEP | PCTL_LINK); return ac; err_remove_attr: sysfs_remove_group(&dev->kobj, &adxl34x_attr_group); err_free_irq: free_irq(ac->irq, ac); err_free_mem: input_free_device(input_dev); kfree(ac); err_out: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(adxl34x_probe); int adxl34x_remove(struct adxl34x *ac) { sysfs_remove_group(&ac->dev->kobj, &adxl34x_attr_group); free_irq(ac->irq, ac); input_unregister_device(ac->input); dev_dbg(ac->dev, "unregistered accelerometer\n"); kfree(ac); return 0; } EXPORT_SYMBOL_GPL(adxl34x_remove); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("ADXL345/346 Three-Axis Digital Accelerometer Driver"); MODULE_LICENSE("GPL");
gpl-2.0
NEKTech-Labs/wrapfs-kernel-linux-3.17
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
3697
3725
/* * pmi backend for the cbe_cpufreq driver * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 * * Author: Christian Krafft <krafft@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/of_platform.h> #include <asm/processor.h> #include <asm/prom.h> #include <asm/pmi.h> #include <asm/cell-regs.h> #ifdef DEBUG #include <asm/time.h> #endif #include "ppc_cbe_cpufreq.h" static u8 pmi_slow_mode_limit[MAX_CBE]; bool cbe_cpufreq_has_pmi = false; EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi); /* * hardware specific functions */ int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode) { int ret; pmi_message_t pmi_msg; #ifdef DEBUG long time; #endif pmi_msg.type = PMI_TYPE_FREQ_CHANGE; pmi_msg.data1 = cbe_cpu_to_node(cpu); pmi_msg.data2 = pmode; #ifdef DEBUG time = jiffies; #endif pmi_send_message(pmi_msg); #ifdef DEBUG time = jiffies - time; time = jiffies_to_msecs(time); pr_debug("had to wait %lu ms for a transition using " \ "PMI\n", time); #endif ret = pmi_msg.data2; pr_debug("PMI returned slow mode %d\n", ret); return ret; } EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi); static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) { u8 node, slow_mode; BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE); node = pmi_msg.data1; slow_mode = 
pmi_msg.data2; pmi_slow_mode_limit[node] = slow_mode; pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); } static int pmi_notifier(struct notifier_block *nb, unsigned long event, void *data) { struct cpufreq_policy *policy = data; struct cpufreq_frequency_table *cbe_freqs; u8 node; /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE * and CPUFREQ_NOTIFY policy events?) */ if (event == CPUFREQ_START) return 0; cbe_freqs = cpufreq_frequency_get_table(policy->cpu); node = cbe_cpu_to_node(policy->cpu); pr_debug("got notified, event=%lu, node=%u\n", event, node); if (pmi_slow_mode_limit[node] != 0) { pr_debug("limiting node %d to slow mode %d\n", node, pmi_slow_mode_limit[node]); cpufreq_verify_within_limits(policy, 0, cbe_freqs[pmi_slow_mode_limit[node]].frequency); } return 0; } static struct notifier_block pmi_notifier_block = { .notifier_call = pmi_notifier, }; static struct pmi_handler cbe_pmi_handler = { .type = PMI_TYPE_FREQ_CHANGE, .handle_pmi_message = cbe_cpufreq_handle_pmi, }; static int __init cbe_cpufreq_pmi_init(void) { cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0; if (!cbe_cpufreq_has_pmi) return -ENODEV; cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); return 0; } static void __exit cbe_cpufreq_pmi_exit(void) { cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); pmi_unregister_handler(&cbe_pmi_handler); } module_init(cbe_cpufreq_pmi_init); module_exit(cbe_cpufreq_pmi_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
gpl-2.0
Ezekeel/android-3.0
arch/powerpc/platforms/iseries/pci.c
3697
23042
/* * Copyright (C) 2001 Allan Trautman, IBM Corporation * Copyright (C) 2005,2007 Stephen Rothwell, IBM Corp * * iSeries specific routines for PCI. * * Based on code from pci.c and iSeries_pci.c 32bit * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #undef DEBUG #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/of.h> #include <linux/ratelimit.h> #include <asm/types.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/iommu.h> #include <asm/abs_addr.h> #include <asm/firmware.h> #include <asm/iseries/hv_types.h> #include <asm/iseries/hv_call_xm.h> #include <asm/iseries/mf.h> #include <asm/iseries/iommu.h> #include <asm/ppc-pci.h> #include "irq.h" #include "pci.h" #include "call_pci.h" #define PCI_RETRY_MAX 3 static int limit_pci_retries = 1; /* Set Retry Error on. */ /* * Table defines * Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space. 
*/ #define IOMM_TABLE_MAX_ENTRIES 1024 #define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL #define BASE_IO_MEMORY 0xE000000000000000UL #define END_IO_MEMORY 0xEFFFFFFFFFFFFFFFUL static unsigned long max_io_memory = BASE_IO_MEMORY; static long current_iomm_table_entry; /* * Lookup Tables. */ static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES]; static u64 ds_addr_table[IOMM_TABLE_MAX_ENTRIES]; static DEFINE_SPINLOCK(iomm_table_lock); /* * Generate a Direct Select Address for the Hypervisor */ static inline u64 iseries_ds_addr(struct device_node *node) { struct pci_dn *pdn = PCI_DN(node); const u32 *sbp = of_get_property(node, "linux,subbus", NULL); return ((u64)pdn->busno << 48) + ((u64)(sbp ? *sbp : 0) << 40) + ((u64)0x10 << 32); } /* * Size of Bus VPD data */ #define BUS_VPDSIZE 1024 /* * Bus Vpd Tags */ #define VPD_END_OF_AREA 0x79 #define VPD_ID_STRING 0x82 #define VPD_VENDOR_AREA 0x84 /* * Mfg Area Tags */ #define VPD_FRU_FRAME_ID 0x4649 /* "FI" */ #define VPD_SLOT_MAP_FORMAT 0x4D46 /* "MF" */ #define VPD_SLOT_MAP 0x534D /* "SM" */ /* * Structures of the areas */ struct mfg_vpd_area { u16 tag; u8 length; u8 data1; u8 data2; }; #define MFG_ENTRY_SIZE 3 struct slot_map { u8 agent; u8 secondary_agent; u8 phb; char card_location[3]; char parms[8]; char reserved[2]; }; #define SLOT_ENTRY_SIZE 16 /* * Parse the Slot Area */ static void __init iseries_parse_slot_area(struct slot_map *map, int len, HvAgentId agent, u8 *phb, char card[4]) { /* * Parse Slot label until we find the one requested */ while (len > 0) { if (map->agent == agent) { /* * If Phb wasn't found, grab the entry first one found. */ if (*phb == 0xff) *phb = map->phb; /* Found it, extract the data. 
*/ if (map->phb == *phb) { memcpy(card, &map->card_location, 3); card[3] = 0; break; } } /* Point to the next Slot */ map = (struct slot_map *)((char *)map + SLOT_ENTRY_SIZE); len -= SLOT_ENTRY_SIZE; } } /* * Parse the Mfg Area */ static void __init iseries_parse_mfg_area(struct mfg_vpd_area *area, int len, HvAgentId agent, u8 *phb, u8 *frame, char card[4]) { u16 slot_map_fmt = 0; /* Parse Mfg Data */ while (len > 0) { int mfg_tag_len = area->length; /* Frame ID (FI 4649020310 ) */ if (area->tag == VPD_FRU_FRAME_ID) *frame = area->data1; /* Slot Map Format (MF 4D46020004 ) */ else if (area->tag == VPD_SLOT_MAP_FORMAT) slot_map_fmt = (area->data1 * 256) + area->data2; /* Slot Map (SM 534D90 */ else if (area->tag == VPD_SLOT_MAP) { struct slot_map *slot_map; if (slot_map_fmt == 0x1004) slot_map = (struct slot_map *)((char *)area + MFG_ENTRY_SIZE + 1); else slot_map = (struct slot_map *)((char *)area + MFG_ENTRY_SIZE); iseries_parse_slot_area(slot_map, mfg_tag_len, agent, phb, card); } /* * Point to the next Mfg Area * Use defined size, sizeof give wrong answer */ area = (struct mfg_vpd_area *)((char *)area + mfg_tag_len + MFG_ENTRY_SIZE); len -= (mfg_tag_len + MFG_ENTRY_SIZE); } } /* * Look for "BUS".. Data is not Null terminated. * PHBID of 0xFF indicates PHB was not found in VPD Data. 
*/ static u8 __init iseries_parse_phbid(u8 *area, int len) { while (len > 0) { if ((*area == 'B') && (*(area + 1) == 'U') && (*(area + 2) == 'S')) { area += 3; while (*area == ' ') area++; return *area & 0x0F; } area++; len--; } return 0xff; } /* * Parse out the VPD Areas */ static void __init iseries_parse_vpd(u8 *data, int data_len, HvAgentId agent, u8 *frame, char card[4]) { u8 phb = 0xff; while (data_len > 0) { int len; u8 tag = *data; if (tag == VPD_END_OF_AREA) break; len = *(data + 1) + (*(data + 2) * 256); data += 3; data_len -= 3; if (tag == VPD_ID_STRING) phb = iseries_parse_phbid(data, len); else if (tag == VPD_VENDOR_AREA) iseries_parse_mfg_area((struct mfg_vpd_area *)data, len, agent, &phb, frame, card); /* Point to next Area. */ data += len; data_len -= len; } } static int __init iseries_get_location_code(u16 bus, HvAgentId agent, u8 *frame, char card[4]) { int status = 0; int bus_vpd_len = 0; u8 *bus_vpd = kmalloc(BUS_VPDSIZE, GFP_KERNEL); if (bus_vpd == NULL) { printk("PCI: Bus VPD Buffer allocation failure.\n"); return 0; } bus_vpd_len = HvCallPci_getBusVpd(bus, iseries_hv_addr(bus_vpd), BUS_VPDSIZE); if (bus_vpd_len == 0) { printk("PCI: Bus VPD Buffer zero length.\n"); goto out_free; } /* printk("PCI: bus_vpd: %p, %d\n",bus_vpd, bus_vpd_len); */ /* Make sure this is what I think it is */ if (*bus_vpd != VPD_ID_STRING) { printk("PCI: Bus VPD Buffer missing starting tag.\n"); goto out_free; } iseries_parse_vpd(bus_vpd, bus_vpd_len, agent, frame, card); status = 1; out_free: kfree(bus_vpd); return status; } /* * Prints the device information. * - Pass in pci_dev* pointer to the device. 
* - Pass in the device count * * Format: * PCI: Bus 0, Device 26, Vendor 0x12AE Frame 1, Card C10 Ethernet * controller */ static void __init iseries_device_information(struct pci_dev *pdev, u16 bus, HvSubBusNumber subbus) { u8 frame = 0; char card[4]; HvAgentId agent; agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus), ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus)); if (iseries_get_location_code(bus, agent, &frame, card)) { printk(KERN_INFO "PCI: %s, Vendor %04X Frame%3d, " "Card %4s 0x%04X\n", pci_name(pdev), pdev->vendor, frame, card, (int)(pdev->class >> 8)); } } /* * iomm_table_allocate_entry * * Adds pci_dev entry in address translation table * * - Allocates the number of entries required in table base on BAR * size. * - Allocates starting at BASE_IO_MEMORY and increases. * - The size is round up to be a multiple of entry size. * - CurrentIndex is incremented to keep track of the last entry. * - Builds the resource entry for allocated BARs. */ static void __init iomm_table_allocate_entry(struct pci_dev *dev, int bar_num) { struct resource *bar_res = &dev->resource[bar_num]; long bar_size = pci_resource_len(dev, bar_num); struct device_node *dn = pci_device_to_OF_node(dev); /* * No space to allocate, quick exit, skip Allocation. */ if (bar_size == 0) return; /* * Set Resource values. */ spin_lock(&iomm_table_lock); bar_res->start = BASE_IO_MEMORY + IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry; bar_res->end = bar_res->start + bar_size - 1; /* * Allocate the number of table entries needed for BAR. */ while (bar_size > 0 ) { iomm_table[current_iomm_table_entry] = dn; ds_addr_table[current_iomm_table_entry] = iseries_ds_addr(dn) | (bar_num << 24); bar_size -= IOMM_TABLE_ENTRY_SIZE; ++current_iomm_table_entry; } max_io_memory = BASE_IO_MEMORY + IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry; spin_unlock(&iomm_table_lock); } /* * allocate_device_bars * * - Allocates ALL pci_dev BAR's and updates the resources with the * BAR value. 
BARS with zero length will have the resources * The HvCallPci_getBarParms is used to get the size of the BAR * space. It calls iomm_table_allocate_entry to allocate * each entry. * - Loops through The Bar resources(0 - 5) including the ROM * is resource(6). */ static void __init allocate_device_bars(struct pci_dev *dev) { int bar_num; for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) iomm_table_allocate_entry(dev, bar_num); } /* * Log error information to system console. * Filter out the device not there errors. * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx */ static void pci_log_error(char *error, int bus, int subbus, int agent, int hv_res) { if (hv_res == 0x0302) return; printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X", error, bus, subbus, agent, hv_res); } /* * Look down the chain to find the matching Device Device */ static struct device_node *find_device_node(int bus, int devfn) { struct device_node *node; for (node = NULL; (node = of_find_all_nodes(node)); ) { struct pci_dn *pdn = PCI_DN(node); if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn)) return node; } return NULL; } /* * iSeries_pcibios_fixup_resources * * Fixes up all resources for devices */ void __init iSeries_pcibios_fixup_resources(struct pci_dev *pdev) { const u32 *agent; const u32 *sub_bus; unsigned char bus = pdev->bus->number; struct device_node *node; int i; node = pci_device_to_OF_node(pdev); pr_debug("PCI: iSeries %s, pdev %p, node %p\n", pci_name(pdev), pdev, node); if (!node) { printk("PCI: %s disabled, device tree entry not found !\n", pci_name(pdev)); for (i = 0; i <= PCI_ROM_RESOURCE; i++) pdev->resource[i].flags = 0; return; } sub_bus = of_get_property(node, "linux,subbus", NULL); agent = of_get_property(node, "linux,agent-id", NULL); if (agent && sub_bus) { u8 irq = iSeries_allocate_IRQ(bus, 0, *sub_bus); int err; err = HvCallXm_connectBusUnit(bus, 
*sub_bus, *agent, irq); if (err) pci_log_error("Connect Bus Unit", bus, *sub_bus, *agent, err); else { err = HvCallPci_configStore8(bus, *sub_bus, *agent, PCI_INTERRUPT_LINE, irq); if (err) pci_log_error("PciCfgStore Irq Failed!", bus, *sub_bus, *agent, err); else pdev->irq = irq; } } allocate_device_bars(pdev); if (likely(sub_bus)) iseries_device_information(pdev, bus, *sub_bus); else printk(KERN_ERR "PCI: Device node %s has missing or invalid " "linux,subbus property\n", node->full_name); } /* * iSeries_pci_final_fixup(void) */ void __init iSeries_pci_final_fixup(void) { /* Fix up at the device node and pci_dev relationship */ mf_display_src(0xC9000100); iSeries_activate_IRQs(); mf_display_src(0xC9000200); } /* * Config space read and write functions. * For now at least, we look for the device node for the bus and devfn * that we are asked to access. It may be possible to translate the devfn * to a subbus and deviceid more directly. */ static u64 hv_cfg_read_func[4] = { HvCallPciConfigLoad8, HvCallPciConfigLoad16, HvCallPciConfigLoad32, HvCallPciConfigLoad32 }; static u64 hv_cfg_write_func[4] = { HvCallPciConfigStore8, HvCallPciConfigStore16, HvCallPciConfigStore32, HvCallPciConfigStore32 }; /* * Read PCI config space */ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int size, u32 *val) { struct device_node *node = find_device_node(bus->number, devfn); u64 fn; struct HvCallPci_LoadReturn ret; if (node == NULL) return PCIBIOS_DEVICE_NOT_FOUND; if (offset > 255) { *val = ~0; return PCIBIOS_BAD_REGISTER_NUMBER; } fn = hv_cfg_read_func[(size - 1) & 3]; HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0); if (ret.rc != 0) { *val = ~0; return PCIBIOS_DEVICE_NOT_FOUND; /* or something */ } *val = ret.value; return 0; } /* * Write PCI config space */ static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int size, u32 val) { struct device_node *node = find_device_node(bus->number, devfn); u64 
fn; u64 ret; if (node == NULL) return PCIBIOS_DEVICE_NOT_FOUND; if (offset > 255) return PCIBIOS_BAD_REGISTER_NUMBER; fn = hv_cfg_write_func[(size - 1) & 3]; ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0); if (ret != 0) return PCIBIOS_DEVICE_NOT_FOUND; return 0; } static struct pci_ops iSeries_pci_ops = { .read = iSeries_pci_read_config, .write = iSeries_pci_write_config }; /* * Check Return Code * -> On Failure, print and log information. * Increment Retry Count, if exceeds max, panic partition. * * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234 * PCI: Device 23.90 ReadL Retry( 1) * PCI: Device 23.90 ReadL Retry Successful(1) */ static int check_return_code(char *type, struct device_node *dn, int *retry, u64 ret) { if (ret != 0) { struct pci_dn *pdn = PCI_DN(dn); (*retry)++; printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n", type, pdn->busno, pdn->devfn, *retry, (int)ret); /* * Bump the retry and check for retry count exceeded. * If, Exceeded, panic the system. */ if (((*retry) > PCI_RETRY_MAX) && (limit_pci_retries > 0)) { mf_display_src(0xB6000103); panic_timeout = 0; panic("PCI: Hardware I/O Error, SRC B6000103, " "Automatic Reboot Disabled.\n"); } return -1; /* Retry Try */ } return 0; } /* * Translate the I/O Address into a device node, bar, and bar offset. * Note: Make sure the passed variable end up on the stack to avoid * the exposure of being device global. 
*/ static inline struct device_node *xlate_iomm_address( const volatile void __iomem *addr, u64 *dsaptr, u64 *bar_offset, const char *func) { unsigned long orig_addr; unsigned long base_addr; unsigned long ind; struct device_node *dn; orig_addr = (unsigned long __force)addr; if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) { static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10); if (__ratelimit(&ratelimit)) printk(KERN_ERR "iSeries_%s: invalid access at IO address %p\n", func, addr); return NULL; } base_addr = orig_addr - BASE_IO_MEMORY; ind = base_addr / IOMM_TABLE_ENTRY_SIZE; dn = iomm_table[ind]; if (dn != NULL) { *dsaptr = ds_addr_table[ind]; *bar_offset = base_addr % IOMM_TABLE_ENTRY_SIZE; } else panic("PCI: Invalid PCI IO address detected!\n"); return dn; } /* * Read MM I/O Instructions for the iSeries * On MM I/O error, all ones are returned and iSeries_pci_IoError is cal * else, data is returned in Big Endian format. */ static u8 iseries_readb(const volatile void __iomem *addr) { u64 bar_offset; u64 dsa; int retry = 0; struct HvCallPci_LoadReturn ret; struct device_node *dn = xlate_iomm_address(addr, &dsa, &bar_offset, "read_byte"); if (dn == NULL) return 0xff; do { HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, bar_offset, 0); } while (check_return_code("RDB", dn, &retry, ret.rc) != 0); return ret.value; } static u16 iseries_readw_be(const volatile void __iomem *addr) { u64 bar_offset; u64 dsa; int retry = 0; struct HvCallPci_LoadReturn ret; struct device_node *dn = xlate_iomm_address(addr, &dsa, &bar_offset, "read_word"); if (dn == NULL) return 0xffff; do { HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa, bar_offset, 0); } while (check_return_code("RDW", dn, &retry, ret.rc) != 0); return ret.value; } static u32 iseries_readl_be(const volatile void __iomem *addr) { u64 bar_offset; u64 dsa; int retry = 0; struct HvCallPci_LoadReturn ret; struct device_node *dn = xlate_iomm_address(addr, &dsa, &bar_offset, "read_long"); if (dn == NULL) return 
0xffffffff; do { HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa, bar_offset, 0); } while (check_return_code("RDL", dn, &retry, ret.rc) != 0); return ret.value; } /* * Write MM I/O Instructions for the iSeries * */ static void iseries_writeb(u8 data, volatile void __iomem *addr) { u64 bar_offset; u64 dsa; int retry = 0; u64 rc; struct device_node *dn = xlate_iomm_address(addr, &dsa, &bar_offset, "write_byte"); if (dn == NULL) return; do { rc = HvCall4(HvCallPciBarStore8, dsa, bar_offset, data, 0); } while (check_return_code("WWB", dn, &retry, rc) != 0); } static void iseries_writew_be(u16 data, volatile void __iomem *addr) { u64 bar_offset; u64 dsa; int retry = 0; u64 rc; struct device_node *dn = xlate_iomm_address(addr, &dsa, &bar_offset, "write_word"); if (dn == NULL) return; do { rc = HvCall4(HvCallPciBarStore16, dsa, bar_offset, data, 0); } while (check_return_code("WWW", dn, &retry, rc) != 0); } static void iseries_writel_be(u32 data, volatile void __iomem *addr) { u64 bar_offset; u64 dsa; int retry = 0; u64 rc; struct device_node *dn = xlate_iomm_address(addr, &dsa, &bar_offset, "write_long"); if (dn == NULL) return; do { rc = HvCall4(HvCallPciBarStore32, dsa, bar_offset, data, 0); } while (check_return_code("WWL", dn, &retry, rc) != 0); } static u16 iseries_readw(const volatile void __iomem *addr) { return le16_to_cpu(iseries_readw_be(addr)); } static u32 iseries_readl(const volatile void __iomem *addr) { return le32_to_cpu(iseries_readl_be(addr)); } static void iseries_writew(u16 data, volatile void __iomem *addr) { iseries_writew_be(cpu_to_le16(data), addr); } static void iseries_writel(u32 data, volatile void __iomem *addr) { iseries_writel(cpu_to_le32(data), addr); } static void iseries_readsb(const volatile void __iomem *addr, void *buf, unsigned long count) { u8 *dst = buf; while(count-- > 0) *(dst++) = iseries_readb(addr); } static void iseries_readsw(const volatile void __iomem *addr, void *buf, unsigned long count) { u16 *dst = buf; while(count-- > 0) 
*(dst++) = iseries_readw_be(addr); } static void iseries_readsl(const volatile void __iomem *addr, void *buf, unsigned long count) { u32 *dst = buf; while(count-- > 0) *(dst++) = iseries_readl_be(addr); } static void iseries_writesb(volatile void __iomem *addr, const void *buf, unsigned long count) { const u8 *src = buf; while(count-- > 0) iseries_writeb(*(src++), addr); } static void iseries_writesw(volatile void __iomem *addr, const void *buf, unsigned long count) { const u16 *src = buf; while(count-- > 0) iseries_writew_be(*(src++), addr); } static void iseries_writesl(volatile void __iomem *addr, const void *buf, unsigned long count) { const u32 *src = buf; while(count-- > 0) iseries_writel_be(*(src++), addr); } static void iseries_memset_io(volatile void __iomem *addr, int c, unsigned long n) { volatile char __iomem *d = addr; while (n-- > 0) iseries_writeb(c, d++); } static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src, unsigned long n) { char *d = dest; const volatile char __iomem *s = src; while (n-- > 0) *d++ = iseries_readb(s++); } static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n) { const char *s = src; volatile char __iomem *d = dest; while (n-- > 0) iseries_writeb(*s++, d++); } /* We only set MMIO ops. The default PIO ops will be default * to the MMIO ops + pci_io_base which is 0 on iSeries as * expected so both should work. * * Note that we don't implement the readq/writeq versions as * I don't know of an HV call for doing so. Thus, the default * operation will be used instead, which will fault a the value * return by iSeries for MMIO addresses always hits a non mapped * area. This is as good as the BUG() we used to have there. 
*/ static struct ppc_pci_io __initdata iseries_pci_io = { .readb = iseries_readb, .readw = iseries_readw, .readl = iseries_readl, .readw_be = iseries_readw_be, .readl_be = iseries_readl_be, .writeb = iseries_writeb, .writew = iseries_writew, .writel = iseries_writel, .writew_be = iseries_writew_be, .writel_be = iseries_writel_be, .readsb = iseries_readsb, .readsw = iseries_readsw, .readsl = iseries_readsl, .writesb = iseries_writesb, .writesw = iseries_writesw, .writesl = iseries_writesl, .memset_io = iseries_memset_io, .memcpy_fromio = iseries_memcpy_fromio, .memcpy_toio = iseries_memcpy_toio, }; /* * iSeries_pcibios_init * * Description: * This function checks for all possible system PCI host bridges that connect * PCI buses. The system hypervisor is queried as to the guest partition * ownership status. A pci_controller is built for any bus which is partially * owned or fully owned by this guest partition. */ void __init iSeries_pcibios_init(void) { struct pci_controller *phb; struct device_node *root = of_find_node_by_path("/"); struct device_node *node = NULL; /* Install IO hooks */ ppc_pci_io = iseries_pci_io; pci_probe_only = 1; /* iSeries has no IO space in the common sense, it needs to set * the IO base to 0 */ pci_io_base = 0; if (root == NULL) { printk(KERN_CRIT "iSeries_pcibios_init: can't find root " "of device tree\n"); return; } while ((node = of_get_next_child(root, node)) != NULL) { HvBusNumber bus; const u32 *busp; if ((node->type == NULL) || (strcmp(node->type, "pci") != 0)) continue; busp = of_get_property(node, "bus-range", NULL); if (busp == NULL) continue; bus = *busp; printk("bus %d appears to exist\n", bus); phb = pcibios_alloc_controller(node); if (phb == NULL) continue; /* All legacy iSeries PHBs are in domain zero */ phb->global_number = 0; phb->first_busno = bus; phb->last_busno = bus; phb->ops = &iSeries_pci_ops; phb->io_base_virt = (void __iomem *)_IO_BASE; phb->io_resource.flags = IORESOURCE_IO; phb->io_resource.start = 
BASE_IO_MEMORY; phb->io_resource.end = END_IO_MEMORY; phb->io_resource.name = "iSeries PCI IO"; phb->mem_resources[0].flags = IORESOURCE_MEM; phb->mem_resources[0].start = BASE_IO_MEMORY; phb->mem_resources[0].end = END_IO_MEMORY; phb->mem_resources[0].name = "Series PCI MEM"; } of_node_put(root); pci_devs_phb_init(); }
gpl-2.0
TripNRaVeR/caf_kernel_msm_htc_m7
sound/soc/codecs/max98088.c
3697
76616
/* * max98088.c -- MAX98088 ALSA SoC Audio driver * * Copyright 2010 Maxim Integrated Products * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include <linux/slab.h> #include <asm/div64.h> #include <sound/max98088.h> #include "max98088.h" enum max98088_type { MAX98088, MAX98089, }; struct max98088_cdata { unsigned int rate; unsigned int fmt; int eq_sel; }; struct max98088_priv { enum max98088_type devtype; struct max98088_pdata *pdata; unsigned int sysclk; struct max98088_cdata dai[2]; int eq_textcnt; const char **eq_texts; struct soc_enum eq_enum; u8 ina_state; u8 inb_state; unsigned int ex_mode; unsigned int digmic; unsigned int mic1pre; unsigned int mic2pre; unsigned int extmic_mode; }; static const u8 max98088_reg[M98088_REG_CNT] = { 0x00, /* 00 IRQ status */ 0x00, /* 01 MIC status */ 0x00, /* 02 jack status */ 0x00, /* 03 battery voltage */ 0x00, /* 04 */ 0x00, /* 05 */ 0x00, /* 06 */ 0x00, /* 07 */ 0x00, /* 08 */ 0x00, /* 09 */ 0x00, /* 0A */ 0x00, /* 0B */ 0x00, /* 0C */ 0x00, /* 0D */ 0x00, /* 0E */ 0x00, /* 0F interrupt enable */ 0x00, /* 10 master clock */ 0x00, /* 11 DAI1 clock mode */ 0x00, /* 12 DAI1 clock control */ 0x00, /* 13 DAI1 clock control */ 0x00, /* 14 DAI1 format */ 0x00, /* 15 DAI1 clock */ 0x00, /* 16 DAI1 config */ 0x00, /* 17 DAI1 TDM */ 0x00, /* 18 DAI1 filters */ 0x00, /* 19 DAI2 clock mode */ 0x00, /* 1A DAI2 clock control */ 0x00, /* 1B DAI2 clock control */ 0x00, /* 1C DAI2 format */ 0x00, /* 1D DAI2 clock */ 0x00, /* 1E DAI2 config */ 0x00, /* 1F DAI2 TDM */ 0x00, /* 20 
DAI2 filters */ 0x00, /* 21 data config */ 0x00, /* 22 DAC mixer */ 0x00, /* 23 left ADC mixer */ 0x00, /* 24 right ADC mixer */ 0x00, /* 25 left HP mixer */ 0x00, /* 26 right HP mixer */ 0x00, /* 27 HP control */ 0x00, /* 28 left REC mixer */ 0x00, /* 29 right REC mixer */ 0x00, /* 2A REC control */ 0x00, /* 2B left SPK mixer */ 0x00, /* 2C right SPK mixer */ 0x00, /* 2D SPK control */ 0x00, /* 2E sidetone */ 0x00, /* 2F DAI1 playback level */ 0x00, /* 30 DAI1 playback level */ 0x00, /* 31 DAI2 playback level */ 0x00, /* 32 DAI2 playbakc level */ 0x00, /* 33 left ADC level */ 0x00, /* 34 right ADC level */ 0x00, /* 35 MIC1 level */ 0x00, /* 36 MIC2 level */ 0x00, /* 37 INA level */ 0x00, /* 38 INB level */ 0x00, /* 39 left HP volume */ 0x00, /* 3A right HP volume */ 0x00, /* 3B left REC volume */ 0x00, /* 3C right REC volume */ 0x00, /* 3D left SPK volume */ 0x00, /* 3E right SPK volume */ 0x00, /* 3F MIC config */ 0x00, /* 40 MIC threshold */ 0x00, /* 41 excursion limiter filter */ 0x00, /* 42 excursion limiter threshold */ 0x00, /* 43 ALC */ 0x00, /* 44 power limiter threshold */ 0x00, /* 45 power limiter config */ 0x00, /* 46 distortion limiter config */ 0x00, /* 47 audio input */ 0x00, /* 48 microphone */ 0x00, /* 49 level control */ 0x00, /* 4A bypass switches */ 0x00, /* 4B jack detect */ 0x00, /* 4C input enable */ 0x00, /* 4D output enable */ 0xF0, /* 4E bias control */ 0x00, /* 4F DAC power */ 0x0F, /* 50 DAC power */ 0x00, /* 51 system */ 0x00, /* 52 DAI1 EQ1 */ 0x00, /* 53 DAI1 EQ1 */ 0x00, /* 54 DAI1 EQ1 */ 0x00, /* 55 DAI1 EQ1 */ 0x00, /* 56 DAI1 EQ1 */ 0x00, /* 57 DAI1 EQ1 */ 0x00, /* 58 DAI1 EQ1 */ 0x00, /* 59 DAI1 EQ1 */ 0x00, /* 5A DAI1 EQ1 */ 0x00, /* 5B DAI1 EQ1 */ 0x00, /* 5C DAI1 EQ2 */ 0x00, /* 5D DAI1 EQ2 */ 0x00, /* 5E DAI1 EQ2 */ 0x00, /* 5F DAI1 EQ2 */ 0x00, /* 60 DAI1 EQ2 */ 0x00, /* 61 DAI1 EQ2 */ 0x00, /* 62 DAI1 EQ2 */ 0x00, /* 63 DAI1 EQ2 */ 0x00, /* 64 DAI1 EQ2 */ 0x00, /* 65 DAI1 EQ2 */ 0x00, /* 66 DAI1 EQ3 */ 0x00, /* 67 DAI1 EQ3 
*/ 0x00, /* 68 DAI1 EQ3 */ 0x00, /* 69 DAI1 EQ3 */ 0x00, /* 6A DAI1 EQ3 */ 0x00, /* 6B DAI1 EQ3 */ 0x00, /* 6C DAI1 EQ3 */ 0x00, /* 6D DAI1 EQ3 */ 0x00, /* 6E DAI1 EQ3 */ 0x00, /* 6F DAI1 EQ3 */ 0x00, /* 70 DAI1 EQ4 */ 0x00, /* 71 DAI1 EQ4 */ 0x00, /* 72 DAI1 EQ4 */ 0x00, /* 73 DAI1 EQ4 */ 0x00, /* 74 DAI1 EQ4 */ 0x00, /* 75 DAI1 EQ4 */ 0x00, /* 76 DAI1 EQ4 */ 0x00, /* 77 DAI1 EQ4 */ 0x00, /* 78 DAI1 EQ4 */ 0x00, /* 79 DAI1 EQ4 */ 0x00, /* 7A DAI1 EQ5 */ 0x00, /* 7B DAI1 EQ5 */ 0x00, /* 7C DAI1 EQ5 */ 0x00, /* 7D DAI1 EQ5 */ 0x00, /* 7E DAI1 EQ5 */ 0x00, /* 7F DAI1 EQ5 */ 0x00, /* 80 DAI1 EQ5 */ 0x00, /* 81 DAI1 EQ5 */ 0x00, /* 82 DAI1 EQ5 */ 0x00, /* 83 DAI1 EQ5 */ 0x00, /* 84 DAI2 EQ1 */ 0x00, /* 85 DAI2 EQ1 */ 0x00, /* 86 DAI2 EQ1 */ 0x00, /* 87 DAI2 EQ1 */ 0x00, /* 88 DAI2 EQ1 */ 0x00, /* 89 DAI2 EQ1 */ 0x00, /* 8A DAI2 EQ1 */ 0x00, /* 8B DAI2 EQ1 */ 0x00, /* 8C DAI2 EQ1 */ 0x00, /* 8D DAI2 EQ1 */ 0x00, /* 8E DAI2 EQ2 */ 0x00, /* 8F DAI2 EQ2 */ 0x00, /* 90 DAI2 EQ2 */ 0x00, /* 91 DAI2 EQ2 */ 0x00, /* 92 DAI2 EQ2 */ 0x00, /* 93 DAI2 EQ2 */ 0x00, /* 94 DAI2 EQ2 */ 0x00, /* 95 DAI2 EQ2 */ 0x00, /* 96 DAI2 EQ2 */ 0x00, /* 97 DAI2 EQ2 */ 0x00, /* 98 DAI2 EQ3 */ 0x00, /* 99 DAI2 EQ3 */ 0x00, /* 9A DAI2 EQ3 */ 0x00, /* 9B DAI2 EQ3 */ 0x00, /* 9C DAI2 EQ3 */ 0x00, /* 9D DAI2 EQ3 */ 0x00, /* 9E DAI2 EQ3 */ 0x00, /* 9F DAI2 EQ3 */ 0x00, /* A0 DAI2 EQ3 */ 0x00, /* A1 DAI2 EQ3 */ 0x00, /* A2 DAI2 EQ4 */ 0x00, /* A3 DAI2 EQ4 */ 0x00, /* A4 DAI2 EQ4 */ 0x00, /* A5 DAI2 EQ4 */ 0x00, /* A6 DAI2 EQ4 */ 0x00, /* A7 DAI2 EQ4 */ 0x00, /* A8 DAI2 EQ4 */ 0x00, /* A9 DAI2 EQ4 */ 0x00, /* AA DAI2 EQ4 */ 0x00, /* AB DAI2 EQ4 */ 0x00, /* AC DAI2 EQ5 */ 0x00, /* AD DAI2 EQ5 */ 0x00, /* AE DAI2 EQ5 */ 0x00, /* AF DAI2 EQ5 */ 0x00, /* B0 DAI2 EQ5 */ 0x00, /* B1 DAI2 EQ5 */ 0x00, /* B2 DAI2 EQ5 */ 0x00, /* B3 DAI2 EQ5 */ 0x00, /* B4 DAI2 EQ5 */ 0x00, /* B5 DAI2 EQ5 */ 0x00, /* B6 DAI1 biquad */ 0x00, /* B7 DAI1 biquad */ 0x00, /* B8 DAI1 biquad */ 0x00, /* B9 DAI1 biquad */ 0x00, /* BA DAI1 
biquad */ 0x00, /* BB DAI1 biquad */ 0x00, /* BC DAI1 biquad */ 0x00, /* BD DAI1 biquad */ 0x00, /* BE DAI1 biquad */ 0x00, /* BF DAI1 biquad */ 0x00, /* C0 DAI2 biquad */ 0x00, /* C1 DAI2 biquad */ 0x00, /* C2 DAI2 biquad */ 0x00, /* C3 DAI2 biquad */ 0x00, /* C4 DAI2 biquad */ 0x00, /* C5 DAI2 biquad */ 0x00, /* C6 DAI2 biquad */ 0x00, /* C7 DAI2 biquad */ 0x00, /* C8 DAI2 biquad */ 0x00, /* C9 DAI2 biquad */ 0x00, /* CA */ 0x00, /* CB */ 0x00, /* CC */ 0x00, /* CD */ 0x00, /* CE */ 0x00, /* CF */ 0x00, /* D0 */ 0x00, /* D1 */ 0x00, /* D2 */ 0x00, /* D3 */ 0x00, /* D4 */ 0x00, /* D5 */ 0x00, /* D6 */ 0x00, /* D7 */ 0x00, /* D8 */ 0x00, /* D9 */ 0x00, /* DA */ 0x70, /* DB */ 0x00, /* DC */ 0x00, /* DD */ 0x00, /* DE */ 0x00, /* DF */ 0x00, /* E0 */ 0x00, /* E1 */ 0x00, /* E2 */ 0x00, /* E3 */ 0x00, /* E4 */ 0x00, /* E5 */ 0x00, /* E6 */ 0x00, /* E7 */ 0x00, /* E8 */ 0x00, /* E9 */ 0x00, /* EA */ 0x00, /* EB */ 0x00, /* EC */ 0x00, /* ED */ 0x00, /* EE */ 0x00, /* EF */ 0x00, /* F0 */ 0x00, /* F1 */ 0x00, /* F2 */ 0x00, /* F3 */ 0x00, /* F4 */ 0x00, /* F5 */ 0x00, /* F6 */ 0x00, /* F7 */ 0x00, /* F8 */ 0x00, /* F9 */ 0x00, /* FA */ 0x00, /* FB */ 0x00, /* FC */ 0x00, /* FD */ 0x00, /* FE */ 0x00, /* FF */ }; static struct { int readable; int writable; int vol; } max98088_access[M98088_REG_CNT] = { { 0xFF, 0xFF, 1 }, /* 00 IRQ status */ { 0xFF, 0x00, 1 }, /* 01 MIC status */ { 0xFF, 0x00, 1 }, /* 02 jack status */ { 0x1F, 0x1F, 1 }, /* 03 battery voltage */ { 0xFF, 0xFF, 0 }, /* 04 */ { 0xFF, 0xFF, 0 }, /* 05 */ { 0xFF, 0xFF, 0 }, /* 06 */ { 0xFF, 0xFF, 0 }, /* 07 */ { 0xFF, 0xFF, 0 }, /* 08 */ { 0xFF, 0xFF, 0 }, /* 09 */ { 0xFF, 0xFF, 0 }, /* 0A */ { 0xFF, 0xFF, 0 }, /* 0B */ { 0xFF, 0xFF, 0 }, /* 0C */ { 0xFF, 0xFF, 0 }, /* 0D */ { 0xFF, 0xFF, 0 }, /* 0E */ { 0xFF, 0xFF, 0 }, /* 0F interrupt enable */ { 0xFF, 0xFF, 0 }, /* 10 master clock */ { 0xFF, 0xFF, 0 }, /* 11 DAI1 clock mode */ { 0xFF, 0xFF, 0 }, /* 12 DAI1 clock control */ { 0xFF, 0xFF, 0 }, /* 13 DAI1 
clock control */ { 0xFF, 0xFF, 0 }, /* 14 DAI1 format */ { 0xFF, 0xFF, 0 }, /* 15 DAI1 clock */ { 0xFF, 0xFF, 0 }, /* 16 DAI1 config */ { 0xFF, 0xFF, 0 }, /* 17 DAI1 TDM */ { 0xFF, 0xFF, 0 }, /* 18 DAI1 filters */ { 0xFF, 0xFF, 0 }, /* 19 DAI2 clock mode */ { 0xFF, 0xFF, 0 }, /* 1A DAI2 clock control */ { 0xFF, 0xFF, 0 }, /* 1B DAI2 clock control */ { 0xFF, 0xFF, 0 }, /* 1C DAI2 format */ { 0xFF, 0xFF, 0 }, /* 1D DAI2 clock */ { 0xFF, 0xFF, 0 }, /* 1E DAI2 config */ { 0xFF, 0xFF, 0 }, /* 1F DAI2 TDM */ { 0xFF, 0xFF, 0 }, /* 20 DAI2 filters */ { 0xFF, 0xFF, 0 }, /* 21 data config */ { 0xFF, 0xFF, 0 }, /* 22 DAC mixer */ { 0xFF, 0xFF, 0 }, /* 23 left ADC mixer */ { 0xFF, 0xFF, 0 }, /* 24 right ADC mixer */ { 0xFF, 0xFF, 0 }, /* 25 left HP mixer */ { 0xFF, 0xFF, 0 }, /* 26 right HP mixer */ { 0xFF, 0xFF, 0 }, /* 27 HP control */ { 0xFF, 0xFF, 0 }, /* 28 left REC mixer */ { 0xFF, 0xFF, 0 }, /* 29 right REC mixer */ { 0xFF, 0xFF, 0 }, /* 2A REC control */ { 0xFF, 0xFF, 0 }, /* 2B left SPK mixer */ { 0xFF, 0xFF, 0 }, /* 2C right SPK mixer */ { 0xFF, 0xFF, 0 }, /* 2D SPK control */ { 0xFF, 0xFF, 0 }, /* 2E sidetone */ { 0xFF, 0xFF, 0 }, /* 2F DAI1 playback level */ { 0xFF, 0xFF, 0 }, /* 30 DAI1 playback level */ { 0xFF, 0xFF, 0 }, /* 31 DAI2 playback level */ { 0xFF, 0xFF, 0 }, /* 32 DAI2 playbakc level */ { 0xFF, 0xFF, 0 }, /* 33 left ADC level */ { 0xFF, 0xFF, 0 }, /* 34 right ADC level */ { 0xFF, 0xFF, 0 }, /* 35 MIC1 level */ { 0xFF, 0xFF, 0 }, /* 36 MIC2 level */ { 0xFF, 0xFF, 0 }, /* 37 INA level */ { 0xFF, 0xFF, 0 }, /* 38 INB level */ { 0xFF, 0xFF, 0 }, /* 39 left HP volume */ { 0xFF, 0xFF, 0 }, /* 3A right HP volume */ { 0xFF, 0xFF, 0 }, /* 3B left REC volume */ { 0xFF, 0xFF, 0 }, /* 3C right REC volume */ { 0xFF, 0xFF, 0 }, /* 3D left SPK volume */ { 0xFF, 0xFF, 0 }, /* 3E right SPK volume */ { 0xFF, 0xFF, 0 }, /* 3F MIC config */ { 0xFF, 0xFF, 0 }, /* 40 MIC threshold */ { 0xFF, 0xFF, 0 }, /* 41 excursion limiter filter */ { 0xFF, 0xFF, 0 }, /* 42 excursion 
limiter threshold */ { 0xFF, 0xFF, 0 }, /* 43 ALC */ { 0xFF, 0xFF, 0 }, /* 44 power limiter threshold */ { 0xFF, 0xFF, 0 }, /* 45 power limiter config */ { 0xFF, 0xFF, 0 }, /* 46 distortion limiter config */ { 0xFF, 0xFF, 0 }, /* 47 audio input */ { 0xFF, 0xFF, 0 }, /* 48 microphone */ { 0xFF, 0xFF, 0 }, /* 49 level control */ { 0xFF, 0xFF, 0 }, /* 4A bypass switches */ { 0xFF, 0xFF, 0 }, /* 4B jack detect */ { 0xFF, 0xFF, 0 }, /* 4C input enable */ { 0xFF, 0xFF, 0 }, /* 4D output enable */ { 0xFF, 0xFF, 0 }, /* 4E bias control */ { 0xFF, 0xFF, 0 }, /* 4F DAC power */ { 0xFF, 0xFF, 0 }, /* 50 DAC power */ { 0xFF, 0xFF, 0 }, /* 51 system */ { 0xFF, 0xFF, 0 }, /* 52 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 53 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 54 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 55 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 56 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 57 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 58 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 59 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 5A DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 5B DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 5C DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 5D DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 5E DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 5F DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 60 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 61 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 62 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 63 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 64 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 65 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 66 DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 67 DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 68 DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 69 DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6A DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6B DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6C DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6D DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6E DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6F DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 70 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 71 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 72 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 73 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 74 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 75 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, 
/* 76 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 77 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 78 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 79 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 7A DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 7B DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 7C DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 7D DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 7E DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 7F DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 80 DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 81 DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 82 DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 83 DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 84 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 85 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 86 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 87 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 88 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 89 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 8A DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 8B DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 8C DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 8D DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 8E DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 8F DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 90 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 91 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 92 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 93 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 94 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 95 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 96 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 97 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 98 DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 99 DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9A DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9B DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9C DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9D DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9E DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9F DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* A0 DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* A1 DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* A2 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A3 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A4 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A5 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A6 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A7 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A8 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A9 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* AA DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* AB DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, 
/* AC DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* AD DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* AE DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* AF DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B0 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B1 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B2 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B3 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B4 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B5 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B6 DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* B7 DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* B8 DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* B9 DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BA DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BB DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BC DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BD DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BE DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BF DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* C0 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C1 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C2 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C3 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C4 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C5 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C6 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C7 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C8 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C9 DAI2 biquad */ { 0x00, 0x00, 0 }, /* CA */ { 0x00, 0x00, 0 }, /* CB */ { 0x00, 0x00, 0 }, /* CC */ { 0x00, 0x00, 0 }, /* CD */ { 0x00, 0x00, 0 }, /* CE */ { 0x00, 0x00, 0 }, /* CF */ { 0x00, 0x00, 0 }, /* D0 */ { 0x00, 0x00, 0 }, /* D1 */ { 0x00, 0x00, 0 }, /* D2 */ { 0x00, 0x00, 0 }, /* D3 */ { 0x00, 0x00, 0 }, /* D4 */ { 0x00, 0x00, 0 }, /* D5 */ { 0x00, 0x00, 0 }, /* D6 */ { 0x00, 0x00, 0 }, /* D7 */ { 0x00, 0x00, 0 }, /* D8 */ { 0x00, 0x00, 0 }, /* D9 */ { 0x00, 0x00, 0 }, /* DA */ { 0x00, 0x00, 0 }, /* DB */ { 0x00, 0x00, 0 }, /* DC */ { 0x00, 0x00, 0 }, /* DD */ { 0x00, 0x00, 0 }, /* DE */ { 0x00, 0x00, 0 }, /* DF */ { 0x00, 0x00, 0 }, /* E0 */ { 0x00, 0x00, 0 }, /* E1 */ { 0x00, 0x00, 0 }, /* E2 */ { 0x00, 0x00, 0 }, /* E3 */ { 0x00, 0x00, 0 }, /* E4 */ { 0x00, 0x00, 0 }, /* E5 */ { 0x00, 0x00, 0 }, /* E6 */ { 0x00, 0x00, 0 }, /* E7 */ { 0x00, 
0x00, 0 }, /* E8 */
	{ 0x00, 0x00, 0 }, /* E9 */
	{ 0x00, 0x00, 0 }, /* EA */
	{ 0x00, 0x00, 0 }, /* EB */
	{ 0x00, 0x00, 0 }, /* EC */
	{ 0x00, 0x00, 0 }, /* ED */
	{ 0x00, 0x00, 0 }, /* EE */
	{ 0x00, 0x00, 0 }, /* EF */
	{ 0x00, 0x00, 0 }, /* F0 */
	{ 0x00, 0x00, 0 }, /* F1 */
	{ 0x00, 0x00, 0 }, /* F2 */
	{ 0x00, 0x00, 0 }, /* F3 */
	{ 0x00, 0x00, 0 }, /* F4 */
	{ 0x00, 0x00, 0 }, /* F5 */
	{ 0x00, 0x00, 0 }, /* F6 */
	{ 0x00, 0x00, 0 }, /* F7 */
	{ 0x00, 0x00, 0 }, /* F8 */
	{ 0x00, 0x00, 0 }, /* F9 */
	{ 0x00, 0x00, 0 }, /* FA */
	{ 0x00, 0x00, 0 }, /* FB */
	{ 0x00, 0x00, 0 }, /* FC */
	{ 0x00, 0x00, 0 }, /* FD */
	{ 0x00, 0x00, 0 }, /* FE */
	{ 0xFF, 0x00, 1 }, /* FF */
};

/*
 * Report whether a register must always be read from hardware rather
 * than the register cache.  Volatility comes straight from the
 * per-register max98088_access[] table above.
 */
static int max98088_volatile_register(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	return max98088_access[reg].vol;
}

/*
 * Load one band of equalizer DSP coefficients into the codec.
 * @dai:   0 selects the DAI1 EQ register bank, non-zero selects DAI2
 * @band:  EQ band index, 0..4
 * @coefs: M98088_COEFS_PER_BAND 16-bit coefficients, written high byte
 *         first into consecutive byte-wide registers
 */
static void m98088_eq_band(struct snd_soc_codec *codec, unsigned int dai,
		    unsigned int band, u16 *coefs)
{
	unsigned int eq_reg;
	unsigned int i;

	BUG_ON(band > 4);
	BUG_ON(dai > 1);

	/* Load the base register address */
	eq_reg = dai ?
M98088_REG_84_DAI2_EQ_BASE :
		M98088_REG_52_DAI1_EQ_BASE;

	/* Add the band address offset, note adjustment for word address */
	eq_reg += band * (M98088_COEFS_PER_BAND << 1);

	/* Step through the registers and coefs, one byte per register */
	for (i = 0; i < M98088_COEFS_PER_BAND; i++) {
		snd_soc_write(codec, eq_reg++, M98088_BYTE1(coefs[i]));
		snd_soc_write(codec, eq_reg++, M98088_BYTE0(coefs[i]));
	}
}

/*
 * Excursion limiter modes (corner/band frequencies)
 */
static const char *max98088_exmode_texts[] = {
	"Off", "100Hz", "400Hz", "600Hz", "800Hz", "1000Hz",
	"200-400Hz", "400-600Hz", "400-800Hz",
};

/* Register field encodings matching max98088_exmode_texts, in order */
static const unsigned int max98088_exmode_values[] = {
	0x00, 0x43, 0x10, 0x20, 0x30, 0x40, 0x11, 0x22, 0x32
};

static const struct soc_enum max98088_exmode_enum =
	SOC_VALUE_ENUM_SINGLE(M98088_REG_41_SPKDHP, 0, 127,
			      ARRAY_SIZE(max98088_exmode_texts),
			      max98088_exmode_texts,
			      max98088_exmode_values);

static const char *max98088_ex_thresh[] = { /* volts PP */
	"0.6", "1.2", "1.8", "2.4", "3.0", "3.6", "4.2", "4.8"};
static const struct soc_enum max98088_ex_thresh_enum[] = {
	SOC_ENUM_SINGLE(M98088_REG_42_SPKDHP_THRESH, 0, 8,
		max98088_ex_thresh),
};

static const char *max98088_fltr_mode[] = {"Voice", "Music" };
static const struct soc_enum max98088_filter_mode_enum[] = {
	SOC_ENUM_SINGLE(M98088_REG_18_DAI1_FILTERS, 7, 2,
		max98088_fltr_mode),
};

static const char *max98088_extmic_text[] = { "None", "MIC1", "MIC2" };

static const struct soc_enum max98088_extmic_enum =
	SOC_ENUM_SINGLE(M98088_REG_48_CFG_MIC, 0, 3, max98088_extmic_text);

static const struct snd_kcontrol_new max98088_extmic_mux =
	SOC_DAPM_ENUM("External MIC Mux", max98088_extmic_enum);

/* DAI1 high-pass filter corner frequency / sample rate options */
static const char *max98088_dai1_fltr[] = {
	"Off", "fc=258/fs=16k", "fc=500/fs=16k",
	"fc=258/fs=8k", "fc=500/fs=8k", "fc=200"};
static const struct soc_enum max98088_dai1_dac_filter_enum[] = {
	SOC_ENUM_SINGLE(M98088_REG_18_DAI1_FILTERS, 0, 6,
		max98088_dai1_fltr),
};
static const struct soc_enum max98088_dai1_adc_filter_enum[] = {
	SOC_ENUM_SINGLE(M98088_REG_18_DAI1_FILTERS, 4, 6,
max98088_dai1_fltr),
};

/*
 * Set the MIC1 preamp gain.  The selection is cached in the driver
 * state and applied to the hardware immediately; note the register
 * field holds sel+1 (see also max98088_mic_event, which re-applies the
 * cached value on power-up).
 */
static int max98088_mic1pre_set(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
	unsigned int sel = ucontrol->value.integer.value[0];

	max98088->mic1pre = sel;
	snd_soc_update_bits(codec, M98088_REG_35_LVL_MIC1, M98088_MICPRE_MASK,
		(1+sel)<<M98088_MICPRE_SHIFT);

	return 0;
}

/* Report the cached MIC1 preamp gain selection */
static int max98088_mic1pre_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.integer.value[0] = max98088->mic1pre;
	return 0;
}

/* Set the MIC2 preamp gain (same sel+1 register encoding as MIC1) */
static int max98088_mic2pre_set(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
	unsigned int sel = ucontrol->value.integer.value[0];

	max98088->mic2pre = sel;
	snd_soc_update_bits(codec, M98088_REG_36_LVL_MIC2, M98088_MICPRE_MASK,
		(1+sel)<<M98088_MICPRE_SHIFT);

	return 0;
}

/* Report the cached MIC2 preamp gain selection */
static int max98088_mic2pre_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.integer.value[0] = max98088->mic2pre;
	return 0;
}

/*
 * MIC boost TLV (units of 0.01 dB): selections 0..1 span 0 to +20 dB
 * in 20 dB steps, selection 2 is fixed +30 dB.
 */
static const unsigned int max98088_micboost_tlv[] = {
	TLV_DB_RANGE_HEAD(2),
	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
};

static const struct snd_kcontrol_new max98088_snd_controls[] = {

	SOC_DOUBLE_R("Headphone Volume", M98088_REG_39_LVL_HP_L,
		M98088_REG_3A_LVL_HP_R, 0, 31, 0),
	SOC_DOUBLE_R("Speaker Volume", M98088_REG_3D_LVL_SPK_L,
		M98088_REG_3E_LVL_SPK_R, 0, 31, 0),
	SOC_DOUBLE_R("Receiver Volume", M98088_REG_3B_LVL_REC_L,
		M98088_REG_3C_LVL_REC_R, 0, 31, 0),
	SOC_DOUBLE_R("Headphone Switch",
M98088_REG_39_LVL_HP_L, M98088_REG_3A_LVL_HP_R, 7, 1, 1), SOC_DOUBLE_R("Speaker Switch", M98088_REG_3D_LVL_SPK_L, M98088_REG_3E_LVL_SPK_R, 7, 1, 1), SOC_DOUBLE_R("Receiver Switch", M98088_REG_3B_LVL_REC_L, M98088_REG_3C_LVL_REC_R, 7, 1, 1), SOC_SINGLE("MIC1 Volume", M98088_REG_35_LVL_MIC1, 0, 31, 1), SOC_SINGLE("MIC2 Volume", M98088_REG_36_LVL_MIC2, 0, 31, 1), SOC_SINGLE_EXT_TLV("MIC1 Boost Volume", M98088_REG_35_LVL_MIC1, 5, 2, 0, max98088_mic1pre_get, max98088_mic1pre_set, max98088_micboost_tlv), SOC_SINGLE_EXT_TLV("MIC2 Boost Volume", M98088_REG_36_LVL_MIC2, 5, 2, 0, max98088_mic2pre_get, max98088_mic2pre_set, max98088_micboost_tlv), SOC_SINGLE("INA Volume", M98088_REG_37_LVL_INA, 0, 7, 1), SOC_SINGLE("INB Volume", M98088_REG_38_LVL_INB, 0, 7, 1), SOC_SINGLE("ADCL Volume", M98088_REG_33_LVL_ADC_L, 0, 15, 0), SOC_SINGLE("ADCR Volume", M98088_REG_34_LVL_ADC_R, 0, 15, 0), SOC_SINGLE("ADCL Boost Volume", M98088_REG_33_LVL_ADC_L, 4, 3, 0), SOC_SINGLE("ADCR Boost Volume", M98088_REG_34_LVL_ADC_R, 4, 3, 0), SOC_SINGLE("EQ1 Switch", M98088_REG_49_CFG_LEVEL, 0, 1, 0), SOC_SINGLE("EQ2 Switch", M98088_REG_49_CFG_LEVEL, 1, 1, 0), SOC_ENUM("EX Limiter Mode", max98088_exmode_enum), SOC_ENUM("EX Limiter Threshold", max98088_ex_thresh_enum), SOC_ENUM("DAI1 Filter Mode", max98088_filter_mode_enum), SOC_ENUM("DAI1 DAC Filter", max98088_dai1_dac_filter_enum), SOC_ENUM("DAI1 ADC Filter", max98088_dai1_adc_filter_enum), SOC_SINGLE("DAI2 DC Block Switch", M98088_REG_20_DAI2_FILTERS, 0, 1, 0), SOC_SINGLE("ALC Switch", M98088_REG_43_SPKALC_COMP, 7, 1, 0), SOC_SINGLE("ALC Threshold", M98088_REG_43_SPKALC_COMP, 0, 7, 0), SOC_SINGLE("ALC Multiband", M98088_REG_43_SPKALC_COMP, 3, 1, 0), SOC_SINGLE("ALC Release Time", M98088_REG_43_SPKALC_COMP, 4, 7, 0), SOC_SINGLE("PWR Limiter Threshold", M98088_REG_44_PWRLMT_CFG, 4, 15, 0), SOC_SINGLE("PWR Limiter Weight", M98088_REG_44_PWRLMT_CFG, 0, 7, 0), SOC_SINGLE("PWR Limiter Time1", M98088_REG_45_PWRLMT_TIME, 0, 15, 0), SOC_SINGLE("PWR Limiter 
Time2", M98088_REG_45_PWRLMT_TIME, 4, 15, 0), SOC_SINGLE("THD Limiter Threshold", M98088_REG_46_THDLMT_CFG, 4, 15, 0), SOC_SINGLE("THD Limiter Time", M98088_REG_46_THDLMT_CFG, 0, 7, 0), }; /* Left speaker mixer switch */ static const struct snd_kcontrol_new max98088_left_speaker_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 0, 1, 0), SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 7, 1, 0), SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 0, 1, 0), SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 7, 1, 0), SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 5, 1, 0), SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 6, 1, 0), SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 1, 1, 0), SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 2, 1, 0), SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 3, 1, 0), SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 4, 1, 0), }; /* Right speaker mixer switch */ static const struct snd_kcontrol_new max98088_right_speaker_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 7, 1, 0), SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 0, 1, 0), SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 7, 1, 0), SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 0, 1, 0), SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 5, 1, 0), SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 6, 1, 0), SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 1, 1, 0), SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 2, 1, 0), SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 3, 1, 0), SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 4, 1, 0), }; /* Left headphone mixer switch */ static const struct snd_kcontrol_new max98088_left_hp_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", 
M98088_REG_25_MIX_HP_LEFT, 0, 1, 0), SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_25_MIX_HP_LEFT, 7, 1, 0), SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_25_MIX_HP_LEFT, 0, 1, 0), SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_25_MIX_HP_LEFT, 7, 1, 0), SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_25_MIX_HP_LEFT, 5, 1, 0), SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_25_MIX_HP_LEFT, 6, 1, 0), SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_25_MIX_HP_LEFT, 1, 1, 0), SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_25_MIX_HP_LEFT, 2, 1, 0), SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_25_MIX_HP_LEFT, 3, 1, 0), SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_25_MIX_HP_LEFT, 4, 1, 0), }; /* Right headphone mixer switch */ static const struct snd_kcontrol_new max98088_right_hp_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", M98088_REG_26_MIX_HP_RIGHT, 7, 1, 0), SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_26_MIX_HP_RIGHT, 0, 1, 0), SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_26_MIX_HP_RIGHT, 7, 1, 0), SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_26_MIX_HP_RIGHT, 0, 1, 0), SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_26_MIX_HP_RIGHT, 5, 1, 0), SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_26_MIX_HP_RIGHT, 6, 1, 0), SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_26_MIX_HP_RIGHT, 1, 1, 0), SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_26_MIX_HP_RIGHT, 2, 1, 0), SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_26_MIX_HP_RIGHT, 3, 1, 0), SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_26_MIX_HP_RIGHT, 4, 1, 0), }; /* Left earpiece/receiver mixer switch */ static const struct snd_kcontrol_new max98088_left_rec_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", M98088_REG_28_MIX_REC_LEFT, 0, 1, 0), SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_28_MIX_REC_LEFT, 7, 1, 0), SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_28_MIX_REC_LEFT, 0, 1, 0), SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_28_MIX_REC_LEFT, 7, 1, 0), SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_28_MIX_REC_LEFT, 5, 1, 0), 
SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_28_MIX_REC_LEFT, 6, 1, 0), SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_28_MIX_REC_LEFT, 1, 1, 0), SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_28_MIX_REC_LEFT, 2, 1, 0), SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_28_MIX_REC_LEFT, 3, 1, 0), SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_28_MIX_REC_LEFT, 4, 1, 0), }; /* Right earpiece/receiver mixer switch */ static const struct snd_kcontrol_new max98088_right_rec_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", M98088_REG_29_MIX_REC_RIGHT, 7, 1, 0), SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_29_MIX_REC_RIGHT, 0, 1, 0), SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_29_MIX_REC_RIGHT, 7, 1, 0), SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_29_MIX_REC_RIGHT, 0, 1, 0), SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_29_MIX_REC_RIGHT, 5, 1, 0), SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_29_MIX_REC_RIGHT, 6, 1, 0), SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_29_MIX_REC_RIGHT, 1, 1, 0), SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_29_MIX_REC_RIGHT, 2, 1, 0), SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_29_MIX_REC_RIGHT, 3, 1, 0), SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_29_MIX_REC_RIGHT, 4, 1, 0), }; /* Left ADC mixer switch */ static const struct snd_kcontrol_new max98088_left_ADC_mixer_controls[] = { SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_23_MIX_ADC_LEFT, 7, 1, 0), SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_23_MIX_ADC_LEFT, 6, 1, 0), SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_23_MIX_ADC_LEFT, 3, 1, 0), SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_23_MIX_ADC_LEFT, 2, 1, 0), SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_23_MIX_ADC_LEFT, 1, 1, 0), SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_23_MIX_ADC_LEFT, 0, 1, 0), }; /* Right ADC mixer switch */ static const struct snd_kcontrol_new max98088_right_ADC_mixer_controls[] = { SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_24_MIX_ADC_RIGHT, 7, 1, 0), SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_24_MIX_ADC_RIGHT, 6, 1, 0), SOC_DAPM_SINGLE("INA1 Switch", 
M98088_REG_24_MIX_ADC_RIGHT, 3, 1, 0),
	SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_24_MIX_ADC_RIGHT, 2, 1, 0),
	SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_24_MIX_ADC_RIGHT, 1, 1, 0),
	SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_24_MIX_ADC_RIGHT, 0, 1, 0),
};

/*
 * DAPM event handler shared by the MIC1/MIC2 input PGA widgets:
 * re-applies the cached preamp gain (sel+1 encoding) after power-up
 * and clears the MICPRE field on power-down.  w->reg identifies which
 * microphone's level register the widget controls.
 */
static int max98088_mic_event(struct snd_soc_dapm_widget *w,
			     struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		if (w->reg == M98088_REG_35_LVL_MIC1) {
			snd_soc_update_bits(codec, w->reg, M98088_MICPRE_MASK,
				(1+max98088->mic1pre)<<M98088_MICPRE_SHIFT);
		} else {
			snd_soc_update_bits(codec, w->reg, M98088_MICPRE_MASK,
				(1+max98088->mic2pre)<<M98088_MICPRE_SHIFT);
		}
		break;
	case SND_SOC_DAPM_POST_PMD:
		/* Preamp field zeroed while the input is powered down */
		snd_soc_update_bits(codec, w->reg, M98088_MICPRE_MASK, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * The line inputs are 2-channel stereo inputs with the left
 * and right channels sharing a common PGA power control signal.
 * A per-line bitmask (one bit per channel) tracks which channels are
 * active so the shared power bit is only cleared when both are off.
 */
static int max98088_line_pga(struct snd_soc_dapm_widget *w,
			     int event, int line, u8 channel)
{
	struct snd_soc_codec *codec = w->codec;
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
	u8 *state;

	BUG_ON(!((channel == 1) || (channel == 2)));

	/* Select the shared power-state tracker for this line pair */
	switch (line) {
	case LINE_INA:
		state = &max98088->ina_state;
		break;
	case LINE_INB:
		state = &max98088->inb_state;
		break;
	default:
		return -EINVAL;
	}

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		*state |= channel;
		snd_soc_update_bits(codec, w->reg,
			(1 << w->shift), (1 << w->shift));
		break;
	case SND_SOC_DAPM_POST_PMD:
		*state &= ~channel;
		/* Power down only once both channels are released */
		if (*state == 0) {
			snd_soc_update_bits(codec, w->reg,
				(1 << w->shift), 0);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Thin DAPM wrappers binding each input widget to its line/channel */
static int max98088_pga_ina1_event(struct snd_soc_dapm_widget *w,
				   struct snd_kcontrol *k, int event)
{
	return max98088_line_pga(w, event, LINE_INA, 1);
}

static int max98088_pga_ina2_event(struct snd_soc_dapm_widget *w,
				   struct snd_kcontrol *k, int event)
{
	return max98088_line_pga(w, event, LINE_INA, 2);
}

static int max98088_pga_inb1_event(struct snd_soc_dapm_widget *w,
				   struct snd_kcontrol *k, int event)
{
	return max98088_line_pga(w, event, LINE_INB, 1);
}

static int max98088_pga_inb2_event(struct snd_soc_dapm_widget *w,
				   struct snd_kcontrol *k, int event)
{
	return max98088_line_pga(w, event, LINE_INB, 2);
}

static const struct snd_soc_dapm_widget max98088_dapm_widgets[] = {

	SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", M98088_REG_4C_PWR_EN_IN, 1, 0),
	SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", M98088_REG_4C_PWR_EN_IN, 0, 0),

	SND_SOC_DAPM_DAC("DACL1", "HiFi Playback",
		M98088_REG_4D_PWR_EN_OUT, 1, 0),
	SND_SOC_DAPM_DAC("DACR1", "HiFi Playback",
		M98088_REG_4D_PWR_EN_OUT, 0, 0),
	SND_SOC_DAPM_DAC("DACL2", "Aux Playback", M98088_REG_4D_PWR_EN_OUT, 1, 0),
	SND_SOC_DAPM_DAC("DACR2", "Aux Playback", M98088_REG_4D_PWR_EN_OUT, 0, 0),

	SND_SOC_DAPM_PGA("HP Left Out", M98088_REG_4D_PWR_EN_OUT,
		7, 0, NULL, 0),
	SND_SOC_DAPM_PGA("HP Right Out", M98088_REG_4D_PWR_EN_OUT,
		6, 0, NULL,
0), SND_SOC_DAPM_PGA("SPK Left Out", M98088_REG_4D_PWR_EN_OUT, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("SPK Right Out", M98088_REG_4D_PWR_EN_OUT, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("REC Left Out", M98088_REG_4D_PWR_EN_OUT, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("REC Right Out", M98088_REG_4D_PWR_EN_OUT, 2, 0, NULL, 0), SND_SOC_DAPM_MUX("External MIC", SND_SOC_NOPM, 0, 0, &max98088_extmic_mux), SND_SOC_DAPM_MIXER("Left HP Mixer", SND_SOC_NOPM, 0, 0, &max98088_left_hp_mixer_controls[0], ARRAY_SIZE(max98088_left_hp_mixer_controls)), SND_SOC_DAPM_MIXER("Right HP Mixer", SND_SOC_NOPM, 0, 0, &max98088_right_hp_mixer_controls[0], ARRAY_SIZE(max98088_right_hp_mixer_controls)), SND_SOC_DAPM_MIXER("Left SPK Mixer", SND_SOC_NOPM, 0, 0, &max98088_left_speaker_mixer_controls[0], ARRAY_SIZE(max98088_left_speaker_mixer_controls)), SND_SOC_DAPM_MIXER("Right SPK Mixer", SND_SOC_NOPM, 0, 0, &max98088_right_speaker_mixer_controls[0], ARRAY_SIZE(max98088_right_speaker_mixer_controls)), SND_SOC_DAPM_MIXER("Left REC Mixer", SND_SOC_NOPM, 0, 0, &max98088_left_rec_mixer_controls[0], ARRAY_SIZE(max98088_left_rec_mixer_controls)), SND_SOC_DAPM_MIXER("Right REC Mixer", SND_SOC_NOPM, 0, 0, &max98088_right_rec_mixer_controls[0], ARRAY_SIZE(max98088_right_rec_mixer_controls)), SND_SOC_DAPM_MIXER("Left ADC Mixer", SND_SOC_NOPM, 0, 0, &max98088_left_ADC_mixer_controls[0], ARRAY_SIZE(max98088_left_ADC_mixer_controls)), SND_SOC_DAPM_MIXER("Right ADC Mixer", SND_SOC_NOPM, 0, 0, &max98088_right_ADC_mixer_controls[0], ARRAY_SIZE(max98088_right_ADC_mixer_controls)), SND_SOC_DAPM_PGA_E("MIC1 Input", M98088_REG_35_LVL_MIC1, 5, 0, NULL, 0, max98088_mic_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("MIC2 Input", M98088_REG_36_LVL_MIC2, 5, 0, NULL, 0, max98088_mic_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("INA1 Input", M98088_REG_4C_PWR_EN_IN, 7, 0, NULL, 0, max98088_pga_ina1_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("INA2 Input", 
M98088_REG_4C_PWR_EN_IN, 7, 0, NULL, 0, max98088_pga_ina2_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("INB1 Input", M98088_REG_4C_PWR_EN_IN, 6, 0, NULL, 0, max98088_pga_inb1_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("INB2 Input", M98088_REG_4C_PWR_EN_IN, 6, 0, NULL, 0, max98088_pga_inb2_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_MICBIAS("MICBIAS", M98088_REG_4C_PWR_EN_IN, 3, 0), SND_SOC_DAPM_OUTPUT("HPL"), SND_SOC_DAPM_OUTPUT("HPR"), SND_SOC_DAPM_OUTPUT("SPKL"), SND_SOC_DAPM_OUTPUT("SPKR"), SND_SOC_DAPM_OUTPUT("RECL"), SND_SOC_DAPM_OUTPUT("RECR"), SND_SOC_DAPM_INPUT("MIC1"), SND_SOC_DAPM_INPUT("MIC2"), SND_SOC_DAPM_INPUT("INA1"), SND_SOC_DAPM_INPUT("INA2"), SND_SOC_DAPM_INPUT("INB1"), SND_SOC_DAPM_INPUT("INB2"), }; static const struct snd_soc_dapm_route max98088_audio_map[] = { /* Left headphone output mixer */ {"Left HP Mixer", "Left DAC1 Switch", "DACL1"}, {"Left HP Mixer", "Left DAC2 Switch", "DACL2"}, {"Left HP Mixer", "Right DAC1 Switch", "DACR1"}, {"Left HP Mixer", "Right DAC2 Switch", "DACR2"}, {"Left HP Mixer", "MIC1 Switch", "MIC1 Input"}, {"Left HP Mixer", "MIC2 Switch", "MIC2 Input"}, {"Left HP Mixer", "INA1 Switch", "INA1 Input"}, {"Left HP Mixer", "INA2 Switch", "INA2 Input"}, {"Left HP Mixer", "INB1 Switch", "INB1 Input"}, {"Left HP Mixer", "INB2 Switch", "INB2 Input"}, /* Right headphone output mixer */ {"Right HP Mixer", "Left DAC1 Switch", "DACL1"}, {"Right HP Mixer", "Left DAC2 Switch", "DACL2" }, {"Right HP Mixer", "Right DAC1 Switch", "DACR1"}, {"Right HP Mixer", "Right DAC2 Switch", "DACR2"}, {"Right HP Mixer", "MIC1 Switch", "MIC1 Input"}, {"Right HP Mixer", "MIC2 Switch", "MIC2 Input"}, {"Right HP Mixer", "INA1 Switch", "INA1 Input"}, {"Right HP Mixer", "INA2 Switch", "INA2 Input"}, {"Right HP Mixer", "INB1 Switch", "INB1 Input"}, {"Right HP Mixer", "INB2 Switch", "INB2 Input"}, /* Left speaker output mixer */ {"Left SPK Mixer", "Left DAC1 Switch", 
"DACL1"}, {"Left SPK Mixer", "Left DAC2 Switch", "DACL2"}, {"Left SPK Mixer", "Right DAC1 Switch", "DACR1"}, {"Left SPK Mixer", "Right DAC2 Switch", "DACR2"}, {"Left SPK Mixer", "MIC1 Switch", "MIC1 Input"}, {"Left SPK Mixer", "MIC2 Switch", "MIC2 Input"}, {"Left SPK Mixer", "INA1 Switch", "INA1 Input"}, {"Left SPK Mixer", "INA2 Switch", "INA2 Input"}, {"Left SPK Mixer", "INB1 Switch", "INB1 Input"}, {"Left SPK Mixer", "INB2 Switch", "INB2 Input"}, /* Right speaker output mixer */ {"Right SPK Mixer", "Left DAC1 Switch", "DACL1"}, {"Right SPK Mixer", "Left DAC2 Switch", "DACL2"}, {"Right SPK Mixer", "Right DAC1 Switch", "DACR1"}, {"Right SPK Mixer", "Right DAC2 Switch", "DACR2"}, {"Right SPK Mixer", "MIC1 Switch", "MIC1 Input"}, {"Right SPK Mixer", "MIC2 Switch", "MIC2 Input"}, {"Right SPK Mixer", "INA1 Switch", "INA1 Input"}, {"Right SPK Mixer", "INA2 Switch", "INA2 Input"}, {"Right SPK Mixer", "INB1 Switch", "INB1 Input"}, {"Right SPK Mixer", "INB2 Switch", "INB2 Input"}, /* Earpiece/Receiver output mixer */ {"Left REC Mixer", "Left DAC1 Switch", "DACL1"}, {"Left REC Mixer", "Left DAC2 Switch", "DACL2"}, {"Left REC Mixer", "Right DAC1 Switch", "DACR1"}, {"Left REC Mixer", "Right DAC2 Switch", "DACR2"}, {"Left REC Mixer", "MIC1 Switch", "MIC1 Input"}, {"Left REC Mixer", "MIC2 Switch", "MIC2 Input"}, {"Left REC Mixer", "INA1 Switch", "INA1 Input"}, {"Left REC Mixer", "INA2 Switch", "INA2 Input"}, {"Left REC Mixer", "INB1 Switch", "INB1 Input"}, {"Left REC Mixer", "INB2 Switch", "INB2 Input"}, /* Earpiece/Receiver output mixer */ {"Right REC Mixer", "Left DAC1 Switch", "DACL1"}, {"Right REC Mixer", "Left DAC2 Switch", "DACL2"}, {"Right REC Mixer", "Right DAC1 Switch", "DACR1"}, {"Right REC Mixer", "Right DAC2 Switch", "DACR2"}, {"Right REC Mixer", "MIC1 Switch", "MIC1 Input"}, {"Right REC Mixer", "MIC2 Switch", "MIC2 Input"}, {"Right REC Mixer", "INA1 Switch", "INA1 Input"}, {"Right REC Mixer", "INA2 Switch", "INA2 Input"}, {"Right REC Mixer", "INB1 Switch", "INB1 
Input"}, {"Right REC Mixer", "INB2 Switch", "INB2 Input"}, {"HP Left Out", NULL, "Left HP Mixer"}, {"HP Right Out", NULL, "Right HP Mixer"}, {"SPK Left Out", NULL, "Left SPK Mixer"}, {"SPK Right Out", NULL, "Right SPK Mixer"}, {"REC Left Out", NULL, "Left REC Mixer"}, {"REC Right Out", NULL, "Right REC Mixer"}, {"HPL", NULL, "HP Left Out"}, {"HPR", NULL, "HP Right Out"}, {"SPKL", NULL, "SPK Left Out"}, {"SPKR", NULL, "SPK Right Out"}, {"RECL", NULL, "REC Left Out"}, {"RECR", NULL, "REC Right Out"}, /* Left ADC input mixer */ {"Left ADC Mixer", "MIC1 Switch", "MIC1 Input"}, {"Left ADC Mixer", "MIC2 Switch", "MIC2 Input"}, {"Left ADC Mixer", "INA1 Switch", "INA1 Input"}, {"Left ADC Mixer", "INA2 Switch", "INA2 Input"}, {"Left ADC Mixer", "INB1 Switch", "INB1 Input"}, {"Left ADC Mixer", "INB2 Switch", "INB2 Input"}, /* Right ADC input mixer */ {"Right ADC Mixer", "MIC1 Switch", "MIC1 Input"}, {"Right ADC Mixer", "MIC2 Switch", "MIC2 Input"}, {"Right ADC Mixer", "INA1 Switch", "INA1 Input"}, {"Right ADC Mixer", "INA2 Switch", "INA2 Input"}, {"Right ADC Mixer", "INB1 Switch", "INB1 Input"}, {"Right ADC Mixer", "INB2 Switch", "INB2 Input"}, /* Inputs */ {"ADCL", NULL, "Left ADC Mixer"}, {"ADCR", NULL, "Right ADC Mixer"}, {"INA1 Input", NULL, "INA1"}, {"INA2 Input", NULL, "INA2"}, {"INB1 Input", NULL, "INB1"}, {"INB2 Input", NULL, "INB2"}, {"MIC1 Input", NULL, "MIC1"}, {"MIC2 Input", NULL, "MIC2"}, }; /* codec mclk clock divider coefficients */ static const struct { u32 rate; u8 sr; } rate_table[] = { {8000, 0x10}, {11025, 0x20}, {16000, 0x30}, {22050, 0x40}, {24000, 0x50}, {32000, 0x60}, {44100, 0x70}, {48000, 0x80}, {88200, 0x90}, {96000, 0xA0}, }; static inline int rate_value(int rate, u8 *value) { int i; for (i = 0; i < ARRAY_SIZE(rate_table); i++) { if (rate_table[i].rate >= rate) { *value = rate_table[i].sr; return 0; } } *value = rate_table[0].sr; return -EINVAL; } static int max98088_dai1_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params 
*params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_cdata *cdata; unsigned long long ni; unsigned int rate; u8 regval; cdata = &max98088->dai[0]; rate = params_rate(params); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: snd_soc_update_bits(codec, M98088_REG_14_DAI1_FORMAT, M98088_DAI_WS, 0); break; case SNDRV_PCM_FORMAT_S24_LE: snd_soc_update_bits(codec, M98088_REG_14_DAI1_FORMAT, M98088_DAI_WS, M98088_DAI_WS); break; default: return -EINVAL; } snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, 0); if (rate_value(rate, &regval)) return -EINVAL; snd_soc_update_bits(codec, M98088_REG_11_DAI1_CLKMODE, M98088_CLKMODE_MASK, regval); cdata->rate = rate; /* Configure NI when operating as master */ if (snd_soc_read(codec, M98088_REG_14_DAI1_FORMAT) & M98088_DAI_MAS) { if (max98088->sysclk == 0) { dev_err(codec->dev, "Invalid system clock frequency\n"); return -EINVAL; } ni = 65536ULL * (rate < 50000 ? 
96ULL : 48ULL) * (unsigned long long int)rate; do_div(ni, (unsigned long long int)max98088->sysclk); snd_soc_write(codec, M98088_REG_12_DAI1_CLKCFG_HI, (ni >> 8) & 0x7F); snd_soc_write(codec, M98088_REG_13_DAI1_CLKCFG_LO, ni & 0xFF); } /* Update sample rate mode */ if (rate < 50000) snd_soc_update_bits(codec, M98088_REG_18_DAI1_FILTERS, M98088_DAI_DHF, 0); else snd_soc_update_bits(codec, M98088_REG_18_DAI1_FILTERS, M98088_DAI_DHF, M98088_DAI_DHF); snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, M98088_SHDNRUN); return 0; } static int max98088_dai2_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_cdata *cdata; unsigned long long ni; unsigned int rate; u8 regval; cdata = &max98088->dai[1]; rate = params_rate(params); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: snd_soc_update_bits(codec, M98088_REG_1C_DAI2_FORMAT, M98088_DAI_WS, 0); break; case SNDRV_PCM_FORMAT_S24_LE: snd_soc_update_bits(codec, M98088_REG_1C_DAI2_FORMAT, M98088_DAI_WS, M98088_DAI_WS); break; default: return -EINVAL; } snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, 0); if (rate_value(rate, &regval)) return -EINVAL; snd_soc_update_bits(codec, M98088_REG_19_DAI2_CLKMODE, M98088_CLKMODE_MASK, regval); cdata->rate = rate; /* Configure NI when operating as master */ if (snd_soc_read(codec, M98088_REG_1C_DAI2_FORMAT) & M98088_DAI_MAS) { if (max98088->sysclk == 0) { dev_err(codec->dev, "Invalid system clock frequency\n"); return -EINVAL; } ni = 65536ULL * (rate < 50000 ? 
96ULL : 48ULL) * (unsigned long long int)rate; do_div(ni, (unsigned long long int)max98088->sysclk); snd_soc_write(codec, M98088_REG_1A_DAI2_CLKCFG_HI, (ni >> 8) & 0x7F); snd_soc_write(codec, M98088_REG_1B_DAI2_CLKCFG_LO, ni & 0xFF); } /* Update sample rate mode */ if (rate < 50000) snd_soc_update_bits(codec, M98088_REG_20_DAI2_FILTERS, M98088_DAI_DHF, 0); else snd_soc_update_bits(codec, M98088_REG_20_DAI2_FILTERS, M98088_DAI_DHF, M98088_DAI_DHF); snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, M98088_SHDNRUN); return 0; } static int max98088_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = dai->codec; struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); /* Requested clock frequency is already setup */ if (freq == max98088->sysclk) return 0; /* Setup clocks for slave mode, and using the PLL * PSCLK = 0x01 (when master clk is 10MHz to 20MHz) * 0x02 (when master clk is 20MHz to 30MHz).. */ if ((freq >= 10000000) && (freq < 20000000)) { snd_soc_write(codec, M98088_REG_10_SYS_CLK, 0x10); } else if ((freq >= 20000000) && (freq < 30000000)) { snd_soc_write(codec, M98088_REG_10_SYS_CLK, 0x20); } else { dev_err(codec->dev, "Invalid master clock frequency\n"); return -EINVAL; } if (snd_soc_read(codec, M98088_REG_51_PWR_SYS) & M98088_SHDNRUN) { snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, 0); snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, M98088_SHDNRUN); } dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq); max98088->sysclk = freq; return 0; } static int max98088_dai1_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_cdata *cdata; u8 reg15val; u8 reg14val = 0; cdata = &max98088->dai[0]; if (fmt != cdata->fmt) { cdata->fmt = fmt; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case 
SND_SOC_DAIFMT_CBS_CFS: /* Slave mode PLL */ snd_soc_write(codec, M98088_REG_12_DAI1_CLKCFG_HI, 0x80); snd_soc_write(codec, M98088_REG_13_DAI1_CLKCFG_LO, 0x00); break; case SND_SOC_DAIFMT_CBM_CFM: /* Set to master mode */ reg14val |= M98088_DAI_MAS; break; case SND_SOC_DAIFMT_CBS_CFM: case SND_SOC_DAIFMT_CBM_CFS: default: dev_err(codec->dev, "Clock mode unsupported"); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: reg14val |= M98088_DAI_DLY; break; case SND_SOC_DAIFMT_LEFT_J: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_NB_IF: reg14val |= M98088_DAI_WCI; break; case SND_SOC_DAIFMT_IB_NF: reg14val |= M98088_DAI_BCI; break; case SND_SOC_DAIFMT_IB_IF: reg14val |= M98088_DAI_BCI|M98088_DAI_WCI; break; default: return -EINVAL; } snd_soc_update_bits(codec, M98088_REG_14_DAI1_FORMAT, M98088_DAI_MAS | M98088_DAI_DLY | M98088_DAI_BCI | M98088_DAI_WCI, reg14val); reg15val = M98088_DAI_BSEL64; if (max98088->digmic) reg15val |= M98088_DAI_OSR64; snd_soc_write(codec, M98088_REG_15_DAI1_CLOCK, reg15val); } return 0; } static int max98088_dai2_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_cdata *cdata; u8 reg1Cval = 0; cdata = &max98088->dai[1]; if (fmt != cdata->fmt) { cdata->fmt = fmt; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: /* Slave mode PLL */ snd_soc_write(codec, M98088_REG_1A_DAI2_CLKCFG_HI, 0x80); snd_soc_write(codec, M98088_REG_1B_DAI2_CLKCFG_LO, 0x00); break; case SND_SOC_DAIFMT_CBM_CFM: /* Set to master mode */ reg1Cval |= M98088_DAI_MAS; break; case SND_SOC_DAIFMT_CBS_CFM: case SND_SOC_DAIFMT_CBM_CFS: default: dev_err(codec->dev, "Clock mode unsupported"); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: reg1Cval |= M98088_DAI_DLY; 
break; case SND_SOC_DAIFMT_LEFT_J: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_NB_IF: reg1Cval |= M98088_DAI_WCI; break; case SND_SOC_DAIFMT_IB_NF: reg1Cval |= M98088_DAI_BCI; break; case SND_SOC_DAIFMT_IB_IF: reg1Cval |= M98088_DAI_BCI|M98088_DAI_WCI; break; default: return -EINVAL; } snd_soc_update_bits(codec, M98088_REG_1C_DAI2_FORMAT, M98088_DAI_MAS | M98088_DAI_DLY | M98088_DAI_BCI | M98088_DAI_WCI, reg1Cval); snd_soc_write(codec, M98088_REG_1D_DAI2_CLOCK, M98088_DAI_BSEL64); } return 0; } static int max98088_dai1_digital_mute(struct snd_soc_dai *codec_dai, int mute) { struct snd_soc_codec *codec = codec_dai->codec; int reg; if (mute) reg = M98088_DAI_MUTE; else reg = 0; snd_soc_update_bits(codec, M98088_REG_2F_LVL_DAI1_PLAY, M98088_DAI_MUTE_MASK, reg); return 0; } static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute) { struct snd_soc_codec *codec = codec_dai->codec; int reg; if (mute) reg = M98088_DAI_MUTE; else reg = 0; snd_soc_update_bits(codec, M98088_REG_31_LVL_DAI2_PLAY, M98088_DAI_MUTE_MASK, reg); return 0; } static void max98088_sync_cache(struct snd_soc_codec *codec) { u16 *reg_cache = codec->reg_cache; int i; if (!codec->cache_sync) return; codec->cache_only = 0; /* write back cached values if they're writeable and * different from the hardware default. 
*/ for (i = 1; i < codec->driver->reg_cache_size; i++) { if (!max98088_access[i].writable) continue; if (reg_cache[i] == max98088_reg[i]) continue; snd_soc_write(codec, i, reg_cache[i]); } codec->cache_sync = 0; } static int max98088_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) max98088_sync_cache(codec); snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN, M98088_MBEN, M98088_MBEN); break; case SND_SOC_BIAS_OFF: snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN, M98088_MBEN, 0); codec->cache_sync = 1; break; } codec->dapm.bias_level = level; return 0; } #define MAX98088_RATES SNDRV_PCM_RATE_8000_96000 #define MAX98088_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops max98088_dai1_ops = { .set_sysclk = max98088_dai_set_sysclk, .set_fmt = max98088_dai1_set_fmt, .hw_params = max98088_dai1_hw_params, .digital_mute = max98088_dai1_digital_mute, }; static const struct snd_soc_dai_ops max98088_dai2_ops = { .set_sysclk = max98088_dai_set_sysclk, .set_fmt = max98088_dai2_set_fmt, .hw_params = max98088_dai2_hw_params, .digital_mute = max98088_dai2_digital_mute, }; static struct snd_soc_dai_driver max98088_dai[] = { { .name = "HiFi", .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = MAX98088_RATES, .formats = MAX98088_FORMATS, }, .capture = { .stream_name = "HiFi Capture", .channels_min = 1, .channels_max = 2, .rates = MAX98088_RATES, .formats = MAX98088_FORMATS, }, .ops = &max98088_dai1_ops, }, { .name = "Aux", .playback = { .stream_name = "Aux Playback", .channels_min = 1, .channels_max = 2, .rates = MAX98088_RATES, .formats = MAX98088_FORMATS, }, .ops = &max98088_dai2_ops, } }; static const char *eq_mode_name[] = {"EQ1 Mode", "EQ2 Mode"}; static int max98088_get_channel(struct snd_soc_codec *codec, 
const char *name) { int i; for (i = 0; i < ARRAY_SIZE(eq_mode_name); i++) if (strcmp(name, eq_mode_name[i]) == 0) return i; /* Shouldn't happen */ dev_err(codec->dev, "Bad EQ channel name '%s'\n", name); return -EINVAL; } static void max98088_setup_eq1(struct snd_soc_codec *codec) { struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_pdata *pdata = max98088->pdata; struct max98088_eq_cfg *coef_set; int best, best_val, save, i, sel, fs; struct max98088_cdata *cdata; cdata = &max98088->dai[0]; if (!pdata || !max98088->eq_textcnt) return; /* Find the selected configuration with nearest sample rate */ fs = cdata->rate; sel = cdata->eq_sel; best = 0; best_val = INT_MAX; for (i = 0; i < pdata->eq_cfgcnt; i++) { if (strcmp(pdata->eq_cfg[i].name, max98088->eq_texts[sel]) == 0 && abs(pdata->eq_cfg[i].rate - fs) < best_val) { best = i; best_val = abs(pdata->eq_cfg[i].rate - fs); } } dev_dbg(codec->dev, "Selected %s/%dHz for %dHz sample rate\n", pdata->eq_cfg[best].name, pdata->eq_cfg[best].rate, fs); /* Disable EQ while configuring, and save current on/off state */ save = snd_soc_read(codec, M98088_REG_49_CFG_LEVEL); snd_soc_update_bits(codec, M98088_REG_49_CFG_LEVEL, M98088_EQ1EN, 0); coef_set = &pdata->eq_cfg[sel]; m98088_eq_band(codec, 0, 0, coef_set->band1); m98088_eq_band(codec, 0, 1, coef_set->band2); m98088_eq_band(codec, 0, 2, coef_set->band3); m98088_eq_band(codec, 0, 3, coef_set->band4); m98088_eq_band(codec, 0, 4, coef_set->band5); /* Restore the original on/off state */ snd_soc_update_bits(codec, M98088_REG_49_CFG_LEVEL, M98088_EQ1EN, save); } static void max98088_setup_eq2(struct snd_soc_codec *codec) { struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_pdata *pdata = max98088->pdata; struct max98088_eq_cfg *coef_set; int best, best_val, save, i, sel, fs; struct max98088_cdata *cdata; cdata = &max98088->dai[1]; if (!pdata || !max98088->eq_textcnt) return; /* Find the selected configuration with 
nearest sample rate */ fs = cdata->rate; sel = cdata->eq_sel; best = 0; best_val = INT_MAX; for (i = 0; i < pdata->eq_cfgcnt; i++) { if (strcmp(pdata->eq_cfg[i].name, max98088->eq_texts[sel]) == 0 && abs(pdata->eq_cfg[i].rate - fs) < best_val) { best = i; best_val = abs(pdata->eq_cfg[i].rate - fs); } } dev_dbg(codec->dev, "Selected %s/%dHz for %dHz sample rate\n", pdata->eq_cfg[best].name, pdata->eq_cfg[best].rate, fs); /* Disable EQ while configuring, and save current on/off state */ save = snd_soc_read(codec, M98088_REG_49_CFG_LEVEL); snd_soc_update_bits(codec, M98088_REG_49_CFG_LEVEL, M98088_EQ2EN, 0); coef_set = &pdata->eq_cfg[sel]; m98088_eq_band(codec, 1, 0, coef_set->band1); m98088_eq_band(codec, 1, 1, coef_set->band2); m98088_eq_band(codec, 1, 2, coef_set->band3); m98088_eq_band(codec, 1, 3, coef_set->band4); m98088_eq_band(codec, 1, 4, coef_set->band5); /* Restore the original on/off state */ snd_soc_update_bits(codec, M98088_REG_49_CFG_LEVEL, M98088_EQ2EN, save); } static int max98088_put_eq_enum(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_pdata *pdata = max98088->pdata; int channel = max98088_get_channel(codec, kcontrol->id.name); struct max98088_cdata *cdata; int sel = ucontrol->value.integer.value[0]; if (channel < 0) return channel; cdata = &max98088->dai[channel]; if (sel >= pdata->eq_cfgcnt) return -EINVAL; cdata->eq_sel = sel; switch (channel) { case 0: max98088_setup_eq1(codec); break; case 1: max98088_setup_eq2(codec); break; } return 0; } static int max98088_get_eq_enum(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); int channel = max98088_get_channel(codec, kcontrol->id.name); struct max98088_cdata *cdata; if (channel < 0) return 
channel; cdata = &max98088->dai[channel]; ucontrol->value.enumerated.item[0] = cdata->eq_sel; return 0; } static void max98088_handle_eq_pdata(struct snd_soc_codec *codec) { struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_pdata *pdata = max98088->pdata; struct max98088_eq_cfg *cfg; unsigned int cfgcnt; int i, j; const char **t; int ret; struct snd_kcontrol_new controls[] = { SOC_ENUM_EXT((char *)eq_mode_name[0], max98088->eq_enum, max98088_get_eq_enum, max98088_put_eq_enum), SOC_ENUM_EXT((char *)eq_mode_name[1], max98088->eq_enum, max98088_get_eq_enum, max98088_put_eq_enum), }; BUILD_BUG_ON(ARRAY_SIZE(controls) != ARRAY_SIZE(eq_mode_name)); cfg = pdata->eq_cfg; cfgcnt = pdata->eq_cfgcnt; /* Setup an array of texts for the equalizer enum. * This is based on Mark Brown's equalizer driver code. */ max98088->eq_textcnt = 0; max98088->eq_texts = NULL; for (i = 0; i < cfgcnt; i++) { for (j = 0; j < max98088->eq_textcnt; j++) { if (strcmp(cfg[i].name, max98088->eq_texts[j]) == 0) break; } if (j != max98088->eq_textcnt) continue; /* Expand the array */ t = krealloc(max98088->eq_texts, sizeof(char *) * (max98088->eq_textcnt + 1), GFP_KERNEL); if (t == NULL) continue; /* Store the new entry */ t[max98088->eq_textcnt] = cfg[i].name; max98088->eq_textcnt++; max98088->eq_texts = t; } /* Now point the soc_enum to .texts array items */ max98088->eq_enum.texts = max98088->eq_texts; max98088->eq_enum.max = max98088->eq_textcnt; ret = snd_soc_add_codec_controls(codec, controls, ARRAY_SIZE(controls)); if (ret != 0) dev_err(codec->dev, "Failed to add EQ control: %d\n", ret); } static void max98088_handle_pdata(struct snd_soc_codec *codec) { struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_pdata *pdata = max98088->pdata; u8 regval = 0; if (!pdata) { dev_dbg(codec->dev, "No platform data\n"); return; } /* Configure mic for analog/digital mic mode */ if (pdata->digmic_left_mode) regval |= M98088_DIGMIC_L; if 
(pdata->digmic_right_mode) regval |= M98088_DIGMIC_R; max98088->digmic = (regval ? 1 : 0); snd_soc_write(codec, M98088_REG_48_CFG_MIC, regval); /* Configure receiver output */ regval = ((pdata->receiver_mode) ? M98088_REC_LINEMODE : 0); snd_soc_update_bits(codec, M98088_REG_2A_MIC_REC_CNTL, M98088_REC_LINEMODE_MASK, regval); /* Configure equalizers */ if (pdata->eq_cfgcnt) max98088_handle_eq_pdata(codec); } #ifdef CONFIG_PM static int max98088_suspend(struct snd_soc_codec *codec) { max98088_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int max98088_resume(struct snd_soc_codec *codec) { max98088_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } #else #define max98088_suspend NULL #define max98088_resume NULL #endif static int max98088_probe(struct snd_soc_codec *codec) { struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_cdata *cdata; int ret = 0; codec->cache_sync = 1; ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C); if (ret != 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } /* initialize private data */ max98088->sysclk = (unsigned)-1; max98088->eq_textcnt = 0; cdata = &max98088->dai[0]; cdata->rate = (unsigned)-1; cdata->fmt = (unsigned)-1; cdata->eq_sel = 0; cdata = &max98088->dai[1]; cdata->rate = (unsigned)-1; cdata->fmt = (unsigned)-1; cdata->eq_sel = 0; max98088->ina_state = 0; max98088->inb_state = 0; max98088->ex_mode = 0; max98088->digmic = 0; max98088->mic1pre = 0; max98088->mic2pre = 0; ret = snd_soc_read(codec, M98088_REG_FF_REV_ID); if (ret < 0) { dev_err(codec->dev, "Failed to read device revision: %d\n", ret); goto err_access; } dev_info(codec->dev, "revision %c\n", ret + 'A'); snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV); /* initialize registers cache to hardware default */ max98088_set_bias_level(codec, SND_SOC_BIAS_STANDBY); snd_soc_write(codec, M98088_REG_0F_IRQ_ENABLE, 0x00); snd_soc_write(codec, M98088_REG_22_MIX_DAC, 
M98088_DAI1L_TO_DACL|M98088_DAI2L_TO_DACL| M98088_DAI1R_TO_DACR|M98088_DAI2R_TO_DACR); snd_soc_write(codec, M98088_REG_4E_BIAS_CNTL, 0xF0); snd_soc_write(codec, M98088_REG_50_DAC_BIAS2, 0x0F); snd_soc_write(codec, M98088_REG_16_DAI1_IOCFG, M98088_S1NORMAL|M98088_SDATA); snd_soc_write(codec, M98088_REG_1E_DAI2_IOCFG, M98088_S2NORMAL|M98088_SDATA); max98088_handle_pdata(codec); snd_soc_add_codec_controls(codec, max98088_snd_controls, ARRAY_SIZE(max98088_snd_controls)); err_access: return ret; } static int max98088_remove(struct snd_soc_codec *codec) { struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); max98088_set_bias_level(codec, SND_SOC_BIAS_OFF); kfree(max98088->eq_texts); return 0; } static struct snd_soc_codec_driver soc_codec_dev_max98088 = { .probe = max98088_probe, .remove = max98088_remove, .suspend = max98088_suspend, .resume = max98088_resume, .set_bias_level = max98088_set_bias_level, .reg_cache_size = ARRAY_SIZE(max98088_reg), .reg_word_size = sizeof(u8), .reg_cache_default = max98088_reg, .volatile_register = max98088_volatile_register, .dapm_widgets = max98088_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(max98088_dapm_widgets), .dapm_routes = max98088_audio_map, .num_dapm_routes = ARRAY_SIZE(max98088_audio_map), }; static int max98088_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct max98088_priv *max98088; int ret; max98088 = devm_kzalloc(&i2c->dev, sizeof(struct max98088_priv), GFP_KERNEL); if (max98088 == NULL) return -ENOMEM; max98088->devtype = id->driver_data; i2c_set_clientdata(i2c, max98088); max98088->pdata = i2c->dev.platform_data; ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98088, &max98088_dai[0], 2); return ret; } static int __devexit max98088_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id max98088_i2c_id[] = { { "max98088", MAX98088 }, { "max98089", MAX98089 }, { } }; MODULE_DEVICE_TABLE(i2c, 
max98088_i2c_id); static struct i2c_driver max98088_i2c_driver = { .driver = { .name = "max98088", .owner = THIS_MODULE, }, .probe = max98088_i2c_probe, .remove = __devexit_p(max98088_i2c_remove), .id_table = max98088_i2c_id, }; static int __init max98088_init(void) { int ret; ret = i2c_add_driver(&max98088_i2c_driver); if (ret) pr_err("Failed to register max98088 I2C driver: %d\n", ret); return ret; } module_init(max98088_init); static void __exit max98088_exit(void) { i2c_del_driver(&max98088_i2c_driver); } module_exit(max98088_exit); MODULE_DESCRIPTION("ALSA SoC MAX98088 driver"); MODULE_AUTHOR("Peter Hsiang, Jesse Marroquin"); MODULE_LICENSE("GPL");
gpl-2.0
manveru0/FeaCore_Phoenix_S3_JellyBean
arch/m68k/platform/532x/config.c
3953
15590
/***************************************************************************/ /* * linux/arch/m68knommu/platform/532x/config.c * * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com) * Copyright (C) 2000, Lineo (www.lineo.com) * Yaroslav Vinogradov yaroslav.vinogradov@freescale.com * Copyright Freescale Semiconductor, Inc 2006 * Copyright (c) 2006, emlix, Sebastian Hess <sh@emlix.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /***************************************************************************/ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/io.h> #include <linux/spi/spi.h> #include <linux/gpio.h> #include <asm/machdep.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfuart.h> #include <asm/mcfdma.h> #include <asm/mcfwdebug.h> #include <asm/mcfqspi.h> /***************************************************************************/ static struct mcf_platform_uart m532x_uart_platform[] = { { .mapbase = MCFUART_BASE1, .irq = MCFINT_VECBASE + MCFINT_UART0, }, { .mapbase = MCFUART_BASE2, .irq = MCFINT_VECBASE + MCFINT_UART1, }, { .mapbase = MCFUART_BASE3, .irq = MCFINT_VECBASE + MCFINT_UART2, }, { }, }; static struct platform_device m532x_uart = { .name = "mcfuart", .id = 0, .dev.platform_data = m532x_uart_platform, }; static struct resource m532x_fec_resources[] = { { .start = 0xfc030000, .end = 0xfc0307ff, .flags = IORESOURCE_MEM, }, { .start = 64 + 36, .end = 64 + 36, .flags = IORESOURCE_IRQ, }, { .start = 64 + 40, .end = 64 + 40, .flags = IORESOURCE_IRQ, }, { .start = 64 + 42, .end = 64 + 42, .flags = IORESOURCE_IRQ, }, }; static struct platform_device m532x_fec = { .name = "fec", .id = 0, .num_resources = ARRAY_SIZE(m532x_fec_resources), .resource = m532x_fec_resources, }; #if 
defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) static struct resource m532x_qspi_resources[] = { { .start = MCFQSPI_IOBASE, .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1, .flags = IORESOURCE_MEM, }, { .start = MCFINT_VECBASE + MCFINT_QSPI, .end = MCFINT_VECBASE + MCFINT_QSPI, .flags = IORESOURCE_IRQ, }, }; #define MCFQSPI_CS0 84 #define MCFQSPI_CS1 85 #define MCFQSPI_CS2 86 static int m532x_cs_setup(struct mcfqspi_cs_control *cs_control) { int status; status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0"); if (status) { pr_debug("gpio_request for MCFQSPI_CS0 failed\n"); goto fail0; } status = gpio_direction_output(MCFQSPI_CS0, 1); if (status) { pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n"); goto fail1; } status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1"); if (status) { pr_debug("gpio_request for MCFQSPI_CS1 failed\n"); goto fail1; } status = gpio_direction_output(MCFQSPI_CS1, 1); if (status) { pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n"); goto fail2; } status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2"); if (status) { pr_debug("gpio_request for MCFQSPI_CS2 failed\n"); goto fail2; } status = gpio_direction_output(MCFQSPI_CS2, 1); if (status) { pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n"); goto fail3; } return 0; fail3: gpio_free(MCFQSPI_CS2); fail2: gpio_free(MCFQSPI_CS1); fail1: gpio_free(MCFQSPI_CS0); fail0: return status; } static void m532x_cs_teardown(struct mcfqspi_cs_control *cs_control) { gpio_free(MCFQSPI_CS2); gpio_free(MCFQSPI_CS1); gpio_free(MCFQSPI_CS0); } static void m532x_cs_select(struct mcfqspi_cs_control *cs_control, u8 chip_select, bool cs_high) { gpio_set_value(MCFQSPI_CS0 + chip_select, cs_high); } static void m532x_cs_deselect(struct mcfqspi_cs_control *cs_control, u8 chip_select, bool cs_high) { gpio_set_value(MCFQSPI_CS0 + chip_select, !cs_high); } static struct mcfqspi_cs_control m532x_cs_control = { .setup = m532x_cs_setup, .teardown = m532x_cs_teardown, .select = m532x_cs_select, 
.deselect = m532x_cs_deselect, }; static struct mcfqspi_platform_data m532x_qspi_data = { .bus_num = 0, .num_chipselect = 3, .cs_control = &m532x_cs_control, }; static struct platform_device m532x_qspi = { .name = "mcfqspi", .id = 0, .num_resources = ARRAY_SIZE(m532x_qspi_resources), .resource = m532x_qspi_resources, .dev.platform_data = &m532x_qspi_data, }; static void __init m532x_qspi_init(void) { /* setup QSPS pins for QSPI with gpio CS control */ writew(0x01f0, MCF_GPIO_PAR_QSPI); } #endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */ static struct platform_device *m532x_devices[] __initdata = { &m532x_uart, &m532x_fec, #if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) &m532x_qspi, #endif }; /***************************************************************************/ static void __init m532x_uart_init_line(int line, int irq) { if (line == 0) { /* GPIO initialization */ MCF_GPIO_PAR_UART |= 0x000F; } else if (line == 1) { /* GPIO initialization */ MCF_GPIO_PAR_UART |= 0x0FF0; } } static void __init m532x_uarts_init(void) { const int nrlines = ARRAY_SIZE(m532x_uart_platform); int line; for (line = 0; (line < nrlines); line++) m532x_uart_init_line(line, m532x_uart_platform[line].irq); } /***************************************************************************/ static void __init m532x_fec_init(void) { /* Set multi-function pins to ethernet mode for fec0 */ MCF_GPIO_PAR_FECI2C |= (MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC | MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO); MCF_GPIO_PAR_FEC = (MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC | MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC); } /***************************************************************************/ static void m532x_cpu_reset(void) { local_irq_disable(); __raw_writeb(MCF_RCR_SWRESET, MCF_RCR); } /***************************************************************************/ void __init config_BSP(char *commandp, int size) { #if !defined(CONFIG_BOOTPARAM) /* Copy command line 
from FLASH to local buffer... */ memcpy(commandp, (char *) 0x4000, 4); if(strncmp(commandp, "kcl ", 4) == 0){ memcpy(commandp, (char *) 0x4004, size); commandp[size-1] = 0; } else { memset(commandp, 0, size); } #endif #ifdef CONFIG_BDM_DISABLE /* * Disable the BDM clocking. This also turns off most of the rest of * the BDM device. This is good for EMC reasons. This option is not * incompatible with the memory protection option. */ wdebug(MCFDEBUG_CSR, MCFDEBUG_CSR_PSTCLK); #endif } /***************************************************************************/ static int __init init_BSP(void) { m532x_uarts_init(); m532x_fec_init(); #if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) m532x_qspi_init(); #endif platform_add_devices(m532x_devices, ARRAY_SIZE(m532x_devices)); return 0; } arch_initcall(init_BSP); /***************************************************************************/ /* Board initialization */ /***************************************************************************/ /* * PLL min/max specifications */ #define MAX_FVCO 500000 /* KHz */ #define MAX_FSYS 80000 /* KHz */ #define MIN_FSYS 58333 /* KHz */ #define FREF 16000 /* KHz */ #define MAX_MFD 135 /* Multiplier */ #define MIN_MFD 88 /* Multiplier */ #define BUSDIV 6 /* Divider */ /* * Low Power Divider specifications */ #define MIN_LPD (1 << 0) /* Divider (not encoded) */ #define MAX_LPD (1 << 15) /* Divider (not encoded) */ #define DEFAULT_LPD (1 << 1) /* Divider (not encoded) */ #define SYS_CLK_KHZ 80000 #define SYSTEM_PERIOD 12.5 /* * SDRAM Timing Parameters */ #define SDRAM_BL 8 /* # of beats in a burst */ #define SDRAM_TWR 2 /* in clocks */ #define SDRAM_CASL 2.5 /* CASL in clocks */ #define SDRAM_TRCD 2 /* in clocks */ #define SDRAM_TRP 2 /* in clocks */ #define SDRAM_TRFC 7 /* in clocks */ #define SDRAM_TREFI 7800 /* in ns */ #define EXT_SRAM_ADDRESS (0xC0000000) #define FLASH_ADDRESS (0x00000000) #define SDRAM_ADDRESS (0x40000000) #define NAND_FLASH_ADDRESS 
(0xD0000000) int sys_clk_khz = 0; int sys_clk_mhz = 0; void wtm_init(void); void scm_init(void); void gpio_init(void); void fbcs_init(void); void sdramc_init(void); int clock_pll (int fsys, int flags); int clock_limp (int); int clock_exit_limp (void); int get_sys_clock (void); asmlinkage void __init sysinit(void) { sys_clk_khz = clock_pll(0, 0); sys_clk_mhz = sys_clk_khz/1000; wtm_init(); scm_init(); gpio_init(); fbcs_init(); sdramc_init(); } void wtm_init(void) { /* Disable watchdog timer */ MCF_WTM_WCR = 0; } #define MCF_SCM_BCR_GBW (0x00000100) #define MCF_SCM_BCR_GBR (0x00000200) void scm_init(void) { /* All masters are trusted */ MCF_SCM_MPR = 0x77777777; /* Allow supervisor/user, read/write, and trusted/untrusted access to all slaves */ MCF_SCM_PACRA = 0; MCF_SCM_PACRB = 0; MCF_SCM_PACRC = 0; MCF_SCM_PACRD = 0; MCF_SCM_PACRE = 0; MCF_SCM_PACRF = 0; /* Enable bursts */ MCF_SCM_BCR = (MCF_SCM_BCR_GBR | MCF_SCM_BCR_GBW); } void fbcs_init(void) { MCF_GPIO_PAR_CS = 0x0000003E; /* Latch chip select */ MCF_FBCS1_CSAR = 0x10080000; MCF_FBCS1_CSCR = 0x002A3780; MCF_FBCS1_CSMR = (MCF_FBCS_CSMR_BAM_2M | MCF_FBCS_CSMR_V); /* Initialize latch to drive signals to inactive states */ *((u16 *)(0x10080000)) = 0xFFFF; /* External SRAM */ MCF_FBCS1_CSAR = EXT_SRAM_ADDRESS; MCF_FBCS1_CSCR = (MCF_FBCS_CSCR_PS_16 | MCF_FBCS_CSCR_AA | MCF_FBCS_CSCR_SBM | MCF_FBCS_CSCR_WS(1)); MCF_FBCS1_CSMR = (MCF_FBCS_CSMR_BAM_512K | MCF_FBCS_CSMR_V); /* Boot Flash connected to FBCS0 */ MCF_FBCS0_CSAR = FLASH_ADDRESS; MCF_FBCS0_CSCR = (MCF_FBCS_CSCR_PS_16 | MCF_FBCS_CSCR_BEM | MCF_FBCS_CSCR_AA | MCF_FBCS_CSCR_SBM | MCF_FBCS_CSCR_WS(7)); MCF_FBCS0_CSMR = (MCF_FBCS_CSMR_BAM_32M | MCF_FBCS_CSMR_V); } void sdramc_init(void) { /* * Check to see if the SDRAM has already been initialized * by a run control tool */ if (!(MCF_SDRAMC_SDCR & MCF_SDRAMC_SDCR_REF)) { /* SDRAM chip select initialization */ /* Initialize SDRAM chip select */ MCF_SDRAMC_SDCS0 = (0 | MCF_SDRAMC_SDCS_BA(SDRAM_ADDRESS) | 
MCF_SDRAMC_SDCS_CSSZ(MCF_SDRAMC_SDCS_CSSZ_32MBYTE)); /* * Basic configuration and initialization */ MCF_SDRAMC_SDCFG1 = (0 | MCF_SDRAMC_SDCFG1_SRD2RW((int)((SDRAM_CASL + 2) + 0.5 )) | MCF_SDRAMC_SDCFG1_SWT2RD(SDRAM_TWR + 1) | MCF_SDRAMC_SDCFG1_RDLAT((int)((SDRAM_CASL*2) + 2)) | MCF_SDRAMC_SDCFG1_ACT2RW((int)((SDRAM_TRCD ) + 0.5)) | MCF_SDRAMC_SDCFG1_PRE2ACT((int)((SDRAM_TRP ) + 0.5)) | MCF_SDRAMC_SDCFG1_REF2ACT((int)(((SDRAM_TRFC) ) + 0.5)) | MCF_SDRAMC_SDCFG1_WTLAT(3)); MCF_SDRAMC_SDCFG2 = (0 | MCF_SDRAMC_SDCFG2_BRD2PRE(SDRAM_BL/2 + 1) | MCF_SDRAMC_SDCFG2_BWT2RW(SDRAM_BL/2 + SDRAM_TWR) | MCF_SDRAMC_SDCFG2_BRD2WT((int)((SDRAM_CASL+SDRAM_BL/2-1.0)+0.5)) | MCF_SDRAMC_SDCFG2_BL(SDRAM_BL-1)); /* * Precharge and enable write to SDMR */ MCF_SDRAMC_SDCR = (0 | MCF_SDRAMC_SDCR_MODE_EN | MCF_SDRAMC_SDCR_CKE | MCF_SDRAMC_SDCR_DDR | MCF_SDRAMC_SDCR_MUX(1) | MCF_SDRAMC_SDCR_RCNT((int)(((SDRAM_TREFI/(SYSTEM_PERIOD*64)) - 1) + 0.5)) | MCF_SDRAMC_SDCR_PS_16 | MCF_SDRAMC_SDCR_IPALL); /* * Write extended mode register */ MCF_SDRAMC_SDMR = (0 | MCF_SDRAMC_SDMR_BNKAD_LEMR | MCF_SDRAMC_SDMR_AD(0x0) | MCF_SDRAMC_SDMR_CMD); /* * Write mode register and reset DLL */ MCF_SDRAMC_SDMR = (0 | MCF_SDRAMC_SDMR_BNKAD_LMR | MCF_SDRAMC_SDMR_AD(0x163) | MCF_SDRAMC_SDMR_CMD); /* * Execute a PALL command */ MCF_SDRAMC_SDCR |= MCF_SDRAMC_SDCR_IPALL; /* * Perform two REF cycles */ MCF_SDRAMC_SDCR |= MCF_SDRAMC_SDCR_IREF; MCF_SDRAMC_SDCR |= MCF_SDRAMC_SDCR_IREF; /* * Write mode register and clear reset DLL */ MCF_SDRAMC_SDMR = (0 | MCF_SDRAMC_SDMR_BNKAD_LMR | MCF_SDRAMC_SDMR_AD(0x063) | MCF_SDRAMC_SDMR_CMD); /* * Enable auto refresh and lock SDMR */ MCF_SDRAMC_SDCR &= ~MCF_SDRAMC_SDCR_MODE_EN; MCF_SDRAMC_SDCR |= (0 | MCF_SDRAMC_SDCR_REF | MCF_SDRAMC_SDCR_DQS_OE(0xC)); } } void gpio_init(void) { /* Enable UART0 pins */ MCF_GPIO_PAR_UART = ( 0 | MCF_GPIO_PAR_UART_PAR_URXD0 | MCF_GPIO_PAR_UART_PAR_UTXD0); /* Initialize TIN3 as a GPIO output to enable the write half of the latch */ MCF_GPIO_PAR_TIMER = 
0x00; __raw_writeb(0x08, MCFGPIO_PDDR_TIMER); __raw_writeb(0x00, MCFGPIO_PCLRR_TIMER); } int clock_pll(int fsys, int flags) { int fref, temp, fout, mfd; u32 i; fref = FREF; if (fsys == 0) { /* Return current PLL output */ mfd = MCF_PLL_PFDR; return (fref * mfd / (BUSDIV * 4)); } /* Check bounds of requested system clock */ if (fsys > MAX_FSYS) fsys = MAX_FSYS; if (fsys < MIN_FSYS) fsys = MIN_FSYS; /* Multiplying by 100 when calculating the temp value, and then dividing by 100 to calculate the mfd allows for exact values without needing to include floating point libraries. */ temp = 100 * fsys / fref; mfd = 4 * BUSDIV * temp / 100; /* Determine the output frequency for selected values */ fout = (fref * mfd / (BUSDIV * 4)); /* * Check to see if the SDRAM has already been initialized. * If it has then the SDRAM needs to be put into self refresh * mode before reprogramming the PLL. */ if (MCF_SDRAMC_SDCR & MCF_SDRAMC_SDCR_REF) /* Put SDRAM into self refresh mode */ MCF_SDRAMC_SDCR &= ~MCF_SDRAMC_SDCR_CKE; /* * Initialize the PLL to generate the new system clock frequency. * The device must be put into LIMP mode to reprogram the PLL. */ /* Enter LIMP mode */ clock_limp(DEFAULT_LPD); /* Reprogram PLL for desired fsys */ MCF_PLL_PODR = (0 | MCF_PLL_PODR_CPUDIV(BUSDIV/3) | MCF_PLL_PODR_BUSDIV(BUSDIV)); MCF_PLL_PFDR = mfd; /* Exit LIMP mode */ clock_exit_limp(); /* * Return the SDRAM to normal operation if it is in use. 
*/ if (MCF_SDRAMC_SDCR & MCF_SDRAMC_SDCR_REF) /* Exit self refresh mode */ MCF_SDRAMC_SDCR |= MCF_SDRAMC_SDCR_CKE; /* Errata - workaround for SDRAM opeartion after exiting LIMP mode */ MCF_SDRAMC_LIMP_FIX = MCF_SDRAMC_REFRESH; /* wait for DQS logic to relock */ for (i = 0; i < 0x200; i++) ; return fout; } int clock_limp(int div) { u32 temp; /* Check bounds of divider */ if (div < MIN_LPD) div = MIN_LPD; if (div > MAX_LPD) div = MAX_LPD; /* Save of the current value of the SSIDIV so we don't overwrite the value*/ temp = (MCF_CCM_CDR & MCF_CCM_CDR_SSIDIV(0xF)); /* Apply the divider to the system clock */ MCF_CCM_CDR = ( 0 | MCF_CCM_CDR_LPDIV(div) | MCF_CCM_CDR_SSIDIV(temp)); MCF_CCM_MISCCR |= MCF_CCM_MISCCR_LIMP; return (FREF/(3*(1 << div))); } int clock_exit_limp(void) { int fout; /* Exit LIMP mode */ MCF_CCM_MISCCR = (MCF_CCM_MISCCR & ~ MCF_CCM_MISCCR_LIMP); /* Wait for PLL to lock */ while (!(MCF_CCM_MISCCR & MCF_CCM_MISCCR_PLL_LOCK)) ; fout = get_sys_clock(); return fout; } int get_sys_clock(void) { int divider; /* Test to see if device is in LIMP mode */ if (MCF_CCM_MISCCR & MCF_CCM_MISCCR_LIMP) { divider = MCF_CCM_CDR & MCF_CCM_CDR_LPDIV(0xF); return (FREF/(2 << divider)); } else return ((FREF * MCF_PLL_PFDR) / (BUSDIV * 4)); }
gpl-2.0
SlimRoms/kernel_samsung_mondrianwifi
arch/s390/mm/init.c
4465
6809
/* * arch/s390/mm/init.c * * S390 version * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Hartmut Penner (hp@de.ibm.com) * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1995 Linus Torvalds */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/bootmem.h> #include <linux/pfn.h> #include <linux/poison.h> #include <linux/initrd.h> #include <linux/export.h> #include <linux/gfp.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/dma.h> #include <asm/lowcore.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/sections.h> #include <asm/ctl_reg.h> pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); unsigned long empty_zero_page, zero_page_mask; EXPORT_SYMBOL(empty_zero_page); static unsigned long setup_zero_pages(void) { struct cpuid cpu_id; unsigned int order; unsigned long size; struct page *page; int i; get_cpu_id(&cpu_id); switch (cpu_id.machine) { case 0x9672: /* g5 */ case 0x2064: /* z900 */ case 0x2066: /* z900 */ case 0x2084: /* z990 */ case 0x2086: /* z990 */ case 0x2094: /* z9-109 */ case 0x2096: /* z9-109 */ order = 0; break; case 0x2097: /* z10 */ case 0x2098: /* z10 */ default: order = 2; break; } empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!empty_zero_page) panic("Out of memory in setup_zero_pages"); page = virt_to_page((void *) empty_zero_page); split_page(page, order); for (i = 1 << order; i > 0; i--) { SetPageReserved(page); page++; } size = PAGE_SIZE << order; zero_page_mask = (size - 1) & PAGE_MASK; return 1UL << order; } /* * paging_init() sets up the page tables */ void __init paging_init(void) { 
unsigned long max_zone_pfns[MAX_NR_ZONES]; unsigned long pgd_type, asce_bits; init_mm.pgd = swapper_pg_dir; #ifdef CONFIG_64BIT if (VMALLOC_END > (1UL << 42)) { asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH; pgd_type = _REGION2_ENTRY_EMPTY; } else { asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; pgd_type = _REGION3_ENTRY_EMPTY; } #else asce_bits = _ASCE_TABLE_LENGTH; pgd_type = _SEGMENT_ENTRY_EMPTY; #endif S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; clear_table((unsigned long *) init_mm.pgd, pgd_type, sizeof(unsigned long)*2048); vmem_map_init(); /* enable virtual mapping in kernel mode */ __ctl_load(S390_lowcore.kernel_asce, 1, 1); __ctl_load(S390_lowcore.kernel_asce, 7, 7); __ctl_load(S390_lowcore.kernel_asce, 13, 13); arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); atomic_set(&init_mm.context.attach_count, 1); sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); max_zone_pfns[ZONE_NORMAL] = max_low_pfn; free_area_init_nodes(max_zone_pfns); fault_init(); } void __init mem_init(void) { unsigned long codesize, reservedpages, datasize, initsize; max_mapnr = num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); /* Setup guest page hinting */ cmma_init(); /* this will put all low memory onto the freelists */ totalram_pages += free_all_bootmem(); totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. 
*/ reservedpages = 0; codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n", nr_free_pages() << (PAGE_SHIFT-10), max_mapnr << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), datasize >>10, initsize >> 10); printk("Write protected kernel read-only data: %#lx - %#lx\n", (unsigned long)&_stext, PFN_ALIGN((unsigned long)&_eshared) - 1); } #ifdef CONFIG_DEBUG_PAGEALLOC void kernel_map_pages(struct page *page, int numpages, int enable) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned long address; int i; for (i = 0; i < numpages; i++) { address = page_to_phys(page + i); pgd = pgd_offset_k(address); pud = pud_offset(pgd, address); pmd = pmd_offset(pud, address); pte = pte_offset_kernel(pmd, address); if (!enable) { __ptep_ipte(address, pte); pte_val(*pte) = _PAGE_TYPE_EMPTY; continue; } *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW)); /* Flush cpu write queue. 
*/ mb(); } } #endif void free_init_pages(char *what, unsigned long begin, unsigned long end) { unsigned long addr = begin; if (begin >= end) return; for (; addr < end; addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM, PAGE_SIZE); free_page(addr); totalram_pages++; } printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); } void free_initmem(void) { free_init_pages("unused kernel memory", (unsigned long)&__init_begin, (unsigned long)&__init_end); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { free_init_pages("initrd memory", start, end); } #endif #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size) { unsigned long zone_start_pfn, zone_end_pfn, nr_pages; unsigned long start_pfn = PFN_DOWN(start); unsigned long size_pages = PFN_DOWN(size); struct zone *zone; int rc; rc = vmem_add_mapping(start, size); if (rc) return rc; for_each_zone(zone) { if (zone_idx(zone) != ZONE_MOVABLE) { /* Add range within existing zone limits */ zone_start_pfn = zone->zone_start_pfn; zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; } else { /* Add remaining range to ZONE_MOVABLE */ zone_start_pfn = start_pfn; zone_end_pfn = start_pfn + size_pages; } if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn) continue; nr_pages = (start_pfn + size_pages > zone_end_pfn) ? zone_end_pfn - start_pfn : size_pages; rc = __add_pages(nid, zone, start_pfn, nr_pages); if (rc) break; start_pfn += nr_pages; size_pages -= nr_pages; if (!size_pages) break; } if (rc) vmem_remove_mapping(start, size); return rc; } #endif /* CONFIG_MEMORY_HOTPLUG */
gpl-2.0
Hashcode/android_kernel_samsung-jf-common
arch/arm/mach-ep93xx/core.c
4721
25524
/* * arch/arm/mach-ep93xx/core.c * Core routines for Cirrus EP93xx chips. * * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org> * * Thanks go to Michael Burian and Ray Lehtiniemi for their key * role in the ep93xx linux community. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/timex.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/termios.h> #include <linux/amba/bus.h> #include <linux/amba/serial.h> #include <linux/mtd/physmap.h> #include <linux/i2c.h> #include <linux/i2c-gpio.h> #include <linux/spi/spi.h> #include <linux/export.h> #include <mach/hardware.h> #include <mach/fb.h> #include <mach/ep93xx_keypad.h> #include <mach/ep93xx_spi.h> #include <mach/gpio-ep93xx.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include <asm/hardware/vic.h> #include "soc.h" /************************************************************************* * Static I/O mappings that are needed for all EP93xx platforms *************************************************************************/ static struct map_desc ep93xx_io_desc[] __initdata = { { .virtual = EP93XX_AHB_VIRT_BASE, .pfn = __phys_to_pfn(EP93XX_AHB_PHYS_BASE), .length = EP93XX_AHB_SIZE, .type = MT_DEVICE, }, { .virtual = EP93XX_APB_VIRT_BASE, .pfn = __phys_to_pfn(EP93XX_APB_PHYS_BASE), .length = EP93XX_APB_SIZE, .type = MT_DEVICE, }, }; void __init ep93xx_map_io(void) { iotable_init(ep93xx_io_desc, ARRAY_SIZE(ep93xx_io_desc)); } 
/************************************************************************* * Timer handling for EP93xx ************************************************************************* * The ep93xx has four internal timers. Timers 1, 2 (both 16 bit) and * 3 (32 bit) count down at 508 kHz, are self-reloading, and can generate * an interrupt on underflow. Timer 4 (40 bit) counts down at 983.04 kHz, * is free-running, and can't generate interrupts. * * The 508 kHz timers are ideal for use for the timer interrupt, as the * most common values of HZ divide 508 kHz nicely. We pick one of the 16 * bit timers (timer 1) since we don't need more than 16 bits of reload * value as long as HZ >= 8. * * The higher clock rate of timer 4 makes it a better choice than the * other timers for use in gettimeoffset(), while the fact that it can't * generate interrupts means we don't have to worry about not being able * to use this timer for something else. We also use timer 4 for keeping * track of lost jiffies. */ #define EP93XX_TIMER_REG(x) (EP93XX_TIMER_BASE + (x)) #define EP93XX_TIMER1_LOAD EP93XX_TIMER_REG(0x00) #define EP93XX_TIMER1_VALUE EP93XX_TIMER_REG(0x04) #define EP93XX_TIMER1_CONTROL EP93XX_TIMER_REG(0x08) #define EP93XX_TIMER123_CONTROL_ENABLE (1 << 7) #define EP93XX_TIMER123_CONTROL_MODE (1 << 6) #define EP93XX_TIMER123_CONTROL_CLKSEL (1 << 3) #define EP93XX_TIMER1_CLEAR EP93XX_TIMER_REG(0x0c) #define EP93XX_TIMER2_LOAD EP93XX_TIMER_REG(0x20) #define EP93XX_TIMER2_VALUE EP93XX_TIMER_REG(0x24) #define EP93XX_TIMER2_CONTROL EP93XX_TIMER_REG(0x28) #define EP93XX_TIMER2_CLEAR EP93XX_TIMER_REG(0x2c) #define EP93XX_TIMER4_VALUE_LOW EP93XX_TIMER_REG(0x60) #define EP93XX_TIMER4_VALUE_HIGH EP93XX_TIMER_REG(0x64) #define EP93XX_TIMER4_VALUE_HIGH_ENABLE (1 << 8) #define EP93XX_TIMER3_LOAD EP93XX_TIMER_REG(0x80) #define EP93XX_TIMER3_VALUE EP93XX_TIMER_REG(0x84) #define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88) #define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c) #define 
EP93XX_TIMER123_CLOCK 508469 #define EP93XX_TIMER4_CLOCK 983040 #define TIMER1_RELOAD ((EP93XX_TIMER123_CLOCK / HZ) - 1) #define TIMER4_TICKS_PER_JIFFY DIV_ROUND_CLOSEST(CLOCK_TICK_RATE, HZ) static unsigned int last_jiffy_time; static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id) { /* Writing any value clears the timer interrupt */ __raw_writel(1, EP93XX_TIMER1_CLEAR); /* Recover lost jiffies */ while ((signed long) (__raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time) >= TIMER4_TICKS_PER_JIFFY) { last_jiffy_time += TIMER4_TICKS_PER_JIFFY; timer_tick(); } return IRQ_HANDLED; } static struct irqaction ep93xx_timer_irq = { .name = "ep93xx timer", .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = ep93xx_timer_interrupt, }; static void __init ep93xx_timer_init(void) { u32 tmode = EP93XX_TIMER123_CONTROL_MODE | EP93XX_TIMER123_CONTROL_CLKSEL; /* Enable periodic HZ timer. */ __raw_writel(tmode, EP93XX_TIMER1_CONTROL); __raw_writel(TIMER1_RELOAD, EP93XX_TIMER1_LOAD); __raw_writel(tmode | EP93XX_TIMER123_CONTROL_ENABLE, EP93XX_TIMER1_CONTROL); /* Enable lost jiffy timer. */ __raw_writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE, EP93XX_TIMER4_VALUE_HIGH); setup_irq(IRQ_EP93XX_TIMER1, &ep93xx_timer_irq); } static unsigned long ep93xx_gettimeoffset(void) { int offset; offset = __raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time; /* Calculate (1000000 / 983040) * offset. 
*/ return offset + (53 * offset / 3072); } struct sys_timer ep93xx_timer = { .init = ep93xx_timer_init, .offset = ep93xx_gettimeoffset, }; /************************************************************************* * EP93xx IRQ handling *************************************************************************/ void __init ep93xx_init_irq(void) { vic_init(EP93XX_VIC1_BASE, 0, EP93XX_VIC1_VALID_IRQ_MASK, 0); vic_init(EP93XX_VIC2_BASE, 32, EP93XX_VIC2_VALID_IRQ_MASK, 0); } /************************************************************************* * EP93xx System Controller Software Locked register handling *************************************************************************/ /* * syscon_swlock prevents anything else from writing to the syscon * block while a software locked register is being written. */ static DEFINE_SPINLOCK(syscon_swlock); void ep93xx_syscon_swlocked_write(unsigned int val, void __iomem *reg) { unsigned long flags; spin_lock_irqsave(&syscon_swlock, flags); __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); __raw_writel(val, reg); spin_unlock_irqrestore(&syscon_swlock, flags); } void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits) { unsigned long flags; unsigned int val; spin_lock_irqsave(&syscon_swlock, flags); val = __raw_readl(EP93XX_SYSCON_DEVCFG); val &= ~clear_bits; val |= set_bits; __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); __raw_writel(val, EP93XX_SYSCON_DEVCFG); spin_unlock_irqrestore(&syscon_swlock, flags); } /** * ep93xx_chip_revision() - returns the EP93xx chip revision * * See <mach/platform.h> for more information. 
*/ unsigned int ep93xx_chip_revision(void) { unsigned int v; v = __raw_readl(EP93XX_SYSCON_SYSCFG); v &= EP93XX_SYSCON_SYSCFG_REV_MASK; v >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT; return v; } /************************************************************************* * EP93xx GPIO *************************************************************************/ static struct resource ep93xx_gpio_resource[] = { { .start = EP93XX_GPIO_PHYS_BASE, .end = EP93XX_GPIO_PHYS_BASE + 0xcc - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device ep93xx_gpio_device = { .name = "gpio-ep93xx", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_gpio_resource), .resource = ep93xx_gpio_resource, }; /************************************************************************* * EP93xx peripheral handling *************************************************************************/ #define EP93XX_UART_MCR_OFFSET (0x0100) static void ep93xx_uart_set_mctrl(struct amba_device *dev, void __iomem *base, unsigned int mctrl) { unsigned int mcr; mcr = 0; if (mctrl & TIOCM_RTS) mcr |= 2; if (mctrl & TIOCM_DTR) mcr |= 1; __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET); } static struct amba_pl010_data ep93xx_uart_data = { .set_mctrl = ep93xx_uart_set_mctrl, }; static AMBA_APB_DEVICE(uart1, "apb:uart1", 0x00041010, EP93XX_UART1_PHYS_BASE, { IRQ_EP93XX_UART1 }, &ep93xx_uart_data); static AMBA_APB_DEVICE(uart2, "apb:uart2", 0x00041010, EP93XX_UART2_PHYS_BASE, { IRQ_EP93XX_UART2 }, &ep93xx_uart_data); static AMBA_APB_DEVICE(uart3, "apb:uart3", 0x00041010, EP93XX_UART3_PHYS_BASE, { IRQ_EP93XX_UART3 }, &ep93xx_uart_data); static struct resource ep93xx_rtc_resource[] = { { .start = EP93XX_RTC_PHYS_BASE, .end = EP93XX_RTC_PHYS_BASE + 0x10c - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device ep93xx_rtc_device = { .name = "ep93xx-rtc", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_rtc_resource), .resource = ep93xx_rtc_resource, }; static struct resource ep93xx_ohci_resources[] = { [0] = { .start = 
EP93XX_USB_PHYS_BASE, .end = EP93XX_USB_PHYS_BASE + 0x0fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_EP93XX_USB, .end = IRQ_EP93XX_USB, .flags = IORESOURCE_IRQ, }, }; static struct platform_device ep93xx_ohci_device = { .name = "ep93xx-ohci", .id = -1, .dev = { .dma_mask = &ep93xx_ohci_device.dev.coherent_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(ep93xx_ohci_resources), .resource = ep93xx_ohci_resources, }; /************************************************************************* * EP93xx physmap'ed flash *************************************************************************/ static struct physmap_flash_data ep93xx_flash_data; static struct resource ep93xx_flash_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device ep93xx_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &ep93xx_flash_data, }, .num_resources = 1, .resource = &ep93xx_flash_resource, }; /** * ep93xx_register_flash() - Register the external flash device. 
* @width: bank width in octets * @start: resource start address * @size: resource size */ void __init ep93xx_register_flash(unsigned int width, resource_size_t start, resource_size_t size) { ep93xx_flash_data.width = width; ep93xx_flash_resource.start = start; ep93xx_flash_resource.end = start + size - 1; platform_device_register(&ep93xx_flash); } /************************************************************************* * EP93xx ethernet peripheral handling *************************************************************************/ static struct ep93xx_eth_data ep93xx_eth_data; static struct resource ep93xx_eth_resource[] = { { .start = EP93XX_ETHERNET_PHYS_BASE, .end = EP93XX_ETHERNET_PHYS_BASE + 0xffff, .flags = IORESOURCE_MEM, }, { .start = IRQ_EP93XX_ETHERNET, .end = IRQ_EP93XX_ETHERNET, .flags = IORESOURCE_IRQ, } }; static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32); static struct platform_device ep93xx_eth_device = { .name = "ep93xx-eth", .id = -1, .dev = { .platform_data = &ep93xx_eth_data, .coherent_dma_mask = DMA_BIT_MASK(32), .dma_mask = &ep93xx_eth_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_eth_resource), .resource = ep93xx_eth_resource, }; /** * ep93xx_register_eth - Register the built-in ethernet platform device. 
* @data: platform specific ethernet configuration (__initdata) * @copy_addr: flag indicating that the MAC address should be copied * from the IndAd registers (as programmed by the bootloader) */ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr) { if (copy_addr) memcpy_fromio(data->dev_addr, EP93XX_ETHERNET_BASE + 0x50, 6); ep93xx_eth_data = *data; platform_device_register(&ep93xx_eth_device); } /************************************************************************* * EP93xx i2c peripheral handling *************************************************************************/ static struct i2c_gpio_platform_data ep93xx_i2c_data; static struct platform_device ep93xx_i2c_device = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = &ep93xx_i2c_data, }, }; /** * ep93xx_register_i2c - Register the i2c platform device. * @data: platform specific i2c-gpio configuration (__initdata) * @devices: platform specific i2c bus device information (__initdata) * @num: the number of devices on the i2c bus */ void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data, struct i2c_board_info *devices, int num) { /* * Set the EEPROM interface pin drive type control. * Defines the driver type for the EECLK and EEDAT pins as either * open drain, which will require an external pull-up, or a normal * CMOS driver. 
*/ if (data->sda_is_open_drain && data->sda_pin != EP93XX_GPIO_LINE_EEDAT) pr_warning("sda != EEDAT, open drain has no effect\n"); if (data->scl_is_open_drain && data->scl_pin != EP93XX_GPIO_LINE_EECLK) pr_warning("scl != EECLK, open drain has no effect\n"); __raw_writel((data->sda_is_open_drain << 1) | (data->scl_is_open_drain << 0), EP93XX_GPIO_EEDRIVE); ep93xx_i2c_data = *data; i2c_register_board_info(0, devices, num); platform_device_register(&ep93xx_i2c_device); } /************************************************************************* * EP93xx SPI peripheral handling *************************************************************************/ static struct ep93xx_spi_info ep93xx_spi_master_data; static struct resource ep93xx_spi_resources[] = { { .start = EP93XX_SPI_PHYS_BASE, .end = EP93XX_SPI_PHYS_BASE + 0x18 - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_EP93XX_SSP, .end = IRQ_EP93XX_SSP, .flags = IORESOURCE_IRQ, }, }; static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32); static struct platform_device ep93xx_spi_device = { .name = "ep93xx-spi", .id = 0, .dev = { .platform_data = &ep93xx_spi_master_data, .coherent_dma_mask = DMA_BIT_MASK(32), .dma_mask = &ep93xx_spi_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_spi_resources), .resource = ep93xx_spi_resources, }; /** * ep93xx_register_spi() - registers spi platform device * @info: ep93xx board specific spi master info (__initdata) * @devices: SPI devices to register (__initdata) * @num: number of SPI devices to register * * This function registers platform device for the EP93xx SPI controller and * also makes sure that SPI pins are muxed so that I2S is not using those pins. */ void __init ep93xx_register_spi(struct ep93xx_spi_info *info, struct spi_board_info *devices, int num) { /* * When SPI is used, we need to make sure that I2S is muxed off from * SPI pins. 
*/ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONSSP); ep93xx_spi_master_data = *info; spi_register_board_info(devices, num); platform_device_register(&ep93xx_spi_device); } /************************************************************************* * EP93xx LEDs *************************************************************************/ static struct gpio_led ep93xx_led_pins[] = { { .name = "platform:grled", .gpio = EP93XX_GPIO_LINE_GRLED, }, { .name = "platform:rdled", .gpio = EP93XX_GPIO_LINE_RDLED, }, }; static struct gpio_led_platform_data ep93xx_led_data = { .num_leds = ARRAY_SIZE(ep93xx_led_pins), .leds = ep93xx_led_pins, }; static struct platform_device ep93xx_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &ep93xx_led_data, }, }; /************************************************************************* * EP93xx pwm peripheral handling *************************************************************************/ static struct resource ep93xx_pwm0_resource[] = { { .start = EP93XX_PWM_PHYS_BASE, .end = EP93XX_PWM_PHYS_BASE + 0x10 - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device ep93xx_pwm0_device = { .name = "ep93xx-pwm", .id = 0, .num_resources = ARRAY_SIZE(ep93xx_pwm0_resource), .resource = ep93xx_pwm0_resource, }; static struct resource ep93xx_pwm1_resource[] = { { .start = EP93XX_PWM_PHYS_BASE + 0x20, .end = EP93XX_PWM_PHYS_BASE + 0x30 - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device ep93xx_pwm1_device = { .name = "ep93xx-pwm", .id = 1, .num_resources = ARRAY_SIZE(ep93xx_pwm1_resource), .resource = ep93xx_pwm1_resource, }; void __init ep93xx_register_pwm(int pwm0, int pwm1) { if (pwm0) platform_device_register(&ep93xx_pwm0_device); /* NOTE: EP9307 does not have PWMOUT1 (pin EGPIO14) */ if (pwm1) platform_device_register(&ep93xx_pwm1_device); } int ep93xx_pwm_acquire_gpio(struct platform_device *pdev) { int err; if (pdev->id == 0) { err = 0; } else if (pdev->id == 1) { err = 
gpio_request(EP93XX_GPIO_LINE_EGPIO14, dev_name(&pdev->dev)); if (err) return err; err = gpio_direction_output(EP93XX_GPIO_LINE_EGPIO14, 0); if (err) goto fail; /* PWM 1 output on EGPIO[14] */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_PONG); } else { err = -ENODEV; } return err; fail: gpio_free(EP93XX_GPIO_LINE_EGPIO14); return err; } EXPORT_SYMBOL(ep93xx_pwm_acquire_gpio); void ep93xx_pwm_release_gpio(struct platform_device *pdev) { if (pdev->id == 1) { gpio_direction_input(EP93XX_GPIO_LINE_EGPIO14); gpio_free(EP93XX_GPIO_LINE_EGPIO14); /* EGPIO[14] used for GPIO */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_PONG); } } EXPORT_SYMBOL(ep93xx_pwm_release_gpio); /************************************************************************* * EP93xx video peripheral handling *************************************************************************/ static struct ep93xxfb_mach_info ep93xxfb_data; static struct resource ep93xx_fb_resource[] = { { .start = EP93XX_RASTER_PHYS_BASE, .end = EP93XX_RASTER_PHYS_BASE + 0x800 - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device ep93xx_fb_device = { .name = "ep93xx-fb", .id = -1, .dev = { .platform_data = &ep93xxfb_data, .coherent_dma_mask = DMA_BIT_MASK(32), .dma_mask = &ep93xx_fb_device.dev.coherent_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_fb_resource), .resource = ep93xx_fb_resource, }; /* The backlight use a single register in the framebuffer's register space */ #define EP93XX_RASTER_REG_BRIGHTNESS 0x20 static struct resource ep93xx_bl_resources[] = { DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE + EP93XX_RASTER_REG_BRIGHTNESS, 0x04), }; static struct platform_device ep93xx_bl_device = { .name = "ep93xx-bl", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_bl_resources), .resource = ep93xx_bl_resources, }; /** * ep93xx_register_fb - Register the framebuffer platform device. 
* @data: platform specific framebuffer configuration (__initdata) */ void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data) { ep93xxfb_data = *data; platform_device_register(&ep93xx_fb_device); platform_device_register(&ep93xx_bl_device); } /************************************************************************* * EP93xx matrix keypad peripheral handling *************************************************************************/ static struct ep93xx_keypad_platform_data ep93xx_keypad_data; static struct resource ep93xx_keypad_resource[] = { { .start = EP93XX_KEY_MATRIX_PHYS_BASE, .end = EP93XX_KEY_MATRIX_PHYS_BASE + 0x0c - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_EP93XX_KEY, .end = IRQ_EP93XX_KEY, .flags = IORESOURCE_IRQ, }, }; static struct platform_device ep93xx_keypad_device = { .name = "ep93xx-keypad", .id = -1, .dev = { .platform_data = &ep93xx_keypad_data, }, .num_resources = ARRAY_SIZE(ep93xx_keypad_resource), .resource = ep93xx_keypad_resource, }; /** * ep93xx_register_keypad - Register the keypad platform device. 
* @data: platform specific keypad configuration (__initdata) */ void __init ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data) { ep93xx_keypad_data = *data; platform_device_register(&ep93xx_keypad_device); } int ep93xx_keypad_acquire_gpio(struct platform_device *pdev) { int err; int i; for (i = 0; i < 8; i++) { err = gpio_request(EP93XX_GPIO_LINE_C(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_c; err = gpio_request(EP93XX_GPIO_LINE_D(i), dev_name(&pdev->dev)); if (err) goto fail_gpio_d; } /* Enable the keypad controller; GPIO ports C and D used for keypad */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_KEYS | EP93XX_SYSCON_DEVCFG_GONK); return 0; fail_gpio_d: gpio_free(EP93XX_GPIO_LINE_C(i)); fail_gpio_c: for ( ; i >= 0; --i) { gpio_free(EP93XX_GPIO_LINE_C(i)); gpio_free(EP93XX_GPIO_LINE_D(i)); } return err; } EXPORT_SYMBOL(ep93xx_keypad_acquire_gpio); void ep93xx_keypad_release_gpio(struct platform_device *pdev) { int i; for (i = 0; i < 8; i++) { gpio_free(EP93XX_GPIO_LINE_C(i)); gpio_free(EP93XX_GPIO_LINE_D(i)); } /* Disable the keypad controller; GPIO ports C and D used for GPIO */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS | EP93XX_SYSCON_DEVCFG_GONK); } EXPORT_SYMBOL(ep93xx_keypad_release_gpio); /************************************************************************* * EP93xx I2S audio peripheral handling *************************************************************************/ static struct resource ep93xx_i2s_resource[] = { { .start = EP93XX_I2S_PHYS_BASE, .end = EP93XX_I2S_PHYS_BASE + 0x100 - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device ep93xx_i2s_device = { .name = "ep93xx-i2s", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_i2s_resource), .resource = ep93xx_i2s_resource, }; static struct platform_device ep93xx_pcm_device = { .name = "ep93xx-pcm-audio", .id = -1, }; void __init ep93xx_register_i2s(void) { platform_device_register(&ep93xx_i2s_device); platform_device_register(&ep93xx_pcm_device); } #define 
EP93XX_SYSCON_DEVCFG_I2S_MASK (EP93XX_SYSCON_DEVCFG_I2SONSSP | \ EP93XX_SYSCON_DEVCFG_I2SONAC97) #define EP93XX_I2SCLKDIV_MASK (EP93XX_SYSCON_I2SCLKDIV_ORIDE | \ EP93XX_SYSCON_I2SCLKDIV_SPOL) int ep93xx_i2s_acquire(void) { unsigned val; ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_I2SONAC97, EP93XX_SYSCON_DEVCFG_I2S_MASK); /* * This is potentially racy with the clock api for i2s_mclk, sclk and * lrclk. Since the i2s driver is the only user of those clocks we * rely on it to prevent parallel use of this function and the * clock api for the i2s clocks. */ val = __raw_readl(EP93XX_SYSCON_I2SCLKDIV); val &= ~EP93XX_I2SCLKDIV_MASK; val |= EP93XX_SYSCON_I2SCLKDIV_ORIDE | EP93XX_SYSCON_I2SCLKDIV_SPOL; ep93xx_syscon_swlocked_write(val, EP93XX_SYSCON_I2SCLKDIV); return 0; } EXPORT_SYMBOL(ep93xx_i2s_acquire); void ep93xx_i2s_release(void) { ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2S_MASK); } EXPORT_SYMBOL(ep93xx_i2s_release); /************************************************************************* * EP93xx AC97 audio peripheral handling *************************************************************************/ static struct resource ep93xx_ac97_resources[] = { { .start = EP93XX_AAC_PHYS_BASE, .end = EP93XX_AAC_PHYS_BASE + 0xac - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_EP93XX_AACINTR, .end = IRQ_EP93XX_AACINTR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device ep93xx_ac97_device = { .name = "ep93xx-ac97", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_ac97_resources), .resource = ep93xx_ac97_resources, }; void __init ep93xx_register_ac97(void) { /* * Make sure that the AC97 pins are not used by I2S. 
*/ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97); platform_device_register(&ep93xx_ac97_device); platform_device_register(&ep93xx_pcm_device); } /************************************************************************* * EP93xx Watchdog *************************************************************************/ static struct resource ep93xx_wdt_resources[] = { DEFINE_RES_MEM(EP93XX_WATCHDOG_PHYS_BASE, 0x08), }; static struct platform_device ep93xx_wdt_device = { .name = "ep93xx-wdt", .id = -1, .num_resources = ARRAY_SIZE(ep93xx_wdt_resources), .resource = ep93xx_wdt_resources, }; void __init ep93xx_init_devices(void) { /* Disallow access to MaverickCrunch initially */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA); /* Default all ports to GPIO */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS | EP93XX_SYSCON_DEVCFG_GONK | EP93XX_SYSCON_DEVCFG_EONIDE | EP93XX_SYSCON_DEVCFG_GONIDE | EP93XX_SYSCON_DEVCFG_HONIDE); /* Get the GPIO working early, other devices need it */ platform_device_register(&ep93xx_gpio_device); amba_device_register(&uart1_device, &iomem_resource); amba_device_register(&uart2_device, &iomem_resource); amba_device_register(&uart3_device, &iomem_resource); platform_device_register(&ep93xx_rtc_device); platform_device_register(&ep93xx_ohci_device); platform_device_register(&ep93xx_leds); platform_device_register(&ep93xx_wdt_device); } void ep93xx_restart(char mode, const char *cmd) { /* * Set then clear the SWRST bit to initiate a software reset */ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_SWRST); ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_SWRST); while (1) ; }
gpl-2.0
kennysgithub/sm-p607t-kernel
arch/arm/mach-tegra/board-harmony-pinmux.c
4721
12156
/* * arch/arm/mach-tegra/board-harmony-pinmux.c * * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/of.h> #include <mach/pinmux.h> #include <mach/pinmux-tegra20.h> #include "gpio-names.h" #include "board-harmony.h" #include "board-pinmux.h" static struct tegra_pingroup_config harmony_pinmux[] = { {TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_PLLA_OUT, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DTA, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DTB, 
TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DTC, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DTD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DTE, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GMB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GMC, TEGRA_MUX_UARTD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GMD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GME, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GPU, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_KBCB, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_KBCD, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LCSN, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, 
{TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LDC, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LM0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LM1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LPP, 
TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LPW0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LPW1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LPW2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LSC1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LSCK, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LSDA, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LSDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LVP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PTA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SDB, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SDC, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SDD, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SPDI, 
TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SPIB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, 
TEGRA_TRI_NORMAL}, }; static struct tegra_gpio_table gpio_table[] = { { .gpio = TEGRA_GPIO_SD2_CD, .enable = true }, { .gpio = TEGRA_GPIO_SD2_WP, .enable = true }, { .gpio = TEGRA_GPIO_SD2_POWER, .enable = true }, { .gpio = TEGRA_GPIO_SD4_CD, .enable = true }, { .gpio = TEGRA_GPIO_SD4_WP, .enable = true }, { .gpio = TEGRA_GPIO_SD4_POWER, .enable = true }, { .gpio = TEGRA_GPIO_CDC_IRQ, .enable = true }, { .gpio = TEGRA_GPIO_HP_DET, .enable = true }, { .gpio = TEGRA_GPIO_INT_MIC_EN, .enable = true }, { .gpio = TEGRA_GPIO_EXT_MIC_EN, .enable = true }, }; static struct tegra_board_pinmux_conf conf = { .pgs = harmony_pinmux, .pg_count = ARRAY_SIZE(harmony_pinmux), .gpios = gpio_table, .gpio_count = ARRAY_SIZE(gpio_table), }; void harmony_pinmux_init(void) { tegra_board_pinmux_init(&conf, NULL); }
gpl-2.0
roguesyko/the_reaper_g3
drivers/atm/zatm.c
4977
44403
/* drivers/atm/zatm.c - ZeitNet ZN122x device driver */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/atm_zatm.h> #include <linux/capability.h> #include <linux/bitops.h> #include <linux/wait.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <asm/string.h> #include <asm/io.h> #include <linux/atomic.h> #include <asm/uaccess.h> #include "uPD98401.h" #include "uPD98402.h" #include "zeprom.h" #include "zatm.h" /* * TODO: * * Minor features * - support 64 kB SDUs (will have to use multibuffer batches then :-( ) * - proper use of CDV, credit = max(1,CDVT*PCR) * - AAL0 * - better receive timestamps * - OAM */ #define ZATM_COPPER 1 #if 0 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) #else #define DPRINTK(format,args...) #endif #ifndef CONFIG_ATM_ZATM_DEBUG #define NULLCHECK(x) #define EVENT(s,a,b) static void event_dump(void) { } #else /* * NULL pointer checking */ #define NULLCHECK(x) \ if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x)) /* * Very extensive activity logging. Greatly improves bug detection speed but * costs a few Mbps if enabled. */ #define EV 64 static const char *ev[EV]; static unsigned long ev_a[EV],ev_b[EV]; static int ec = 0; static void EVENT(const char *s,unsigned long a,unsigned long b) { ev[ec] = s; ev_a[ec] = a; ev_b[ec] = b; ec = (ec+1) % EV; } static void event_dump(void) { int n,i; printk(KERN_NOTICE "----- event dump follows -----\n"); for (n = 0; n < EV; n++) { i = (ec+n) % EV; printk(KERN_NOTICE); printk(ev[i] ? 
ev[i] : "(null)",ev_a[i],ev_b[i]); } printk(KERN_NOTICE "----- event dump ends here -----\n"); } #endif /* CONFIG_ATM_ZATM_DEBUG */ #define RING_BUSY 1 /* indication from do_tx that PDU has to be backlogged */ static struct atm_dev *zatm_boards = NULL; static unsigned long dummy[2] = {0,0}; #define zin_n(r) inl(zatm_dev->base+r*4) #define zin(r) inl(zatm_dev->base+uPD98401_##r*4) #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4) #define zwait while (zin(CMR) & uPD98401_BUSY) /* RX0, RX1, TX0, TX1 */ static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 }; static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */ #define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i]) /*-------------------------------- utilities --------------------------------*/ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) { zwait; zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_BALL | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); } static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr) { zwait; zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); zwait; return zin(CER); } /*------------------------------- free lists --------------------------------*/ /* * Free buffer head structure: * [0] pointer to buffer (for SAR) * [1] buffer descr link pointer (for SAR) * [2] back pointer to skb (for poll_rx) * [3] data * ... */ struct rx_buffer_head { u32 buffer; /* pointer to buffer (for SAR) */ u32 link; /* buffer descriptor link pointer (for SAR) */ struct sk_buff *skb; /* back pointer to skb (for poll_rx) */ }; static void refill_pool(struct atm_dev *dev,int pool) { struct zatm_dev *zatm_dev; struct sk_buff *skb; struct rx_buffer_head *first; unsigned long flags; int align,offset,free,count,size; EVENT("refill_pool\n",0,0); zatm_dev = ZATM_DEV(dev); size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 
0 : pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head); if (size < PAGE_SIZE) { align = 32; /* for 32 byte alignment */ offset = sizeof(struct rx_buffer_head); } else { align = 4096; offset = zatm_dev->pool_info[pool].offset+ sizeof(struct rx_buffer_head); } size += align; spin_lock_irqsave(&zatm_dev->lock, flags); free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) & uPD98401_RXFP_REMAIN; spin_unlock_irqrestore(&zatm_dev->lock, flags); if (free >= zatm_dev->pool_info[pool].low_water) return; EVENT("starting ... POOL: 0x%x, 0x%x\n", zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); count = 0; first = NULL; while (free < zatm_dev->pool_info[pool].high_water) { struct rx_buffer_head *head; skb = alloc_skb(size,GFP_ATOMIC); if (!skb) { printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new " "skb (%d) with %d free\n",dev->number,size,free); break; } skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+ align+offset-1) & ~(unsigned long) (align-1))-offset)- skb->data); head = (struct rx_buffer_head *) skb->data; skb_reserve(skb,sizeof(struct rx_buffer_head)); if (!first) first = head; count++; head->buffer = virt_to_bus(skb->data); head->link = 0; head->skb = skb; EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb, (unsigned long) head); spin_lock_irqsave(&zatm_dev->lock, flags); if (zatm_dev->last_free[pool]) ((struct rx_buffer_head *) (zatm_dev->last_free[pool]-> data))[-1].link = virt_to_bus(head); zatm_dev->last_free[pool] = skb; skb_queue_tail(&zatm_dev->pool[pool],skb); spin_unlock_irqrestore(&zatm_dev->lock, flags); free++; } if (first) { spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(virt_to_bus(first),CER); zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count, CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); EVENT ("POOL: 0x%x, 0x%x\n", zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); 
EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); } } static void drain_free(struct atm_dev *dev,int pool) { skb_queue_purge(&ZATM_DEV(dev)->pool[pool]); } static int pool_index(int max_pdu) { int i; if (max_pdu % ATM_CELL_PAYLOAD) printk(KERN_ERR DEV_LABEL ": driver error in pool_index: " "max_pdu is %d\n",max_pdu); if (max_pdu > 65536) return -1; for (i = 0; (64 << i) < max_pdu; i++); return i+ZATM_AAL5_POOL_BASE; } /* use_pool isn't reentrant */ static void use_pool(struct atm_dev *dev,int pool) { struct zatm_dev *zatm_dev; unsigned long flags; int size; zatm_dev = ZATM_DEV(dev); if (!(zatm_dev->pool_info[pool].ref_count++)) { skb_queue_head_init(&zatm_dev->pool[pool]); size = pool-ZATM_AAL5_POOL_BASE; if (size < 0) size = 0; /* 64B... */ else if (size > 10) size = 10; /* ... 64kB */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) << uPD98401_RXFP_ALERT_SHIFT) | (1 << uPD98401_RXFP_BTSZ_SHIFT) | (size << uPD98401_RXFP_BFSZ_SHIFT), zatm_dev->pool_base+pool*2); zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+ pool*2+1); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->last_free[pool] = NULL; refill_pool(dev,pool); } DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count); } static void unuse_pool(struct atm_dev *dev,int pool) { if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count)) drain_free(dev,pool); } /*----------------------------------- RX ------------------------------------*/ #if 0 static void exception(struct atm_vcc *vcc) { static int count = 0; struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev); struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc); unsigned long *qrp; int i; if (count++ > 2) return; for (i = 0; i < 8; i++) printk("TX%d: 0x%08lx\n",i, zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i)); for (i = 0; i < 5; i++) printk("SH%d: 0x%08lx\n",i, zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i)); qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ 
uPD98401_TXVC_QRP); printk("qrp=0x%08lx\n",(unsigned long) qrp); for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]); } #endif static const char *err_txt[] = { "No error", "RX buf underflow", "RX FIFO overrun", "Maximum len violation", "CRC error", "User abort", "Length violation", "T1 error", "Deactivated", "???", "???", "???", "???", "???", "???", "???" }; static void poll_rx(struct atm_dev *dev,int mbx) { struct zatm_dev *zatm_dev; unsigned long pos; u32 x; int error; EVENT("poll_rx\n",0,0); zatm_dev = ZATM_DEV(dev); pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { u32 *here; struct sk_buff *skb; struct atm_vcc *vcc; int cells,size,chan; EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); here = (u32 *) pos; if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx]) pos = zatm_dev->mbx_start[mbx]; cells = here[0] & uPD98401_AAL5_SIZE; #if 0 printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]); { unsigned long *x; printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev, zatm_dev->pool_base), zpeekl(zatm_dev,zatm_dev->pool_base+1)); x = (unsigned long *) here[2]; printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n", x[0],x[1],x[2],x[3]); } #endif error = 0; if (here[3] & uPD98401_AAL5_ERR) { error = (here[3] & uPD98401_AAL5_ES) >> uPD98401_AAL5_ES_SHIFT; if (error == uPD98401_AAL5_ES_DEACT || error == uPD98401_AAL5_ES_FREE) continue; } EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >> uPD98401_AAL5_ES_SHIFT,error); skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb; __net_timestamp(skb); #if 0 printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3], ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1], ((unsigned *) skb->data)[0]); #endif EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb, (unsigned long) here); #if 0 printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); #endif size = error ? 
0 : ntohs(((__be16 *) skb->data)[cells* ATM_CELL_PAYLOAD/sizeof(u16)-3]); EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size); chan = (here[3] & uPD98401_AAL5_CHAN) >> uPD98401_AAL5_CHAN_SHIFT; if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { int pos; vcc = zatm_dev->rx_map[chan]; pos = ZATM_VCC(vcc)->pool; if (skb == zatm_dev->last_free[pos]) zatm_dev->last_free[pos] = NULL; skb_unlink(skb, zatm_dev->pool + pos); } else { printk(KERN_ERR DEV_LABEL "(itf %d): RX indication " "for non-existing channel\n",dev->number); size = 0; vcc = NULL; event_dump(); } if (error) { static unsigned long silence = 0; static int last_error = 0; if (error != last_error || time_after(jiffies, silence) || silence == 0){ printk(KERN_WARNING DEV_LABEL "(itf %d): " "chan %d error %s\n",dev->number,chan, err_txt[error]); last_error = error; silence = (jiffies+2*HZ)|1; } size = 0; } if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER || size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) { printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d " "cells\n",dev->number,size,cells); size = 0; event_dump(); } if (size > ATM_MAX_AAL5_PDU) { printk(KERN_ERR DEV_LABEL "(itf %d): size too big " "(%d)\n",dev->number,size); size = 0; event_dump(); } if (!size) { dev_kfree_skb_irq(skb); if (vcc) atomic_inc(&vcc->stats->rx_err); continue; } if (!atm_charge(vcc,skb->truesize)) { dev_kfree_skb_irq(skb); continue; } skb->len = size; ATM_SKB(skb)->vcc = vcc; vcc->push(vcc,skb); atomic_inc(&vcc->stats->rx); } zout(pos & 0xffff,MTA(mbx)); #if 0 /* probably a stupid idea */ refill_pool(dev,zatm_vcc->pool); /* maybe this saves us a few interrupts */ #endif } static int open_rx_first(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; unsigned short chan; int cells; DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053)); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); zatm_vcc->rx_chan = 0; if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 
0; if (vcc->qos.aal == ATM_AAL5) { if (vcc->qos.rxtp.max_sdu > 65464) vcc->qos.rxtp.max_sdu = 65464; /* fix this - we may want to receive 64kB SDUs later */ cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD); zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD); } else { cells = 1; zatm_vcc->pool = ZATM_AAL0_POOL; } if (zatm_vcc->pool < 0) return -EMSGSIZE; spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_OPEN_CHAN,CMR); zwait; DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); DPRINTK("chan is %d\n",chan); if (!chan) return -EAGAIN; use_pool(vcc->dev,zatm_vcc->pool); DPRINTK("pool %d\n",zatm_vcc->pool); /* set up VC descriptor */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT, chan*VC_SIZE/4); zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ? uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1); zpokel(zatm_dev,0,chan*VC_SIZE/4+2); zatm_vcc->rx_chan = chan; zatm_dev->rx_map[chan] = vcc; spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } static int open_rx_second(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int pos,shift; DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053)); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); if (!zatm_vcc->rx_chan) return 0; spin_lock_irqsave(&zatm_dev->lock, flags); /* should also handle VPI @@@ */ pos = vcc->vci >> 1; shift = (1-(vcc->vci & 1)) << 4; zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) | ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos); spin_unlock_irqrestore(&zatm_dev->lock, flags); return 0; } static void close_rx(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int pos,shift; zatm_vcc = ZATM_VCC(vcc); zatm_dev = ZATM_DEV(vcc->dev); if (!zatm_vcc->rx_chan) return; 
DPRINTK("close_rx\n"); /* disable receiver */ if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) { spin_lock_irqsave(&zatm_dev->lock, flags); pos = vcc->vci >> 1; shift = (1-(vcc->vci & 1)) << 4; zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos); zwait; zout(uPD98401_NOP,CMR); zwait; zout(uPD98401_NOP,CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); } spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; udelay(10); /* why oh why ... ? */ zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel " "%d\n",vcc->dev->number,zatm_vcc->rx_chan); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL; zatm_vcc->rx_chan = 0; unuse_pool(vcc->dev,zatm_vcc->pool); } static int start_rx(struct atm_dev *dev) { struct zatm_dev *zatm_dev; int size,i; DPRINTK("start_rx\n"); zatm_dev = ZATM_DEV(dev); size = sizeof(struct atm_vcc *)*zatm_dev->chans; zatm_dev->rx_map = kzalloc(size,GFP_KERNEL); if (!zatm_dev->rx_map) return -ENOMEM; /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */ zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR); /* prepare free buffer pools */ for (i = 0; i <= ZATM_LAST_POOL; i++) { zatm_dev->pool_info[i].ref_count = 0; zatm_dev->pool_info[i].rqa_count = 0; zatm_dev->pool_info[i].rqu_count = 0; zatm_dev->pool_info[i].low_water = LOW_MARK; zatm_dev->pool_info[i].high_water = HIGH_MARK; zatm_dev->pool_info[i].offset = 0; zatm_dev->pool_info[i].next_off = 0; zatm_dev->pool_info[i].next_cnt = 0; zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES; } return 0; } /*----------------------------------- TX ------------------------------------*/ static int do_tx(struct sk_buff *skb) { struct atm_vcc *vcc; struct zatm_dev *zatm_dev; 
struct zatm_vcc *zatm_vcc; u32 *dsc; unsigned long flags; EVENT("do_tx\n",0,0); DPRINTK("sending skb %p\n",skb); vcc = ATM_SKB(skb)->vcc; zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0); spin_lock_irqsave(&zatm_dev->lock, flags); if (!skb_shinfo(skb)->nr_frags) { if (zatm_vcc->txing == RING_ENTRIES-1) { spin_unlock_irqrestore(&zatm_dev->lock, flags); return RING_BUSY; } zatm_vcc->txing++; dsc = zatm_vcc->ring+zatm_vcc->ring_curr; zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) & (RING_ENTRIES*RING_WORDS-1); dsc[1] = 0; dsc[2] = skb->len; dsc[3] = virt_to_bus(skb->data); mb(); dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? uPD98401_CLPM_1 : uPD98401_CLPM_0)); EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0); } else { printk("NONONONOO!!!!\n"); dsc = NULL; #if 0 u32 *put; int i; dsc = kmalloc(uPD98401_TXPD_SIZE * 2 + uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC); if (!dsc) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_irq(skb); return -EAGAIN; } /* @@@ should check alignment */ put = dsc+8; dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 
uPD98401_CLPM_1 : uPD98401_CLPM_0)); dsc[1] = 0; dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE; dsc[3] = virt_to_bus(put); for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) { *put++ = ((struct iovec *) skb->data)[i].iov_len; *put++ = virt_to_bus(((struct iovec *) skb->data)[i].iov_base); } put[-2] |= uPD98401_TXBD_LAST; #endif } ZATM_PRV_DSC(skb) = dsc; skb_queue_tail(&zatm_vcc->tx_queue,skb); DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ uPD98401_TXVC_QRP)); zwait; zout(uPD98401_TX_READY | (zatm_vcc->tx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); EVENT("done\n",0,0); return 0; } static inline void dequeue_tx(struct atm_vcc *vcc) { struct zatm_vcc *zatm_vcc; struct sk_buff *skb; EVENT("dequeue_tx\n",0,0); zatm_vcc = ZATM_VCC(vcc); skb = skb_dequeue(&zatm_vcc->tx_queue); if (!skb) { printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not " "txing\n",vcc->dev->number); return; } #if 0 /* @@@ would fail on CLP */ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! 
(%08x)\n", *ZATM_PRV_DSC(skb)); #endif *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */ zatm_vcc->txing--; if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb_irq(skb); while ((skb = skb_dequeue(&zatm_vcc->backlog))) if (do_tx(skb) == RING_BUSY) { skb_queue_head(&zatm_vcc->backlog,skb); break; } atomic_inc(&vcc->stats->tx); wake_up(&zatm_vcc->tx_wait); } static void poll_tx(struct atm_dev *dev,int mbx) { struct zatm_dev *zatm_dev; unsigned long pos; u32 x; EVENT("poll_tx\n",0,0); zatm_dev = ZATM_DEV(dev); pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { int chan; #if 1 u32 data,*addr; EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); addr = (u32 *) pos; data = *addr; chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT; EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr, data); EVENT("chan = %d\n",chan,0); #else NO ! chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT; #endif if (chan < zatm_dev->chans && zatm_dev->tx_map[chan]) dequeue_tx(zatm_dev->tx_map[chan]); else { printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication " "for non-existing channel %d\n",dev->number,chan); event_dump(); } if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx]) pos = zatm_dev->mbx_start[mbx]; } zout(pos & 0xffff,MTA(mbx)); } /* * BUG BUG BUG: Doesn't handle "new-style" rate specification yet. 
*/ static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr) { struct zatm_dev *zatm_dev; unsigned long flags; unsigned long i,m,c; int shaper; DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max); zatm_dev = ZATM_DEV(dev); if (!zatm_dev->free_shapers) return -EAGAIN; for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++); zatm_dev->free_shapers &= ~1 << shaper; if (ubr) { c = 5; i = m = 1; zatm_dev->ubr_ref_cnt++; zatm_dev->ubr = shaper; *pcr = 0; } else { if (min) { if (min <= 255) { i = min; m = ATM_OC3_PCR; } else { i = 255; m = ATM_OC3_PCR*255/min; } } else { if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw; if (max <= 255) { i = max; m = ATM_OC3_PCR; } else { i = 255; m = DIV_ROUND_UP(ATM_OC3_PCR*255, max); } } if (i > m) { printk(KERN_CRIT DEV_LABEL "shaper algorithm botched " "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m); m = i; } *pcr = i*ATM_OC3_PCR/m; c = 20; /* @@@ should use max_cdv ! */ if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL; if (zatm_dev->tx_bw < *pcr) return -EAGAIN; zatm_dev->tx_bw -= *pcr; } spin_lock_irqsave(&zatm_dev->lock, flags); DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr); zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper)); zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper)); zpokel(zatm_dev,0,uPD98401_X(shaper)); zpokel(zatm_dev,0,uPD98401_Y(shaper)); zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper)); spin_unlock_irqrestore(&zatm_dev->lock, flags); return shaper; } static void dealloc_shaper(struct atm_dev *dev,int shaper) { struct zatm_dev *zatm_dev; unsigned long flags; zatm_dev = ZATM_DEV(dev); if (shaper == zatm_dev->ubr) { if (--zatm_dev->ubr_ref_cnt) return; zatm_dev->ubr = -1; } spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E, uPD98401_PS(shaper)); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->free_shapers |= 1 << shaper; } static void close_tx(struct atm_vcc *vcc) { struct 
zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; int chan; zatm_vcc = ZATM_VCC(vcc); zatm_dev = ZATM_DEV(vcc->dev); chan = zatm_vcc->tx_chan; if (!chan) return; DPRINTK("close_tx\n"); if (skb_peek(&zatm_vcc->backlog)) { printk("waiting for backlog to drain ...\n"); event_dump(); wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog)); } if (skb_peek(&zatm_vcc->tx_queue)) { printk("waiting for TX queue to drain ...\n"); event_dump(); wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue)); } spin_lock_irqsave(&zatm_dev->lock, flags); #if 0 zwait; zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); #endif zwait; zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); zwait; if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel " "%d\n",vcc->dev->number,chan); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_vcc->tx_chan = 0; zatm_dev->tx_map[chan] = NULL; if (zatm_vcc->shaper != zatm_dev->ubr) { zatm_dev->tx_bw += vcc->qos.txtp.min_pcr; dealloc_shaper(vcc->dev,zatm_vcc->shaper); } kfree(zatm_vcc->ring); } static int open_tx_first(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; u32 *loop; unsigned short chan; int unlimited; DPRINTK("open_tx_first\n"); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); zatm_vcc->tx_chan = 0; if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; spin_lock_irqsave(&zatm_dev->lock, flags); zwait; zout(uPD98401_OPEN_CHAN,CMR); zwait; DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); DPRINTK("chan is %d\n",chan); if (!chan) return -EAGAIN; unlimited = vcc->qos.txtp.traffic_class == ATM_UBR && (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR || vcc->qos.txtp.max_pcr >= ATM_OC3_PCR); if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr; else { 
int uninitialized_var(pcr); if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU; if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr, vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited)) < 0) { close_tx(vcc); return zatm_vcc->shaper; } if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR; vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr; } zatm_vcc->tx_chan = chan; skb_queue_head_init(&zatm_vcc->tx_queue); init_waitqueue_head(&zatm_vcc->tx_wait); /* initialize ring */ zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL); if (!zatm_vcc->ring) return -ENOMEM; loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS; loop[0] = uPD98401_TXPD_V; loop[1] = loop[2] = 0; loop[3] = virt_to_bus(zatm_vcc->ring); zatm_vcc->ring_curr = 0; zatm_vcc->txing = 0; skb_queue_head_init(&zatm_vcc->backlog); zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring), chan*VC_SIZE/4+uPD98401_TXVC_QRP); return 0; } static int open_tx_second(struct atm_vcc *vcc) { struct zatm_dev *zatm_dev; struct zatm_vcc *zatm_vcc; unsigned long flags; DPRINTK("open_tx_second\n"); zatm_dev = ZATM_DEV(vcc->dev); zatm_vcc = ZATM_VCC(vcc); if (!zatm_vcc->tx_chan) return 0; /* set up VC descriptor */ spin_lock_irqsave(&zatm_dev->lock, flags); zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4); zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper << uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) | vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1); zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2); spin_unlock_irqrestore(&zatm_dev->lock, flags); zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc; return 0; } static int start_tx(struct atm_dev *dev) { struct zatm_dev *zatm_dev; int i; DPRINTK("start_tx\n"); zatm_dev = ZATM_DEV(dev); zatm_dev->tx_map = kmalloc(sizeof(struct atm_vcc *)* zatm_dev->chans,GFP_KERNEL); if (!zatm_dev->tx_map) return -ENOMEM; zatm_dev->tx_bw = ATM_OC3_PCR; zatm_dev->free_shapers = (1 << NR_SHAPERS)-1; zatm_dev->ubr = -1; zatm_dev->ubr_ref_cnt = 0; /* initialize shapers */ for (i = 0; i < NR_SHAPERS; i++) 
zpokel(zatm_dev,0,uPD98401_PS(i)); return 0; } /*------------------------------- interrupts --------------------------------*/ static irqreturn_t zatm_int(int irq,void *dev_id) { struct atm_dev *dev; struct zatm_dev *zatm_dev; u32 reason; int handled = 0; dev = dev_id; zatm_dev = ZATM_DEV(dev); while ((reason = zin(GSR))) { handled = 1; EVENT("reason 0x%x\n",reason,0); if (reason & uPD98401_INT_PI) { EVENT("PHY int\n",0,0); dev->phy->interrupt(dev); } if (reason & uPD98401_INT_RQA) { unsigned long pools; int i; pools = zin(RQA); EVENT("RQA (0x%08x)\n",pools,0); for (i = 0; pools; i++) { if (pools & 1) { refill_pool(dev,i); zatm_dev->pool_info[i].rqa_count++; } pools >>= 1; } } if (reason & uPD98401_INT_RQU) { unsigned long pools; int i; pools = zin(RQU); printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n", dev->number,pools); event_dump(); for (i = 0; pools; i++) { if (pools & 1) { refill_pool(dev,i); zatm_dev->pool_info[i].rqu_count++; } pools >>= 1; } } /* don't handle RD */ if (reason & uPD98401_INT_SPE) printk(KERN_ALERT DEV_LABEL "(itf %d): system parity " "error at 0x%08x\n",dev->number,zin(ADDR)); if (reason & uPD98401_INT_CPE) printk(KERN_ALERT DEV_LABEL "(itf %d): control memory " "parity error at 0x%08x\n",dev->number,zin(ADDR)); if (reason & uPD98401_INT_SBE) { printk(KERN_ALERT DEV_LABEL "(itf %d): system bus " "error at 0x%08x\n",dev->number,zin(ADDR)); event_dump(); } /* don't handle IND */ if (reason & uPD98401_INT_MF) { printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full " "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF) >> uPD98401_INT_MF_SHIFT); event_dump(); /* @@@ should try to recover */ } if (reason & uPD98401_INT_MM) { if (reason & 1) poll_rx(dev,0); if (reason & 2) poll_rx(dev,1); if (reason & 4) poll_tx(dev,2); if (reason & 8) poll_tx(dev,3); } /* @@@ handle RCRn */ } return IRQ_RETVAL(handled); } /*----------------------------- (E)EPROM access -----------------------------*/ static void __devinit eprom_set(struct zatm_dev 
*zatm_dev,unsigned long value, unsigned short cmd) { int error; if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value))) printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n", error); } static unsigned long __devinit eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd) { unsigned int value; int error; if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value))) printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n", error); return value; } static void __devinit eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,int bits,unsigned short cmd) { unsigned long value; int i; for (i = bits-1; i >= 0; i--) { value = ZEPROM_CS | (((data >> i) & 1) ? ZEPROM_DI : 0); eprom_set(zatm_dev,value,cmd); eprom_set(zatm_dev,value | ZEPROM_SK,cmd); eprom_set(zatm_dev,value,cmd); } } static void __devinit eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,unsigned short cmd) { int i; *byte = 0; for (i = 8; i; i--) { eprom_set(zatm_dev,ZEPROM_CS,cmd); eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd); *byte <<= 1; if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1; eprom_set(zatm_dev,ZEPROM_CS,cmd); } } static unsigned char __devinit eprom_try_esi(struct atm_dev *dev, unsigned short cmd,int offset,int swap) { unsigned char buf[ZEPROM_SIZE]; struct zatm_dev *zatm_dev; int i; zatm_dev = ZATM_DEV(dev); for (i = 0; i < ZEPROM_SIZE; i += 2) { eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */ eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd); eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd); eprom_get_byte(zatm_dev,buf+i+swap,cmd); eprom_get_byte(zatm_dev,buf+i+1-swap,cmd); eprom_set(zatm_dev,0,cmd); /* deselect EPROM */ } memcpy(dev->esi,buf+offset,ESI_LEN); return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */ } static void __devinit eprom_get_esi(struct atm_dev *dev) { if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return; (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0); } 
/*--------------------------------- entries ---------------------------------*/


/*
 * One-time hardware bring-up: enable PCI I/O + bus mastering, read the ESI
 * from EPROM, reset the uPD98401, size the control RAM by write/read-back
 * probing, and measure the chip clock via the TSR timestamp register.
 * Returns 0 (via uPD98402_init()) or a negative errno.
 */
static int __devinit zatm_init(struct atm_dev *dev)
{
	struct zatm_dev *zatm_dev;
	struct pci_dev *pci_dev;
	unsigned short command;
	int error,i,last;
	unsigned long t0,t1,t2;

	DPRINTK(">zatm_init\n");
	zatm_dev = ZATM_DEV(dev);
	spin_lock_init(&zatm_dev->lock);
	pci_dev = zatm_dev->pci_dev;
	zatm_dev->base = pci_resource_start(pci_dev, 0);
	zatm_dev->irq = pci_dev->irq;
	if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n",
		    dev->number,error);
		return -EINVAL;
	}
	if ((error = pci_write_config_word(pci_dev,PCI_COMMAND,
	    command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) {
		printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)"
		    "\n",dev->number,error);
		return -EIO;
	}
	eprom_get_esi(dev);
	printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,",
	    dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq);
	/* reset uPD98401 */
	zout(0,SWR);
	while (!(zin(GSR) & uPD98401_INT_IND));	/* busy-wait for reset done */
	zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR);
	/*
	 * Probe control-memory size: walk down from MAX_CRAM_SIZE writing
	 * 0x55/0xAA test patterns; the lowest address that fails read-back
	 * marks the end of RAM.  Surviving words are tagged with their own
	 * address so the second pass can find the first hole.
	 */
	last = MAX_CRAM_SIZE;
	for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) {
		zpokel(zatm_dev,0x55555555,i);
		if (zpeekl(zatm_dev,i) != 0x55555555) last = i;
		else {
			zpokel(zatm_dev,0xAAAAAAAA,i);
			if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i;
			else zpokel(zatm_dev,i,i);
		}
	}
	for (i = 0; i < last; i += RAM_INCREMENT)
		if (zpeekl(zatm_dev,i) != i) break;
	zatm_dev->mem = i << 2;		/* words -> bytes */
	while (i) zpokel(zatm_dev,0,--i);	/* clear the probed RAM */
	/* reset again to rebuild memory pointers */
	zout(0,SWR);
	while (!(zin(GSR) & uPD98401_INT_IND));
	zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 |
	    uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR);
	/* TODO: should shrink allocation now */
	printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" :
	    "MMF");
	for (i = 0; i < ESI_LEN; i++)
		printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? ")\n" : "-");
	/*
	 * Estimate the chip clock: sample TSR at t, t+10us, t+1020us and
	 * derive kHz from the deltas; retry if the counter wrapped between
	 * samples (non-monotonic readings).
	 */
	do {
		unsigned long flags;

		spin_lock_irqsave(&zatm_dev->lock, flags);
		t0 = zpeekl(zatm_dev,uPD98401_TSR);
		udelay(10);
		t1 = zpeekl(zatm_dev,uPD98401_TSR);
		udelay(1010);
		t2 = zpeekl(zatm_dev,uPD98401_TSR);
		spin_unlock_irqrestore(&zatm_dev->lock, flags);
	}
	while (t0 > t1 || t1 > t2);	/* loop if wrapping ... */
	zatm_dev->khz = t2-2*t1+t0;	/* (t2-t1)-(t1-t0): ticks per 1000us */
	printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d "
	    "MHz\n",dev->number,
	    (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT,
	    zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000);
	return uPD98402_init(dev);
}


/*
 * Second-stage start: grab the IRQ, carve the probed control memory into
 * RX pools / shapers / VC space, allocate the DMA mailboxes, then start
 * TX, RX and the PHY and enable interrupts.  On failure, unwinds via the
 * goto chain and returns the error through the "done" exit.
 */
static int __devinit zatm_start(struct atm_dev *dev)
{
	struct zatm_dev *zatm_dev = ZATM_DEV(dev);
	struct pci_dev *pdev = zatm_dev->pci_dev;
	unsigned long curr;
	int pools,vccs,rx;
	int error, i, ld;

	DPRINTK("zatm_start\n");
	zatm_dev->rx_map = zatm_dev->tx_map = NULL;
	for (i = 0; i < NR_MBX; i++)
		zatm_dev->mbx_start[i] = 0;
	error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL,
	    dev);
	if (error < 0) {
		printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
		    dev->number,zatm_dev->irq);
		goto done;
	}
	/* define memory regions */
	pools = NR_POOLS;
	if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE)
		pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE;
	vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/
	    (2*VC_SIZE+RX_SIZE);
	ld = -1;
	/* ld = floor(log2(vccs)): number of usable VCI bits */
	for (rx = 1; rx < vccs; rx <<= 1) ld++;
	dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */
	dev->ci_range.vci_bits = ld;
	dev->link_rate = ATM_OC3_PCR;
	zatm_dev->chans = vccs; /* ??? */
	curr = rx*RX_SIZE/4;
	DPRINTK("RX pool 0x%08lx\n",curr);
	zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */
	zatm_dev->pool_base = curr;
	curr += pools*POOL_SIZE/4;
	DPRINTK("Shapers 0x%08lx\n",curr);
	zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */
	curr += NR_SHAPERS*SHAPER_SIZE/4;
	DPRINTK("Free 0x%08lx\n",curr);
	zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */
	printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, "
	    "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx,
	    (zatm_dev->mem-curr*4)/VC_SIZE);
	/* create mailboxes */
	for (i = 0; i < NR_MBX; i++) {
		void *mbx;
		dma_addr_t mbx_dma;

		if (!mbx_entries[i])
			continue;
		mbx = pci_alloc_consistent(pdev, 2*MBX_SIZE(i), &mbx_dma);
		if (!mbx) {
			error = -ENOMEM;
			goto out;
		}
		/*
		 * Alignment provided by pci_alloc_consistent() isn't enough
		 * for this device.
		 */
		if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
			printk(KERN_ERR DEV_LABEL "(itf %d): system "
			       "bus incompatible with driver\n", dev->number);
			pci_free_consistent(pdev, 2*MBX_SIZE(i), mbx, mbx_dma);
			error = -ENODEV;
			goto out;
		}
		DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i));
		zatm_dev->mbx_start[i] = (unsigned long)mbx;
		zatm_dev->mbx_dma[i] = mbx_dma;
		zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) &
		    0xffff;
		/* program mailbox start/end/tail/write-ahead registers */
		zout(mbx_dma >> 16, MSH(i));
		zout(mbx_dma, MSL(i));
		zout(zatm_dev->mbx_end[i], MBA(i));
		zout((unsigned long)mbx & 0xffff, MTA(i));
		zout((unsigned long)mbx & 0xffff, MWA(i));
	}
	error = start_tx(dev);
	if (error)
		goto out;
	error = start_rx(dev);
	if (error)
		goto out_tx;
	error = dev->phy->start(dev);
	if (error)
		goto out_rx;
	zout(0xffffffff,IMR); /* enable interrupts */
	/* enable TX & RX */
	zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR);
done:
	return error;

out_rx:
	kfree(zatm_dev->rx_map);
out_tx:
	kfree(zatm_dev->tx_map);
out:
	/* free any mailboxes allocated in earlier loop iterations */
	while (i-- > 0) {
		pci_free_consistent(pdev, 2*MBX_SIZE(i),
				    (void *)zatm_dev->mbx_start[i],
				    zatm_dev->mbx_dma[i]);
	}
	free_irq(zatm_dev->irq, dev);
	goto done;
}


/*
 * Close a VCC: tear down RX then TX, free the per-VCC state and clear
 * the address/ready flags.  Safe to call on a partially opened VCC
 * (zatm_open() calls it on its own error paths).
 */
static void zatm_close(struct atm_vcc *vcc)
{
	DPRINTK(">zatm_close\n");
	if (!ZATM_VCC(vcc)) return;
	clear_bit(ATM_VF_READY,&vcc->flags);
	close_rx(vcc);
	EVENT("close_tx\n",0,0);
	close_tx(vcc);
	DPRINTK("zatm_close: done waiting\n");
	/* deallocate memory */
	kfree(ZATM_VCC(vcc));
	vcc->dev_data = NULL;
	clear_bit(ATM_VF_ADDR,&vcc->flags);
}


/*
 * Open a VCC.  Two-phase open: on a non-ATM_VF_PARTIAL open, allocate the
 * per-VCC state and open the RX/TX channels ("first" stage); when a
 * concrete VPI/VCI is bound, program the per-VC descriptors ("second"
 * stage) and mark the VCC ready.  AAL5 only.
 */
static int zatm_open(struct atm_vcc *vcc)
{
	struct zatm_dev *zatm_dev;
	struct zatm_vcc *zatm_vcc;
	short vpi = vcc->vpi;
	int vci = vcc->vci;
	int error;

	DPRINTK(">zatm_open\n");
	zatm_dev = ZATM_DEV(vcc->dev);
	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
		vcc->dev_data = NULL;
	/*
	 * NOTE(review): vci is compared against ATM_VPI_UNSPEC and vpi
	 * against ATM_VCI_UNSPEC here (and again below) - the sentinels
	 * look swapped; presumably harmless only if both constants share
	 * the same value.  TODO confirm against <linux/atm.h>.
	 */
	if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
		set_bit(ATM_VF_ADDR,&vcc->flags);
	if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */
	DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi,
	    vcc->vci);
	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
		zatm_vcc = kmalloc(sizeof(struct zatm_vcc),GFP_KERNEL);
		if (!zatm_vcc) {
			clear_bit(ATM_VF_ADDR,&vcc->flags);
			return -ENOMEM;
		}
		vcc->dev_data = zatm_vcc;
		ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */
		if ((error = open_rx_first(vcc))) {
			zatm_close(vcc);
			return error;
		}
		if ((error = open_tx_first(vcc))) {
			zatm_close(vcc);
			return error;
		}
	}
	if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0;
	if ((error = open_rx_second(vcc))) {
		zatm_close(vcc);
		return error;
	}
	if ((error = open_tx_second(vcc))) {
		zatm_close(vcc);
		return error;
	}
	set_bit(ATM_VF_READY,&vcc->flags);
	return 0;
}


/* QoS renegotiation is not supported by this driver. */
static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags)
{
	printk("Not yet implemented\n");
	return -ENOSYS;
	/* @@@ */
}


/*
 * Device ioctl: ZATM_GETPOOL/ZATM_GETPOOLZ read (and, for GETPOOLZ,
 * zero the RQA/RQU counters of) a buffer-pool info block; ZATM_SETPOOL
 * updates the pool watermarks.  Anything else is handed to the PHY.
 * ZATM_GETPOOLZ and ZATM_SETPOOL require CAP_NET_ADMIN.
 */
static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
{
	struct zatm_dev *zatm_dev;
	unsigned long flags;

	zatm_dev = ZATM_DEV(dev);
	switch (cmd) {
		case ZATM_GETPOOLZ:
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			/* fall through */
		case ZATM_GETPOOL:
			{
				struct zatm_pool_info info;
				int pool;

				if (get_user(pool,
				    &((struct zatm_pool_req __user *) arg)->
				    pool_num)) return -EFAULT;
				if (pool < 0 || pool > ZATM_LAST_POOL)
					return -EINVAL;
				/* snapshot under the lock, copy out after */
				spin_lock_irqsave(&zatm_dev->lock, flags);
				info = zatm_dev->pool_info[pool];
				if (cmd == ZATM_GETPOOLZ) {
					zatm_dev->pool_info[pool].rqa_count = 0;
					zatm_dev->pool_info[pool].rqu_count = 0;
				}
				spin_unlock_irqrestore(&zatm_dev->lock, flags);
				return copy_to_user(
				    &((struct zatm_pool_req __user *) arg)->
				    info,&info,sizeof(info)) ? -EFAULT : 0;
			}
		case ZATM_SETPOOL:
			{
				struct zatm_pool_info info;
				int pool;

				if (!capable(CAP_NET_ADMIN)) return -EPERM;
				if (get_user(pool,
				    &((struct zatm_pool_req __user *) arg)->
				    pool_num)) return -EFAULT;
				if (pool < 0 || pool > ZATM_LAST_POOL)
					return -EINVAL;
				if (copy_from_user(&info,
				    &((struct zatm_pool_req __user *) arg)->
				    info,sizeof(info))) return -EFAULT;
				/* zero fields mean "keep current setting" */
				if (!info.low_water)
					info.low_water = zatm_dev->
					    pool_info[pool].low_water;
				if (!info.high_water)
					info.high_water = zatm_dev->
					    pool_info[pool].high_water;
				if (!info.next_thres)
					info.next_thres = zatm_dev->
					    pool_info[pool].next_thres;
				if (info.low_water >= info.high_water ||
				    info.low_water < 0)
					return -EINVAL;
				spin_lock_irqsave(&zatm_dev->lock, flags);
				zatm_dev->pool_info[pool].low_water =
				    info.low_water;
				zatm_dev->pool_info[pool].high_water =
				    info.high_water;
				zatm_dev->pool_info[pool].next_thres =
				    info.next_thres;
				spin_unlock_irqrestore(&zatm_dev->lock, flags);
				return 0;
			}
		default:
			if (!dev->phy->ioctl) return -ENOIOCTLCMD;
			return dev->phy->ioctl(dev,cmd,arg);
	}
}


/* No driver-specific socket options. */
static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname,
    void __user *optval,int optlen)
{
	return -EINVAL;
}


/* No driver-specific socket options. */
static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname,
    void __user *optval,unsigned int optlen)
{
	return -EINVAL;
}


/*
 * Transmit entry point.  Hands the skb to do_tx(); if the TX ring is
 * full (RING_BUSY) the skb is parked on the per-VCC backlog, which
 * dequeue_tx() drains as ring slots free up.
 */
static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
	int error;

	EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0);
	if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) {
		if (vcc->pop) vcc->pop(vcc,skb);
		else dev_kfree_skb(skb);
		return -EINVAL;
	}
	/*
	 * NOTE(review): this NULL check comes after the branch above may
	 * already have passed skb to pop()/dev_kfree_skb() - ordering
	 * looks suspect; confirm callers never pass a NULL skb.
	 */
	if (!skb) {
		printk(KERN_CRIT "!skb in zatm_send ?\n");
		if (vcc->pop) vcc->pop(vcc,skb);
		return -EINVAL;
	}
	ATM_SKB(skb)->vcc = vcc;
	error = do_tx(skb);
	if (error != RING_BUSY) return error;
	skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb);
	return 0;
}


/*
 * Write one PHY register via the uPD98401 indirect-access command.
 * Fire-and-forget: does not wait for the write to complete.
 */
static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
    unsigned long addr)
{
	struct zatm_dev *zatm_dev;

	zatm_dev = ZATM_DEV(dev);
	zwait;
	zout(value,CER);
	zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
	    (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
}


/* Read one PHY register via the uPD98401 indirect-access command. */
static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
{
	struct zatm_dev *zatm_dev;

	zatm_dev = ZATM_DEV(dev);
	zwait;
	zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
	    (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
	zwait;
	return zin(CER) & 0xff;
}


/* ATM device operations exported to the ATM core. */
static const struct atmdev_ops ops = {
	.open = zatm_open,
	.close = zatm_close,
	.ioctl = zatm_ioctl,
	.getsockopt = zatm_getsockopt,
	.setsockopt = zatm_setsockopt,
	.send = zatm_send,
	.phy_put = zatm_phy_put,
	.phy_get = zatm_phy_get,
	.change_qos = zatm_change_qos,
};


/*
 * PCI probe: allocate the per-board state, register the ATM device,
 * enable the PCI device and claim its regions, then run zatm_init() and
 * zatm_start().  On success the board is linked onto the zatm_boards
 * list; on failure everything is unwound via the goto chain.
 */
static int __devinit zatm_init_one(struct pci_dev *pci_dev,
				   const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	struct zatm_dev *zatm_dev;
	int ret = -ENOMEM;

	zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL);
	if (!zatm_dev) {
		printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL);
		goto out;
	}

	dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL);
	if (!dev)
		goto out_free;

	ret = pci_enable_device(pci_dev);
	if (ret < 0)
		goto out_deregister;

	ret = pci_request_regions(pci_dev, DEV_LABEL);
	if (ret < 0)
		goto out_disable;

	zatm_dev->pci_dev = pci_dev;
	dev->dev_data = zatm_dev;
	/* driver_data in the ID table flags copper (UTP) boards */
	zatm_dev->copper = (int)ent->driver_data;
	if ((ret = zatm_init(dev)) || (ret = zatm_start(dev)))
		goto out_release;

	pci_set_drvdata(pci_dev, dev);
	zatm_dev->more = zatm_boards;
	zatm_boards = dev;
	ret = 0;
out:
	return ret;

out_release:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_deregister:
	atm_dev_deregister(dev);
out_free:
	kfree(zatm_dev);
	goto out;
}

MODULE_LICENSE("GPL");
/*
 * PCI device IDs: the ZeitNet 1221 (copper/UTP, flagged via driver_data)
 * and 1225 (fiber) boards.
 */
static struct pci_device_id zatm_pci_tbl[] __devinitdata = {
	{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
	{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, zatm_pci_tbl);

/* No .remove: the driver does not support device removal (see below). */
static struct pci_driver zatm_driver = {
	.name = DEV_LABEL,
	.id_table = zatm_pci_tbl,
	.probe = zatm_init_one,
};

/* Module entry point: just register the PCI driver. */
static int __init zatm_init_module(void)
{
	return pci_register_driver(&zatm_driver);
}

module_init(zatm_init_module);
/* module_exit not defined so not unloadable */
gpl-2.0
Jaegerjaquez69/android_kernel_ls840_zvk
arch/xtensa/kernel/syscall.c
8561
1502
/*
 * arch/xtensa/kernel/syscall.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 *
 */
#include <asm/uaccess.h>
#include <asm/syscall.h>
#include <asm/unistd.h>
#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/shm.h>

typedef void (*syscall_t)(void);

/*
 * The syscall dispatch table.  Every slot defaults to sys_ni_syscall;
 * real entries are then filled in by redefining __SYSCALL and
 * re-including <asm/unistd.h> (hence the #undef of its include guard).
 */
syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
	[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,

#undef __SYSCALL
#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
#undef _XTENSA_UNISTD_H
#undef __KERNEL_SYSCALLS__
#include <asm/unistd.h>
};

/*
 * shmat wrapper: do_shmat() returns the attach address through an
 * out-parameter, so convert that to the address-or-negative-errno
 * return convention of the syscall.
 */
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	return (long)ret;
}

/*
 * fadvise64_64 wrapper: the xtensa ABI passes advice as the second
 * argument; reorder into the generic (fd, offset, len, advice) form.
 */
asmlinkage long xtensa_fadvise64_64(int fd, int advice,
		unsigned long long offset, unsigned long long len)
{
	return sys_fadvise64_64(fd, offset, len, advice);
}
gpl-2.0
mrwargod/boeffla-kernel-slimversion-bacon
drivers/macintosh/windfarm_core.c
8817
11284
/* * Windfarm PowerMac thermal control. Core * * (c) Copyright 2005 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * Released under the term of the GNU GPL v2. * * This core code tracks the list of sensors & controls, register * clients, and holds the kernel thread used for control. * * TODO: * * Add some information about sensor/control type and data format to * sensors/controls, and have the sysfs attribute stuff be moved * generically here instead of hard coded in the platform specific * driver as it us currently * * This however requires solving some annoying lifetime issues with * sysfs which doesn't seem to have lifetime rules for struct attribute, * I may have to create full features kobjects for every sensor/control * instead which is a bit of an overkill imho */ #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/jiffies.h> #include <linux/reboot.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/freezer.h> #include <asm/prom.h> #include "windfarm.h" #define VERSION "0.2" #undef DEBUG #ifdef DEBUG #define DBG(args...) printk(args) #else #define DBG(args...) 
do { } while(0) #endif static LIST_HEAD(wf_controls); static LIST_HEAD(wf_sensors); static DEFINE_MUTEX(wf_lock); static BLOCKING_NOTIFIER_HEAD(wf_client_list); static int wf_client_count; static unsigned int wf_overtemp; static unsigned int wf_overtemp_counter; struct task_struct *wf_thread; static struct platform_device wf_platform_device = { .name = "windfarm", }; /* * Utilities & tick thread */ static inline void wf_notify(int event, void *param) { blocking_notifier_call_chain(&wf_client_list, event, param); } int wf_critical_overtemp(void) { static char * critical_overtemp_path = "/sbin/critical_overtemp"; char *argv[] = { critical_overtemp_path, NULL }; static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; return call_usermodehelper(critical_overtemp_path, argv, envp, UMH_WAIT_EXEC); } EXPORT_SYMBOL_GPL(wf_critical_overtemp); static int wf_thread_func(void *data) { unsigned long next, delay; next = jiffies; DBG("wf: thread started\n"); set_freezable(); while (!kthread_should_stop()) { try_to_freeze(); if (time_after_eq(jiffies, next)) { wf_notify(WF_EVENT_TICK, NULL); if (wf_overtemp) { wf_overtemp_counter++; /* 10 seconds overtemp, notify userland */ if (wf_overtemp_counter > 10) wf_critical_overtemp(); /* 30 seconds, shutdown */ if (wf_overtemp_counter > 30) { printk(KERN_ERR "windfarm: Overtemp " "for more than 30" " seconds, shutting down\n"); machine_power_off(); } } next += HZ; } delay = next - jiffies; if (delay <= HZ) schedule_timeout_interruptible(delay); } DBG("wf: thread stopped\n"); return 0; } static void wf_start_thread(void) { wf_thread = kthread_run(wf_thread_func, NULL, "kwindfarm"); if (IS_ERR(wf_thread)) { printk(KERN_ERR "windfarm: failed to create thread,err %ld\n", PTR_ERR(wf_thread)); wf_thread = NULL; } } static void wf_stop_thread(void) { if (wf_thread) kthread_stop(wf_thread); wf_thread = NULL; } /* * Controls */ static void wf_control_release(struct kref *kref) { struct wf_control *ct = 
container_of(kref, struct wf_control, ref); DBG("wf: Deleting control %s\n", ct->name); if (ct->ops && ct->ops->release) ct->ops->release(ct); else kfree(ct); } static ssize_t wf_show_control(struct device *dev, struct device_attribute *attr, char *buf) { struct wf_control *ctrl = container_of(attr, struct wf_control, attr); s32 val = 0; int err; err = ctrl->ops->get_value(ctrl, &val); if (err < 0) return err; return sprintf(buf, "%d\n", val); } /* This is really only for debugging... */ static ssize_t wf_store_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct wf_control *ctrl = container_of(attr, struct wf_control, attr); int val; int err; char *endp; val = simple_strtoul(buf, &endp, 0); while (endp < buf + count && (*endp == ' ' || *endp == '\n')) ++endp; if (endp - buf < count) return -EINVAL; err = ctrl->ops->set_value(ctrl, val); if (err < 0) return err; return count; } int wf_register_control(struct wf_control *new_ct) { struct wf_control *ct; mutex_lock(&wf_lock); list_for_each_entry(ct, &wf_controls, link) { if (!strcmp(ct->name, new_ct->name)) { printk(KERN_WARNING "windfarm: trying to register" " duplicate control %s\n", ct->name); mutex_unlock(&wf_lock); return -EEXIST; } } kref_init(&new_ct->ref); list_add(&new_ct->link, &wf_controls); sysfs_attr_init(&new_ct->attr.attr); new_ct->attr.attr.name = new_ct->name; new_ct->attr.attr.mode = 0644; new_ct->attr.show = wf_show_control; new_ct->attr.store = wf_store_control; if (device_create_file(&wf_platform_device.dev, &new_ct->attr)) printk(KERN_WARNING "windfarm: device_create_file failed" " for %s\n", new_ct->name); /* the subsystem still does useful work without the file */ DBG("wf: Registered control %s\n", new_ct->name); wf_notify(WF_EVENT_NEW_CONTROL, new_ct); mutex_unlock(&wf_lock); return 0; } EXPORT_SYMBOL_GPL(wf_register_control); void wf_unregister_control(struct wf_control *ct) { mutex_lock(&wf_lock); list_del(&ct->link); mutex_unlock(&wf_lock); 
DBG("wf: Unregistered control %s\n", ct->name); kref_put(&ct->ref, wf_control_release); } EXPORT_SYMBOL_GPL(wf_unregister_control); struct wf_control * wf_find_control(const char *name) { struct wf_control *ct; mutex_lock(&wf_lock); list_for_each_entry(ct, &wf_controls, link) { if (!strcmp(ct->name, name)) { if (wf_get_control(ct)) ct = NULL; mutex_unlock(&wf_lock); return ct; } } mutex_unlock(&wf_lock); return NULL; } EXPORT_SYMBOL_GPL(wf_find_control); int wf_get_control(struct wf_control *ct) { if (!try_module_get(ct->ops->owner)) return -ENODEV; kref_get(&ct->ref); return 0; } EXPORT_SYMBOL_GPL(wf_get_control); void wf_put_control(struct wf_control *ct) { struct module *mod = ct->ops->owner; kref_put(&ct->ref, wf_control_release); module_put(mod); } EXPORT_SYMBOL_GPL(wf_put_control); /* * Sensors */ static void wf_sensor_release(struct kref *kref) { struct wf_sensor *sr = container_of(kref, struct wf_sensor, ref); DBG("wf: Deleting sensor %s\n", sr->name); if (sr->ops && sr->ops->release) sr->ops->release(sr); else kfree(sr); } static ssize_t wf_show_sensor(struct device *dev, struct device_attribute *attr, char *buf) { struct wf_sensor *sens = container_of(attr, struct wf_sensor, attr); s32 val = 0; int err; err = sens->ops->get_value(sens, &val); if (err < 0) return err; return sprintf(buf, "%d.%03d\n", FIX32TOPRINT(val)); } int wf_register_sensor(struct wf_sensor *new_sr) { struct wf_sensor *sr; mutex_lock(&wf_lock); list_for_each_entry(sr, &wf_sensors, link) { if (!strcmp(sr->name, new_sr->name)) { printk(KERN_WARNING "windfarm: trying to register" " duplicate sensor %s\n", sr->name); mutex_unlock(&wf_lock); return -EEXIST; } } kref_init(&new_sr->ref); list_add(&new_sr->link, &wf_sensors); sysfs_attr_init(&new_sr->attr.attr); new_sr->attr.attr.name = new_sr->name; new_sr->attr.attr.mode = 0444; new_sr->attr.show = wf_show_sensor; new_sr->attr.store = NULL; if (device_create_file(&wf_platform_device.dev, &new_sr->attr)) printk(KERN_WARNING "windfarm: 
device_create_file failed" " for %s\n", new_sr->name); /* the subsystem still does useful work without the file */ DBG("wf: Registered sensor %s\n", new_sr->name); wf_notify(WF_EVENT_NEW_SENSOR, new_sr); mutex_unlock(&wf_lock); return 0; } EXPORT_SYMBOL_GPL(wf_register_sensor); void wf_unregister_sensor(struct wf_sensor *sr) { mutex_lock(&wf_lock); list_del(&sr->link); mutex_unlock(&wf_lock); DBG("wf: Unregistered sensor %s\n", sr->name); wf_put_sensor(sr); } EXPORT_SYMBOL_GPL(wf_unregister_sensor); struct wf_sensor * wf_find_sensor(const char *name) { struct wf_sensor *sr; mutex_lock(&wf_lock); list_for_each_entry(sr, &wf_sensors, link) { if (!strcmp(sr->name, name)) { if (wf_get_sensor(sr)) sr = NULL; mutex_unlock(&wf_lock); return sr; } } mutex_unlock(&wf_lock); return NULL; } EXPORT_SYMBOL_GPL(wf_find_sensor); int wf_get_sensor(struct wf_sensor *sr) { if (!try_module_get(sr->ops->owner)) return -ENODEV; kref_get(&sr->ref); return 0; } EXPORT_SYMBOL_GPL(wf_get_sensor); void wf_put_sensor(struct wf_sensor *sr) { struct module *mod = sr->ops->owner; kref_put(&sr->ref, wf_sensor_release); module_put(mod); } EXPORT_SYMBOL_GPL(wf_put_sensor); /* * Client & notification */ int wf_register_client(struct notifier_block *nb) { int rc; struct wf_control *ct; struct wf_sensor *sr; mutex_lock(&wf_lock); rc = blocking_notifier_chain_register(&wf_client_list, nb); if (rc != 0) goto bail; wf_client_count++; list_for_each_entry(ct, &wf_controls, link) wf_notify(WF_EVENT_NEW_CONTROL, ct); list_for_each_entry(sr, &wf_sensors, link) wf_notify(WF_EVENT_NEW_SENSOR, sr); if (wf_client_count == 1) wf_start_thread(); bail: mutex_unlock(&wf_lock); return rc; } EXPORT_SYMBOL_GPL(wf_register_client); int wf_unregister_client(struct notifier_block *nb) { mutex_lock(&wf_lock); blocking_notifier_chain_unregister(&wf_client_list, nb); wf_client_count++; if (wf_client_count == 0) wf_stop_thread(); mutex_unlock(&wf_lock); return 0; } EXPORT_SYMBOL_GPL(wf_unregister_client); void 
wf_set_overtemp(void) { mutex_lock(&wf_lock); wf_overtemp++; if (wf_overtemp == 1) { printk(KERN_WARNING "windfarm: Overtemp condition detected !\n"); wf_overtemp_counter = 0; wf_notify(WF_EVENT_OVERTEMP, NULL); } mutex_unlock(&wf_lock); } EXPORT_SYMBOL_GPL(wf_set_overtemp); void wf_clear_overtemp(void) { mutex_lock(&wf_lock); WARN_ON(wf_overtemp == 0); if (wf_overtemp == 0) { mutex_unlock(&wf_lock); return; } wf_overtemp--; if (wf_overtemp == 0) { printk(KERN_WARNING "windfarm: Overtemp condition cleared !\n"); wf_notify(WF_EVENT_NORMALTEMP, NULL); } mutex_unlock(&wf_lock); } EXPORT_SYMBOL_GPL(wf_clear_overtemp); int wf_is_overtemp(void) { return (wf_overtemp != 0); } EXPORT_SYMBOL_GPL(wf_is_overtemp); static int __init windfarm_core_init(void) { DBG("wf: core loaded\n"); /* Don't register on old machines that use therm_pm72 for now */ if (of_machine_is_compatible("PowerMac7,2") || of_machine_is_compatible("PowerMac7,3") || of_machine_is_compatible("RackMac3,1")) return -ENODEV; platform_device_register(&wf_platform_device); return 0; } static void __exit windfarm_core_exit(void) { BUG_ON(wf_client_count != 0); DBG("wf: core unloaded\n"); platform_device_unregister(&wf_platform_device); } module_init(windfarm_core_init); module_exit(windfarm_core_exit); MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("Core component of PowerMac thermal control"); MODULE_LICENSE("GPL");
gpl-2.0
SkrilaxCZ/android_kernel_moto_asanti_c
sound/pci/au88x0/au88x0_synth.c
9841
11076
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* * Someday its supposed to make use of the WT DMA engine * for a Wavetable synthesizer. */ #include "au88x0.h" #include "au88x0_wt.h" static void vortex_fifo_setwtvalid(vortex_t * vortex, int fifo, int en); static void vortex_connection_adb_mixin(vortex_t * vortex, int en, unsigned char channel, unsigned char source, unsigned char mixin); static void vortex_connection_mixin_mix(vortex_t * vortex, int en, unsigned char mixin, unsigned char mix, int a); static void vortex_fifo_wtinitialize(vortex_t * vortex, int fifo, int j); static int vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt, u32 val); /* WT */ /* Put 2 WT channels together for one stereo interlaced channel. */ static void vortex_wt_setstereo(vortex_t * vortex, u32 wt, u32 stereo) { int temp; //temp = hwread(vortex->mmio, 0x80 + ((wt >> 0x5)<< 0xf) + (((wt & 0x1f) >> 1) << 2)); temp = hwread(vortex->mmio, WT_STEREO(wt)); temp = (temp & 0xfe) | (stereo & 1); //hwwrite(vortex->mmio, 0x80 + ((wt >> 0x5)<< 0xf) + (((wt & 0x1f) >> 1) << 2), temp); hwwrite(vortex->mmio, WT_STEREO(wt), temp); } /* Join to mixdown route. */ static void vortex_wt_setdsout(vortex_t * vortex, u32 wt, int en) { int temp; /* There is one DSREG register for each bank (32 voices each). 
*/ temp = hwread(vortex->mmio, WT_DSREG((wt >= 0x20) ? 1 : 0)); if (en) temp |= (1 << (wt & 0x1f)); else temp &= (1 << ~(wt & 0x1f)); hwwrite(vortex->mmio, WT_DSREG((wt >= 0x20) ? 1 : 0), temp); } /* Setup WT route. */ static int vortex_wt_allocroute(vortex_t * vortex, int wt, int nr_ch) { wt_voice_t *voice = &(vortex->wt_voice[wt]); int temp; //FIXME: WT audio routing. if (nr_ch) { vortex_fifo_wtinitialize(vortex, wt, 1); vortex_fifo_setwtvalid(vortex, wt, 1); vortex_wt_setstereo(vortex, wt, nr_ch - 1); } else vortex_fifo_setwtvalid(vortex, wt, 0); /* Set mixdown mode. */ vortex_wt_setdsout(vortex, wt, 1); /* Set other parameter registers. */ hwwrite(vortex->mmio, WT_SRAMP(0), 0x880000); //hwwrite(vortex->mmio, WT_GMODE(0), 0xffffffff); #ifdef CHIP_AU8830 hwwrite(vortex->mmio, WT_SRAMP(1), 0x880000); //hwwrite(vortex->mmio, WT_GMODE(1), 0xffffffff); #endif hwwrite(vortex->mmio, WT_PARM(wt, 0), 0); hwwrite(vortex->mmio, WT_PARM(wt, 1), 0); hwwrite(vortex->mmio, WT_PARM(wt, 2), 0); temp = hwread(vortex->mmio, WT_PARM(wt, 3)); printk(KERN_DEBUG "vortex: WT PARM3: %x\n", temp); //hwwrite(vortex->mmio, WT_PARM(wt, 3), temp); hwwrite(vortex->mmio, WT_DELAY(wt, 0), 0); hwwrite(vortex->mmio, WT_DELAY(wt, 1), 0); hwwrite(vortex->mmio, WT_DELAY(wt, 2), 0); hwwrite(vortex->mmio, WT_DELAY(wt, 3), 0); printk(KERN_DEBUG "vortex: WT GMODE: %x\n", hwread(vortex->mmio, WT_GMODE(wt))); hwwrite(vortex->mmio, WT_PARM(wt, 2), 0xffffffff); hwwrite(vortex->mmio, WT_PARM(wt, 3), 0xcff1c810); voice->parm0 = voice->parm1 = 0xcfb23e2f; hwwrite(vortex->mmio, WT_PARM(wt, 0), voice->parm0); hwwrite(vortex->mmio, WT_PARM(wt, 1), voice->parm1); printk(KERN_DEBUG "vortex: WT GMODE 2 : %x\n", hwread(vortex->mmio, WT_GMODE(wt))); return 0; } static void vortex_wt_connect(vortex_t * vortex, int en) { int i, ii, mix; #define NR_WTROUTES 6 #ifdef CHIP_AU8830 #define NR_WTBLOCKS 2 #else #define NR_WTBLOCKS 1 #endif for (i = 0; i < NR_WTBLOCKS; i++) { for (ii = 0; ii < NR_WTROUTES; ii++) { mix = 
vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXIN); vortex->mixwt[(i * NR_WTROUTES) + ii] = mix; vortex_route(vortex, en, 0x11, ADB_WTOUT(i, ii + 0x20), ADB_MIXIN(mix)); vortex_connection_mixin_mix(vortex, en, mix, vortex->mixplayb[ii % 2], 0); if (VORTEX_IS_QUAD(vortex)) vortex_connection_mixin_mix(vortex, en, mix, vortex->mixplayb[2 + (ii % 2)], 0); } } for (i = 0; i < NR_WT; i++) { hwwrite(vortex->mmio, WT_RUN(i), 1); } } /* Read WT Register */ #if 0 static int vortex_wt_GetReg(vortex_t * vortex, char reg, int wt) { //int eax, esi; if (reg == 4) { return hwread(vortex->mmio, WT_PARM(wt, 3)); } if (reg == 7) { return hwread(vortex->mmio, WT_GMODE(wt)); } return 0; } /* WT hardware abstraction layer generic register interface. */ static int vortex_wt_SetReg2(vortex_t * vortex, unsigned char reg, int wt, u16 val) { /* int eax, edx; if (wt >= NR_WT) // 0x40 -> NR_WT return 0; if ((reg - 0x20) > 0) { if ((reg - 0x21) != 0) return 0; eax = ((((b & 0xff) << 0xb) + (edx & 0xff)) << 4) + 0x208; // param 2 } else { eax = ((((b & 0xff) << 0xb) + (edx & 0xff)) << 4) + 0x20a; // param 3 } hwwrite(vortex->mmio, eax, c); */ return 1; } /*public: static void __thiscall CWTHal::SetReg(unsigned char,int,unsigned long) */ #endif static int vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt, u32 val) { int ecx; if ((reg == 5) || ((reg >= 7) && (reg <= 10)) || (reg == 0xc)) { if (wt >= (NR_WT / NR_WT_PB)) { printk ("vortex: WT SetReg: bank out of range. 
reg=0x%x, wt=%d\n", reg, wt); return 0; } } else { if (wt >= NR_WT) { printk(KERN_ERR "vortex: WT SetReg: voice out of range\n"); return 0; } } if (reg > 0xc) return 0; switch (reg) { /* Voice specific parameters */ case 0: /* running */ /* printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", WT_RUN(wt), (int)val); */ hwwrite(vortex->mmio, WT_RUN(wt), val); return 0xc; break; case 1: /* param 0 */ /* printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", WT_PARM(wt,0), (int)val); */ hwwrite(vortex->mmio, WT_PARM(wt, 0), val); return 0xc; break; case 2: /* param 1 */ /* printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", WT_PARM(wt,1), (int)val); */ hwwrite(vortex->mmio, WT_PARM(wt, 1), val); return 0xc; break; case 3: /* param 2 */ /* printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", WT_PARM(wt,2), (int)val); */ hwwrite(vortex->mmio, WT_PARM(wt, 2), val); return 0xc; break; case 4: /* param 3 */ /* printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", WT_PARM(wt,3), (int)val); */ hwwrite(vortex->mmio, WT_PARM(wt, 3), val); return 0xc; break; case 6: /* mute */ /* printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", WT_MUTE(wt), (int)val); */ hwwrite(vortex->mmio, WT_MUTE(wt), val); return 0xc; break; case 0xb: { /* delay */ /* printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", WT_DELAY(wt,0), (int)val); */ hwwrite(vortex->mmio, WT_DELAY(wt, 3), val); hwwrite(vortex->mmio, WT_DELAY(wt, 2), val); hwwrite(vortex->mmio, WT_DELAY(wt, 1), val); hwwrite(vortex->mmio, WT_DELAY(wt, 0), val); return 0xc; } break; /* Global WT block parameters */ case 5: /* sramp */ ecx = WT_SRAMP(wt); break; case 8: /* aramp */ ecx = WT_ARAMP(wt); break; case 9: /* mramp */ ecx = WT_MRAMP(wt); break; case 0xa: /* ctrl */ ecx = WT_CTRL(wt); break; case 0xc: /* ds_reg */ ecx = WT_DSREG(wt); break; default: return 0; break; } /* printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", ecx, (int)val); */ hwwrite(vortex->mmio, ecx, val); return 1; } static void 
vortex_wt_init(vortex_t * vortex) { u32 var4, var8, varc, var10 = 0, edi; var10 &= 0xFFFFFFE3; var10 |= 0x22; var10 &= 0xFFFFFEBF; var10 |= 0x80; var10 |= 0x200; var10 &= 0xfffffffe; var10 &= 0xfffffbff; var10 |= 0x1800; // var10 = 0x1AA2 var4 = 0x10000000; varc = 0x00830000; var8 = 0x00830000; /* Init Bank registers. */ for (edi = 0; edi < (NR_WT / NR_WT_PB); edi++) { vortex_wt_SetReg(vortex, 0xc, edi, 0); /* ds_reg */ vortex_wt_SetReg(vortex, 0xa, edi, var10); /* ctrl */ vortex_wt_SetReg(vortex, 0x9, edi, var4); /* mramp */ vortex_wt_SetReg(vortex, 0x8, edi, varc); /* aramp */ vortex_wt_SetReg(vortex, 0x5, edi, var8); /* sramp */ } /* Init Voice registers. */ for (edi = 0; edi < NR_WT; edi++) { vortex_wt_SetReg(vortex, 0x4, edi, 0); /* param 3 0x20c */ vortex_wt_SetReg(vortex, 0x3, edi, 0); /* param 2 0x208 */ vortex_wt_SetReg(vortex, 0x2, edi, 0); /* param 1 0x204 */ vortex_wt_SetReg(vortex, 0x1, edi, 0); /* param 0 0x200 */ vortex_wt_SetReg(vortex, 0xb, edi, 0); /* delay 0x400 - 0x40c */ } var10 |= 1; for (edi = 0; edi < (NR_WT / NR_WT_PB); edi++) vortex_wt_SetReg(vortex, 0xa, edi, var10); /* ctrl */ } /* Extract of CAdbTopology::SetVolume(struct _ASPVOLUME *) */ #if 0 static void vortex_wt_SetVolume(vortex_t * vortex, int wt, int vol[]) { wt_voice_t *voice = &(vortex->wt_voice[wt]); int ecx = vol[1], eax = vol[0]; /* This is pure guess */ voice->parm0 &= 0xff00ffff; voice->parm0 |= (vol[0] & 0xff) << 0x10; voice->parm1 &= 0xff00ffff; voice->parm1 |= (vol[1] & 0xff) << 0x10; /* This is real */ hwwrite(vortex, WT_PARM(wt, 0), voice->parm0); hwwrite(vortex, WT_PARM(wt, 1), voice->parm0); if (voice->this_1D0 & 4) { eax >>= 8; ecx = eax; if (ecx < 0x80) ecx = 0x7f; voice->parm3 &= 0xFFFFC07F; voice->parm3 |= (ecx & 0x7f) << 7; voice->parm3 &= 0xFFFFFF80; voice->parm3 |= (eax & 0x7f); } else { voice->parm3 &= 0xFFE03FFF; voice->parm3 |= (eax & 0xFE00) << 5; } hwwrite(vortex, WT_PARM(wt, 3), voice->parm3); } /* Extract of CAdbTopology::SetFrequency(unsigned long 
arg_0) */ static void vortex_wt_SetFrequency(vortex_t * vortex, int wt, unsigned int sr) { wt_voice_t *voice = &(vortex->wt_voice[wt]); u32 eax, edx; //FIXME: 64 bit operation. eax = ((sr << 0xf) * 0x57619F1) & 0xffffffff; edx = (((sr << 0xf) * 0x57619F1)) >> 0x20; edx >>= 0xa; edx <<= 1; if (edx) { if (edx & 0x0FFF80000) eax = 0x7fff; else { edx <<= 0xd; eax = 7; while ((edx & 0x80000000) == 0) { edx <<= 1; eax--; if (eax == 0) break; } if (eax) edx <<= 1; eax <<= 0xc; edx >>= 0x14; eax |= edx; } } else eax = 0; voice->parm0 &= 0xffff0001; voice->parm0 |= (eax & 0x7fff) << 1; voice->parm1 = voice->parm0 | 1; // Wt: this_1D4 //AuWt::WriteReg((ulong)(this_1DC<<4)+0x200, (ulong)this_1E4); //AuWt::WriteReg((ulong)(this_1DC<<4)+0x204, (ulong)this_1E8); hwwrite(vortex->mmio, WT_PARM(wt, 0), voice->parm0); hwwrite(vortex->mmio, WT_PARM(wt, 1), voice->parm1); } #endif /* End of File */
gpl-2.0
LiquidSmokeX64/android_kernel_oneplus_msm8974-UR3.6
arch/sh/boards/mach-sh03/rtc.c
12145
3987
/* * linux/arch/sh/boards/sh03/rtc.c -- CTP/PCI-SH03 on-chip RTC support * * Copyright (C) 2004 Saito.K & Jeanne(ksaito@interface.co.jp) * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/bcd.h> #include <linux/rtc.h> #include <linux/spinlock.h> #include <asm/io.h> #include <asm/rtc.h> #define RTC_BASE 0xb0000000 #define RTC_SEC1 (RTC_BASE + 0) #define RTC_SEC10 (RTC_BASE + 1) #define RTC_MIN1 (RTC_BASE + 2) #define RTC_MIN10 (RTC_BASE + 3) #define RTC_HOU1 (RTC_BASE + 4) #define RTC_HOU10 (RTC_BASE + 5) #define RTC_WEE1 (RTC_BASE + 6) #define RTC_DAY1 (RTC_BASE + 7) #define RTC_DAY10 (RTC_BASE + 8) #define RTC_MON1 (RTC_BASE + 9) #define RTC_MON10 (RTC_BASE + 10) #define RTC_YEA1 (RTC_BASE + 11) #define RTC_YEA10 (RTC_BASE + 12) #define RTC_YEA100 (RTC_BASE + 13) #define RTC_YEA1000 (RTC_BASE + 14) #define RTC_CTL (RTC_BASE + 15) #define RTC_BUSY 1 #define RTC_STOP 2 static DEFINE_SPINLOCK(sh03_rtc_lock); unsigned long get_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; spin_lock(&sh03_rtc_lock); again: do { sec = (__raw_readb(RTC_SEC1) & 0xf) + (__raw_readb(RTC_SEC10) & 0x7) * 10; min = (__raw_readb(RTC_MIN1) & 0xf) + (__raw_readb(RTC_MIN10) & 0xf) * 10; hour = (__raw_readb(RTC_HOU1) & 0xf) + (__raw_readb(RTC_HOU10) & 0xf) * 10; day = (__raw_readb(RTC_DAY1) & 0xf) + (__raw_readb(RTC_DAY10) & 0xf) * 10; mon = (__raw_readb(RTC_MON1) & 0xf) + (__raw_readb(RTC_MON10) & 0xf) * 10; year = (__raw_readb(RTC_YEA1) & 0xf) + (__raw_readb(RTC_YEA10) & 0xf) * 10 + (__raw_readb(RTC_YEA100 ) & 0xf) * 100 + (__raw_readb(RTC_YEA1000) & 0xf) * 1000; } while (sec != (__raw_readb(RTC_SEC1) & 0xf) + (__raw_readb(RTC_SEC10) & 0x7) * 10); if (year == 0 || mon < 1 || mon > 12 || day > 31 || day < 1 || hour > 23 || min > 59 || sec > 59) { printk(KERN_ERR "SH-03 RTC: invalid value, resetting to 1 Jan 2000\n"); printk("year=%d, mon=%d, day=%d, hour=%d, min=%d, sec=%d\n", year, mon, day, hour, min, sec); 
__raw_writeb(0, RTC_SEC1); __raw_writeb(0, RTC_SEC10); __raw_writeb(0, RTC_MIN1); __raw_writeb(0, RTC_MIN10); __raw_writeb(0, RTC_HOU1); __raw_writeb(0, RTC_HOU10); __raw_writeb(6, RTC_WEE1); __raw_writeb(1, RTC_DAY1); __raw_writeb(0, RTC_DAY10); __raw_writeb(1, RTC_MON1); __raw_writeb(0, RTC_MON10); __raw_writeb(0, RTC_YEA1); __raw_writeb(0, RTC_YEA10); __raw_writeb(0, RTC_YEA100); __raw_writeb(2, RTC_YEA1000); __raw_writeb(0, RTC_CTL); goto again; } spin_unlock(&sh03_rtc_lock); return mktime(year, mon, day, hour, min, sec); } void sh03_rtc_gettimeofday(struct timespec *tv) { tv->tv_sec = get_cmos_time(); tv->tv_nsec = 0; } static int set_rtc_mmss(unsigned long nowtime) { int retval = 0; int real_seconds, real_minutes, cmos_minutes; int i; /* gets recalled with irq locally disabled */ spin_lock(&sh03_rtc_lock); for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ if (!(__raw_readb(RTC_CTL) & RTC_BUSY)) break; cmos_minutes = (__raw_readb(RTC_MIN1) & 0xf) + (__raw_readb(RTC_MIN10) & 0xf) * 10; real_seconds = nowtime % 60; real_minutes = nowtime / 60; if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) real_minutes += 30; /* correct for half hour time zone */ real_minutes %= 60; if (abs(real_minutes - cmos_minutes) < 30) { __raw_writeb(real_seconds % 10, RTC_SEC1); __raw_writeb(real_seconds / 10, RTC_SEC10); __raw_writeb(real_minutes % 10, RTC_MIN1); __raw_writeb(real_minutes / 10, RTC_MIN10); } else { printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; } spin_unlock(&sh03_rtc_lock); return retval; } int sh03_rtc_settimeofday(const time_t secs) { unsigned long nowtime = secs; return set_rtc_mmss(nowtime); } void sh03_time_init(void) { rtc_sh_get_time = sh03_rtc_gettimeofday; rtc_sh_set_time = sh03_rtc_settimeofday; }
gpl-2.0
OpenELEC/linux
arch/sh/boards/mach-sh03/rtc.c
12145
3987
/* * linux/arch/sh/boards/sh03/rtc.c -- CTP/PCI-SH03 on-chip RTC support * * Copyright (C) 2004 Saito.K & Jeanne(ksaito@interface.co.jp) * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/bcd.h> #include <linux/rtc.h> #include <linux/spinlock.h> #include <asm/io.h> #include <asm/rtc.h> #define RTC_BASE 0xb0000000 #define RTC_SEC1 (RTC_BASE + 0) #define RTC_SEC10 (RTC_BASE + 1) #define RTC_MIN1 (RTC_BASE + 2) #define RTC_MIN10 (RTC_BASE + 3) #define RTC_HOU1 (RTC_BASE + 4) #define RTC_HOU10 (RTC_BASE + 5) #define RTC_WEE1 (RTC_BASE + 6) #define RTC_DAY1 (RTC_BASE + 7) #define RTC_DAY10 (RTC_BASE + 8) #define RTC_MON1 (RTC_BASE + 9) #define RTC_MON10 (RTC_BASE + 10) #define RTC_YEA1 (RTC_BASE + 11) #define RTC_YEA10 (RTC_BASE + 12) #define RTC_YEA100 (RTC_BASE + 13) #define RTC_YEA1000 (RTC_BASE + 14) #define RTC_CTL (RTC_BASE + 15) #define RTC_BUSY 1 #define RTC_STOP 2 static DEFINE_SPINLOCK(sh03_rtc_lock); unsigned long get_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; spin_lock(&sh03_rtc_lock); again: do { sec = (__raw_readb(RTC_SEC1) & 0xf) + (__raw_readb(RTC_SEC10) & 0x7) * 10; min = (__raw_readb(RTC_MIN1) & 0xf) + (__raw_readb(RTC_MIN10) & 0xf) * 10; hour = (__raw_readb(RTC_HOU1) & 0xf) + (__raw_readb(RTC_HOU10) & 0xf) * 10; day = (__raw_readb(RTC_DAY1) & 0xf) + (__raw_readb(RTC_DAY10) & 0xf) * 10; mon = (__raw_readb(RTC_MON1) & 0xf) + (__raw_readb(RTC_MON10) & 0xf) * 10; year = (__raw_readb(RTC_YEA1) & 0xf) + (__raw_readb(RTC_YEA10) & 0xf) * 10 + (__raw_readb(RTC_YEA100 ) & 0xf) * 100 + (__raw_readb(RTC_YEA1000) & 0xf) * 1000; } while (sec != (__raw_readb(RTC_SEC1) & 0xf) + (__raw_readb(RTC_SEC10) & 0x7) * 10); if (year == 0 || mon < 1 || mon > 12 || day > 31 || day < 1 || hour > 23 || min > 59 || sec > 59) { printk(KERN_ERR "SH-03 RTC: invalid value, resetting to 1 Jan 2000\n"); printk("year=%d, mon=%d, day=%d, hour=%d, min=%d, sec=%d\n", year, mon, day, hour, min, sec); 
__raw_writeb(0, RTC_SEC1); __raw_writeb(0, RTC_SEC10); __raw_writeb(0, RTC_MIN1); __raw_writeb(0, RTC_MIN10); __raw_writeb(0, RTC_HOU1); __raw_writeb(0, RTC_HOU10); __raw_writeb(6, RTC_WEE1); __raw_writeb(1, RTC_DAY1); __raw_writeb(0, RTC_DAY10); __raw_writeb(1, RTC_MON1); __raw_writeb(0, RTC_MON10); __raw_writeb(0, RTC_YEA1); __raw_writeb(0, RTC_YEA10); __raw_writeb(0, RTC_YEA100); __raw_writeb(2, RTC_YEA1000); __raw_writeb(0, RTC_CTL); goto again; } spin_unlock(&sh03_rtc_lock); return mktime(year, mon, day, hour, min, sec); } void sh03_rtc_gettimeofday(struct timespec *tv) { tv->tv_sec = get_cmos_time(); tv->tv_nsec = 0; } static int set_rtc_mmss(unsigned long nowtime) { int retval = 0; int real_seconds, real_minutes, cmos_minutes; int i; /* gets recalled with irq locally disabled */ spin_lock(&sh03_rtc_lock); for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ if (!(__raw_readb(RTC_CTL) & RTC_BUSY)) break; cmos_minutes = (__raw_readb(RTC_MIN1) & 0xf) + (__raw_readb(RTC_MIN10) & 0xf) * 10; real_seconds = nowtime % 60; real_minutes = nowtime / 60; if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) real_minutes += 30; /* correct for half hour time zone */ real_minutes %= 60; if (abs(real_minutes - cmos_minutes) < 30) { __raw_writeb(real_seconds % 10, RTC_SEC1); __raw_writeb(real_seconds / 10, RTC_SEC10); __raw_writeb(real_minutes % 10, RTC_MIN1); __raw_writeb(real_minutes / 10, RTC_MIN10); } else { printk_once(KERN_NOTICE "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; } spin_unlock(&sh03_rtc_lock); return retval; } int sh03_rtc_settimeofday(const time_t secs) { unsigned long nowtime = secs; return set_rtc_mmss(nowtime); } void sh03_time_init(void) { rtc_sh_get_time = sh03_rtc_gettimeofday; rtc_sh_set_time = sh03_rtc_settimeofday; }
gpl-2.0
NaokiXie/android_kernel_samsung_wilcox
sound/core/hwdep_compat.c
14961
2352
/* * 32bit -> 64bit ioctl wrapper for hwdep API * Copyright (c) by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* This file is included from hwdep.c */ #include <linux/compat.h> struct snd_hwdep_dsp_image32 { u32 index; unsigned char name[64]; u32 image; /* pointer */ u32 length; u32 driver_data; } /* don't set packed attribute here */; static int snd_hwdep_dsp_load_compat(struct snd_hwdep *hw, struct snd_hwdep_dsp_image32 __user *src) { struct snd_hwdep_dsp_image __user *dst; compat_caddr_t ptr; u32 val; dst = compat_alloc_user_space(sizeof(*dst)); /* index and name */ if (copy_in_user(dst, src, 4 + 64)) return -EFAULT; if (get_user(ptr, &src->image) || put_user(compat_ptr(ptr), &dst->image)) return -EFAULT; if (get_user(val, &src->length) || put_user(val, &dst->length)) return -EFAULT; if (get_user(val, &src->driver_data) || put_user(val, &dst->driver_data)) return -EFAULT; return snd_hwdep_dsp_load(hw, dst); } enum { SNDRV_HWDEP_IOCTL_DSP_LOAD32 = _IOW('H', 0x03, struct snd_hwdep_dsp_image32) }; static long snd_hwdep_ioctl_compat(struct file * file, unsigned int cmd, unsigned long arg) { struct snd_hwdep *hw = file->private_data; void __user *argp = compat_ptr(arg); switch (cmd) { case SNDRV_HWDEP_IOCTL_PVERSION: case SNDRV_HWDEP_IOCTL_INFO: case SNDRV_HWDEP_IOCTL_DSP_STATUS: return 
snd_hwdep_ioctl(file, cmd, (unsigned long)argp); case SNDRV_HWDEP_IOCTL_DSP_LOAD32: return snd_hwdep_dsp_load_compat(hw, argp); } if (hw->ops.ioctl_compat) return hw->ops.ioctl_compat(hw, file, cmd, arg); return -ENOIOCTLCMD; }
gpl-2.0
jiangliu/linux
drivers/gpu/drm/msm/adreno/adreno_gpu.c
114
10406
/* * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include "adreno_gpu.h" #include "msm_gem.h" #include "msm_mmu.h" struct adreno_info { struct adreno_rev rev; uint32_t revn; const char *name; const char *pm4fw, *pfpfw; uint32_t gmem; }; #define ANY_ID 0xff static const struct adreno_info gpulist[] = { { .rev = ADRENO_REV(3, 0, 5, ANY_ID), .revn = 305, .name = "A305", .pm4fw = "a300_pm4.fw", .pfpfw = "a300_pfp.fw", .gmem = SZ_256K, }, { .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID), .revn = 320, .name = "A320", .pm4fw = "a300_pm4.fw", .pfpfw = "a300_pfp.fw", .gmem = SZ_512K, }, { .rev = ADRENO_REV(3, 3, 0, ANY_ID), .revn = 330, .name = "A330", .pm4fw = "a330_pm4.fw", .pfpfw = "a330_pfp.fw", .gmem = SZ_1M, }, }; MODULE_FIRMWARE("a300_pm4.fw"); MODULE_FIRMWARE("a300_pfp.fw"); MODULE_FIRMWARE("a330_pm4.fw"); MODULE_FIRMWARE("a330_pfp.fw"); #define RB_SIZE SZ_32K #define RB_BLKSIZE 16 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); switch (param) { case MSM_PARAM_GPU_ID: *value = adreno_gpu->info->revn; return 0; case MSM_PARAM_GMEM_SIZE: *value = adreno_gpu->gmem; return 0; case MSM_PARAM_CHIP_ID: *value = adreno_gpu->rev.patchid | (adreno_gpu->rev.minor << 8) | (adreno_gpu->rev.major << 16) | (adreno_gpu->rev.core << 24); return 0; default: DBG("%s: invalid param: %u", gpu->name, param); return -EINVAL; } } 
#define rbmemptr(adreno_gpu, member) \ ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member)) int adreno_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); DBG("%s", gpu->name); /* Setup REG_CP_RB_CNTL: */ gpu_write(gpu, REG_AXXX_CP_RB_CNTL, /* size is log2(quad-words): */ AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8))); /* Setup ringbuffer address: */ gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova); gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr)); /* Setup scratch/timestamp: */ gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence)); gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1); return 0; } static uint32_t get_wptr(struct msm_ringbuffer *ring) { return ring->cur - ring->start; } uint32_t adreno_last_fence(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); return adreno_gpu->memptrs->fence; } void adreno_recover(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct drm_device *dev = gpu->dev; int ret; gpu->funcs->pm_suspend(gpu); /* reset ringbuffer: */ gpu->rb->cur = gpu->rb->start; /* reset completed fence seqno, just discard anything pending: */ adreno_gpu->memptrs->fence = gpu->submitted_fence; adreno_gpu->memptrs->rptr = 0; adreno_gpu->memptrs->wptr = 0; gpu->funcs->pm_resume(gpu); ret = gpu->funcs->hw_init(gpu); if (ret) { dev_err(dev->dev, "gpu hw init failed: %d\n", ret); /* hmm, oh well? 
*/ } } int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = gpu->rb; unsigned i, ibs = 0; for (i = 0; i < submit->nr_cmds; i++) { switch (submit->cmd[i].type) { case MSM_SUBMIT_CMD_IB_TARGET_BUF: /* ignore IB-targets */ break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ if (priv->lastctx == ctx) break; case MSM_SUBMIT_CMD_BUF: OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); OUT_RING(ring, submit->cmd[i].iova); OUT_RING(ring, submit->cmd[i].size); ibs++; break; } } /* on a320, at least, we seem to need to pad things out to an * even number of qwords to avoid issue w/ CP hanging on wrap- * around: */ if (ibs % 2) OUT_PKT2(ring); OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); OUT_RING(ring, submit->fence); if (adreno_is_a3xx(adreno_gpu)) { /* Flush HLSQ lazy updates to make sure there is nothing * pending for indirect loads after the timestamp has * passed: */ OUT_PKT3(ring, CP_EVENT_WRITE, 1); OUT_RING(ring, HLSQ_FLUSH); OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); OUT_RING(ring, 0x00000000); } OUT_PKT3(ring, CP_EVENT_WRITE, 3); OUT_RING(ring, CACHE_FLUSH_TS); OUT_RING(ring, rbmemptr(adreno_gpu, fence)); OUT_RING(ring, submit->fence); /* we could maybe be clever and only CP_COND_EXEC the interrupt: */ OUT_PKT3(ring, CP_INTERRUPT, 1); OUT_RING(ring, 0x80000000); #if 0 if (adreno_is_a3xx(adreno_gpu)) { /* Dummy set-constant to trigger context rollover */ OUT_PKT3(ring, CP_SET_CONSTANT, 2); OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG)); OUT_RING(ring, 0x00000000); } #endif gpu->funcs->flush(gpu); return 0; } void adreno_flush(struct msm_gpu *gpu) { uint32_t wptr = get_wptr(gpu->rb); /* ensure writes to ringbuffer have hit system memory: */ mb(); gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr); } void adreno_idle(struct msm_gpu *gpu) { struct adreno_gpu 
*adreno_gpu = to_adreno_gpu(gpu); uint32_t wptr = get_wptr(gpu->rb); /* wait for CP to drain ringbuffer: */ if (spin_until(adreno_gpu->memptrs->rptr == wptr)) DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name); /* TODO maybe we need to reset GPU here to recover from hang? */ } #ifdef CONFIG_DEBUG_FS void adreno_show(struct msm_gpu *gpu, struct seq_file *m) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); seq_printf(m, "revision: %d (%d.%d.%d.%d)\n", adreno_gpu->info->revn, adreno_gpu->rev.core, adreno_gpu->rev.major, adreno_gpu->rev.minor, adreno_gpu->rev.patchid); seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence, gpu->submitted_fence); seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr); seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr); seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); } #endif /* would be nice to not have to duplicate the _show() stuff with printk(): */ void adreno_dump(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); printk("revision: %d (%d.%d.%d.%d)\n", adreno_gpu->info->revn, adreno_gpu->rev.core, adreno_gpu->rev.major, adreno_gpu->rev.minor, adreno_gpu->rev.patchid); printk("fence: %d/%d\n", adreno_gpu->memptrs->fence, gpu->submitted_fence); printk("rptr: %d\n", adreno_gpu->memptrs->rptr); printk("wptr: %d\n", adreno_gpu->memptrs->wptr); printk("rb wptr: %d\n", get_wptr(gpu->rb)); } static uint32_t ring_freewords(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); uint32_t size = gpu->rb->size / 4; uint32_t wptr = get_wptr(gpu->rb); uint32_t rptr = adreno_gpu->memptrs->rptr; return (rptr + (size - 1) - wptr) % size; } void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords) { if (spin_until(ring_freewords(gpu) >= ndwords)) DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name); } static const char *iommu_ports[] = { "gfx3d_user", "gfx3d_priv", "gfx3d1_user", "gfx3d1_priv", }; static inline bool _rev_match(uint8_t entry, uint8_t id) { 
return (entry == ANY_ID) || (entry == id); } int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, struct adreno_rev rev) { struct msm_mmu *mmu; int i, ret; /* identify gpu: */ for (i = 0; i < ARRAY_SIZE(gpulist); i++) { const struct adreno_info *info = &gpulist[i]; if (_rev_match(info->rev.core, rev.core) && _rev_match(info->rev.major, rev.major) && _rev_match(info->rev.minor, rev.minor) && _rev_match(info->rev.patchid, rev.patchid)) { gpu->info = info; gpu->revn = info->revn; break; } } if (i == ARRAY_SIZE(gpulist)) { dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n", rev.core, rev.major, rev.minor, rev.patchid); return -ENXIO; } DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name, rev.core, rev.major, rev.minor, rev.patchid); gpu->funcs = funcs; gpu->gmem = gpu->info->gmem; gpu->rev = rev; ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev); if (ret) { dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n", gpu->info->pm4fw, ret); return ret; } ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev); if (ret) { dev_err(drm->dev, "failed to load %s PFP firmware: %d\n", gpu->info->pfpfw, ret); return ret; } ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base, gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq", RB_SIZE); if (ret) return ret; mmu = gpu->base.mmu; if (mmu) { ret = mmu->funcs->attach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) return ret; } gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs), MSM_BO_UNCACHED); if (IS_ERR(gpu->memptrs_bo)) { ret = PTR_ERR(gpu->memptrs_bo); gpu->memptrs_bo = NULL; dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); return ret; } gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo); if (!gpu->memptrs) { dev_err(drm->dev, "could not vmap memptrs\n"); return -ENOMEM; } ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id, &gpu->memptrs_iova); if (ret) { dev_err(drm->dev, "could not 
map memptrs: %d\n", ret); return ret; } return 0; } void adreno_gpu_cleanup(struct adreno_gpu *gpu) { if (gpu->memptrs_bo) { if (gpu->memptrs_iova) msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); drm_gem_object_unreference(gpu->memptrs_bo); } if (gpu->pm4) release_firmware(gpu->pm4); if (gpu->pfp) release_firmware(gpu->pfp); msm_gpu_cleanup(&gpu->base); }
gpl-2.0
vitek999/android_kernel_oukitel_orange
drivers/gpu/drm/radeon/atombios_dp.c
626
26611
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include <drm/drmP.h> #include <drm/radeon_drm.h> #include "radeon.h" #include "atom.h" #include "atom-bits.h" #include <drm/drm_dp_helper.h> /* move these to drm_dp_helper.c/h */ #define DP_LINK_CONFIGURATION_SIZE 9 #define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE static char *voltage_names[] = { "0.4V", "0.6V", "0.8V", "1.2V" }; static char *pre_emph_names[] = { "0dB", "3.5dB", "6dB", "9.5dB" }; /***** radeon AUX functions *****/ /* Atom needs data in little endian format * so swap as appropriate when copying data to * or from atom. Note that atom operates on * dw units. 
*/ void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ u32 *dst32, *src32; int i; memcpy(src_tmp, src, num_bytes); src32 = (u32 *)src_tmp; dst32 = (u32 *)dst_tmp; if (to_le) { for (i = 0; i < ((num_bytes + 3) / 4); i++) dst32[i] = cpu_to_le32(src32[i]); memcpy(dst, dst_tmp, num_bytes); } else { u8 dws = num_bytes & ~3; for (i = 0; i < ((num_bytes + 3) / 4); i++) dst32[i] = le32_to_cpu(src32[i]); memcpy(dst, dst_tmp, dws); if (num_bytes % 4) { for (i = 0; i < (num_bytes % 4); i++) dst[dws+i] = dst_tmp[dws+i]; } } #else memcpy(dst, src, num_bytes); #endif } union aux_channel_transaction { PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; }; static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *send, int send_bytes, u8 *recv, int recv_size, u8 delay, u8 *ack) { struct drm_device *dev = chan->dev; struct radeon_device *rdev = dev->dev_private; union aux_channel_transaction args; int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); unsigned char *base; int recv_bytes; memset(&args, 0, sizeof(args)); base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); radeon_atom_copy_swap(base, send, send_bytes, true); args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4)); args.v1.ucDataOutLen = 0; args.v1.ucChannelID = chan->rec.i2c_id; args.v1.ucDelay = delay / 10; if (ASIC_IS_DCE4(rdev)) args.v2.ucHPD_ID = chan->rec.hpd; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); *ack = args.v1.ucReplyStatus; /* timeout */ if (args.v1.ucReplyStatus == 1) { DRM_DEBUG_KMS("dp_aux_ch timeout\n"); return -ETIMEDOUT; } /* flags not zero */ if (args.v1.ucReplyStatus == 2) { DRM_DEBUG_KMS("dp_aux_ch flags not zero\n"); return -EBUSY; } /* error */ if (args.v1.ucReplyStatus == 3) { DRM_DEBUG_KMS("dp_aux_ch error\n"); return -EIO; } 
recv_bytes = args.v1.ucDataOutLen; if (recv_bytes > recv_size) recv_bytes = recv_size; if (recv && recv_size) radeon_atom_copy_swap(recv, base + 16, recv_bytes, false); return recv_bytes; } static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, u16 address, u8 *send, u8 send_bytes, u8 delay) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; int ret; u8 msg[20]; int msg_bytes = send_bytes + 4; u8 ack; unsigned retry; if (send_bytes > 16) return -1; msg[0] = address; msg[1] = address >> 8; msg[2] = AUX_NATIVE_WRITE << 4; msg[3] = (msg_bytes << 4) | (send_bytes - 1); memcpy(&msg[4], send, send_bytes); for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_bytes, NULL, 0, delay, &ack); if (ret == -EBUSY) continue; else if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) return send_bytes; else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) udelay(400); else return -EIO; } return -EIO; } static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, u16 address, u8 *recv, int recv_bytes, u8 delay) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[4]; int msg_bytes = 4; u8 ack; int ret; unsigned retry; msg[0] = address; msg[1] = address >> 8; msg[2] = AUX_NATIVE_READ << 4; msg[3] = (msg_bytes << 4) | (recv_bytes - 1); for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_bytes, recv, recv_bytes, delay, &ack); if (ret == -EBUSY) continue; else if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) return ret; else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) udelay(400); else if (ret == 0) return -EPROTO; else return -EIO; } return -EIO; } static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, u16 reg, u8 val) { radeon_dp_aux_native_write(radeon_connector, 
reg, &val, 1, 0); } static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector, u16 reg) { u8 val = 0; radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0); return val; } int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, u8 write_byte, u8 *read_byte) { struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; u16 address = algo_data->address; u8 msg[5]; u8 reply[2]; unsigned retry; int msg_bytes; int reply_bytes = 1; int ret; u8 ack; /* Set up the command byte */ if (mode & MODE_I2C_READ) msg[2] = AUX_I2C_READ << 4; else msg[2] = AUX_I2C_WRITE << 4; if (!(mode & MODE_I2C_STOP)) msg[2] |= AUX_I2C_MOT << 4; msg[0] = address; msg[1] = address >> 8; switch (mode) { case MODE_I2C_WRITE: msg_bytes = 5; msg[3] = msg_bytes << 4; msg[4] = write_byte; break; case MODE_I2C_READ: msg_bytes = 4; msg[3] = msg_bytes << 4; break; default: msg_bytes = 4; msg[3] = 3 << 4; break; } for (retry = 0; retry < 4; retry++) { ret = radeon_process_aux_ch(auxch, msg, msg_bytes, reply, reply_bytes, 0, &ack); if (ret == -EBUSY) continue; else if (ret < 0) { DRM_DEBUG_KMS("aux_ch failed %d\n", ret); return ret; } switch (ack & AUX_NATIVE_REPLY_MASK) { case AUX_NATIVE_REPLY_ACK: /* I2C-over-AUX Reply field is only valid * when paired with AUX ACK. 
*/ break; case AUX_NATIVE_REPLY_NACK: DRM_DEBUG_KMS("aux_ch native nack\n"); return -EREMOTEIO; case AUX_NATIVE_REPLY_DEFER: DRM_DEBUG_KMS("aux_ch native defer\n"); udelay(400); continue; default: DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack); return -EREMOTEIO; } switch (ack & AUX_I2C_REPLY_MASK) { case AUX_I2C_REPLY_ACK: if (mode == MODE_I2C_READ) *read_byte = reply[0]; return ret; case AUX_I2C_REPLY_NACK: DRM_DEBUG_KMS("aux_i2c nack\n"); return -EREMOTEIO; case AUX_I2C_REPLY_DEFER: DRM_DEBUG_KMS("aux_i2c defer\n"); udelay(400); break; default: DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack); return -EREMOTEIO; } } DRM_DEBUG_KMS("aux i2c too many retries, giving up\n"); return -EREMOTEIO; } /***** general DP utility functions *****/ #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], int lane_count, u8 train_set[4]) { u8 v = 0; u8 p = 0; int lane; for (lane = 0; lane < lane_count; lane++) { u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane); u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n", lane, voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT], pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); if (this_v > v) v = this_v; if (this_p > p) p = this_p; } if (v >= DP_VOLTAGE_MAX) v |= DP_TRAIN_MAX_SWING_REACHED; if (p >= DP_PRE_EMPHASIS_MAX) p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n", voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); for (lane = 0; lane < 4; lane++) train_set[lane] = v | p; } /* convert bits per color to bits per pixel */ /* get bpc from the EDID */ static int convert_bpc_to_bpp(int bpc) { if (bpc == 0) return 24; else 
return bpc * 3; } /* get the max pix clock supported by the link rate and lane num */ static int dp_get_max_dp_pix_clock(int link_rate, int lane_num, int bpp) { return (link_rate * lane_num * 8) / bpp; } /***** radeon specific DP functions *****/ static int radeon_dp_get_max_link_rate(struct drm_connector *connector, u8 dpcd[DP_DPCD_SIZE]) { int max_link_rate; if (radeon_connector_is_dp12_capable(connector)) max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000); else max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000); return max_link_rate; } /* First get the min lane# when low rate is used according to pixel clock * (prefer low rate), second check max lane# supported by DP panel, * if the max lane# < low rate lane# then use max lane# instead. */ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, u8 dpcd[DP_DPCD_SIZE], int pix_clock) { int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd); int max_lane_num = drm_dp_max_lane_count(dpcd); int lane_num; int max_dp_pix_clock; for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) { max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp); if (pix_clock <= max_dp_pix_clock) break; } return lane_num; } static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, u8 dpcd[DP_DPCD_SIZE], int pix_clock) { int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); int lane_num, max_pix_clock; if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == ENCODER_OBJECT_ID_NUTMEG) return 270000; lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp); if (pix_clock <= max_pix_clock) return 162000; max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp); if (pix_clock <= max_pix_clock) return 270000; if (radeon_connector_is_dp12_capable(connector)) { max_pix_clock = dp_get_max_dp_pix_clock(540000, 
lane_num, bpp); if (pix_clock <= max_pix_clock) return 540000; } return radeon_dp_get_max_link_rate(connector, dpcd); } static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock, u8 ucconfig, u8 lane_num) { DP_ENCODER_SERVICE_PARAMETERS args; int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); memset(&args, 0, sizeof(args)); args.ucLinkClock = dp_clock / 10; args.ucConfig = ucconfig; args.ucAction = action; args.ucLaneNum = lane_num; args.ucStatus = 0; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); return args.ucStatus; } u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; struct drm_device *dev = radeon_connector->base.dev; struct radeon_device *rdev = dev->dev_private; return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, dig_connector->dp_i2c_bus->rec.i2c_id, 0); } static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 buf[3]; if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) return; if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0)) DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", buf[0], buf[1], buf[2]); if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0)) DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", buf[0], buf[1], buf[2]); } bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[DP_DPCD_SIZE]; int ret, i; ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, DP_DPCD_SIZE, 0); if (ret > 0) { memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); DRM_DEBUG_KMS("DPCD: "); for (i = 0; i < DP_DPCD_SIZE; i++) DRM_DEBUG_KMS("%02x ", msg[i]); DRM_DEBUG_KMS("\n"); radeon_dp_probe_oui(radeon_connector); return true; } 
dig_connector->dpcd[0] = 0; return false; } int radeon_dp_get_panel_mode(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector); u8 tmp; if (!ASIC_IS_DCE4(rdev)) return panel_mode; if (dp_bridge != ENCODER_OBJECT_ID_NONE) { /* DP bridge chips */ tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); if (tmp & 1) panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; else panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { /* eDP */ tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); if (tmp & 1) panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; } return panel_mode; } void radeon_dp_set_link_config(struct drm_connector *connector, const struct drm_display_mode *mode) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; if (!radeon_connector->con_priv) return; dig_connector = radeon_connector->con_priv; if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { dig_connector->dp_clock = radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); dig_connector->dp_lane_count = radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock); } } int radeon_dp_mode_valid_helper(struct drm_connector *connector, struct drm_display_mode *mode) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; int dp_clock; if 
((mode->clock > 340000) && (!radeon_connector_is_dp12_capable(connector))) return MODE_CLOCK_HIGH; if (!radeon_connector->con_priv) return MODE_CLOCK_HIGH; dig_connector = radeon_connector->con_priv; dp_clock = radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); if ((dp_clock == 540000) && (!radeon_connector_is_dp12_capable(connector))) return MODE_CLOCK_HIGH; return MODE_OK; } static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, u8 link_status[DP_LINK_STATUS_SIZE]) { int ret; ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, link_status, DP_LINK_STATUS_SIZE, 100); if (ret <= 0) { return false; } DRM_DEBUG_KMS("link status %*ph\n", 6, link_status); return true; } bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) { u8 link_status[DP_LINK_STATUS_SIZE]; struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; if (!radeon_dp_get_link_status(radeon_connector, link_status)) return false; if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) return false; return true; } struct radeon_dp_link_train_info { struct radeon_device *rdev; struct drm_encoder *encoder; struct drm_connector *connector; struct radeon_connector *radeon_connector; int enc_id; int dp_clock; int dp_lane_count; bool tp3_supported; u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 train_set[4]; u8 link_status[DP_LINK_STATUS_SIZE]; u8 tries; bool use_dpencoder; }; static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) { /* set the initial vs/emph on the source */ atombios_dig_transmitter_setup(dp_info->encoder, ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH, 0, dp_info->train_set[0]); /* sets all lanes at once */ /* set the vs/emph on the sink */ radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET, dp_info->train_set, dp_info->dp_lane_count, 0); } static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp) { int rtp = 0; /* set training pattern on the 
source */ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) { switch (tp) { case DP_TRAINING_PATTERN_1: rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1; break; case DP_TRAINING_PATTERN_2: rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2; break; case DP_TRAINING_PATTERN_3: rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3; break; } atombios_dig_encoder_setup(dp_info->encoder, rtp, 0); } else { switch (tp) { case DP_TRAINING_PATTERN_1: rtp = 0; break; case DP_TRAINING_PATTERN_2: rtp = 1; break; } radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, dp_info->dp_clock, dp_info->enc_id, rtp); } /* enable training pattern on the sink */ radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp); } static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; u8 tmp; /* power up the sink */ if (dp_info->dpcd[0] >= 0x11) radeon_write_dpcd_reg(dp_info->radeon_connector, DP_SET_POWER, DP_SET_POWER_D0); /* possibly enable downspread on the sink */ if (dp_info->dpcd[3] & 0x1) radeon_write_dpcd_reg(dp_info->radeon_connector, DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); else radeon_write_dpcd_reg(dp_info->radeon_connector, DP_DOWNSPREAD_CTRL, 0); if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) && (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1); } /* set the lane count on the sink */ tmp = dp_info->dp_lane_count; if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 && dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP) tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); /* set the link rate on the sink */ tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock); radeon_write_dpcd_reg(dp_info->radeon_connector, 
DP_LINK_BW_SET, tmp); /* start training on the source */ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) atombios_dig_encoder_setup(dp_info->encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0); else radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START, dp_info->dp_clock, dp_info->enc_id, 0); /* disable the training pattern on the sink */ radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); return 0; } static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info) { udelay(400); /* disable the training pattern on the sink */ radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); /* disable the training pattern on the source */ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) atombios_dig_encoder_setup(dp_info->encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0); else radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, dp_info->dp_clock, dp_info->enc_id, 0); return 0; } static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) { bool clock_recovery; u8 voltage; int i; radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1); memset(dp_info->train_set, 0, 4); radeon_dp_update_vs_emph(dp_info); udelay(400); /* clock recovery loop */ clock_recovery = false; dp_info->tries = 0; voltage = 0xff; while (1) { drm_dp_link_train_clock_recovery_delay(dp_info->dpcd); if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { DRM_ERROR("displayport link status failed\n"); break; } if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { clock_recovery = true; break; } for (i = 0; i < dp_info->dp_lane_count; i++) { if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; } if (i == dp_info->dp_lane_count) { DRM_ERROR("clock recovery reached max voltage\n"); break; } if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == 
voltage) { ++dp_info->tries; if (dp_info->tries == 5) { DRM_ERROR("clock recovery tried 5 times\n"); break; } } else dp_info->tries = 0; voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; /* Compute new train_set as requested by sink */ dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set); radeon_dp_update_vs_emph(dp_info); } if (!clock_recovery) { DRM_ERROR("clock recovery failed\n"); return -1; } else { DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n", dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT); return 0; } } static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) { bool channel_eq; if (dp_info->tp3_supported) radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3); else radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2); /* channel equalization loop */ dp_info->tries = 0; channel_eq = false; while (1) { drm_dp_link_train_channel_eq_delay(dp_info->dpcd); if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { DRM_ERROR("displayport link status failed\n"); break; } if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { channel_eq = true; break; } /* Try 5 times */ if (dp_info->tries > 5) { DRM_ERROR("channel eq failed: 5 tries\n"); break; } /* Compute new train_set as requested by sink */ dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set); radeon_dp_update_vs_emph(dp_info); dp_info->tries++; } if (!channel_eq) { DRM_ERROR("channel eq failed\n"); return -1; } else { DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n", dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT); return 0; } } void radeon_dp_link_train(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device 
*rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig; struct radeon_connector *radeon_connector; struct radeon_connector_atom_dig *dig_connector; struct radeon_dp_link_train_info dp_info; int index; u8 tmp, frev, crev; if (!radeon_encoder->enc_priv) return; dig = radeon_encoder->enc_priv; radeon_connector = to_radeon_connector(connector); if (!radeon_connector->con_priv) return; dig_connector = radeon_connector->con_priv; if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) && (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP)) return; /* DPEncoderService newer than 1.1 can't program properly the * training pattern. When facing such version use the * DIGXEncoderControl (X== 1 | 2) */ dp_info.use_dpencoder = true; index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) { if (crev > 1) { dp_info.use_dpencoder = false; } } dp_info.enc_id = 0; if (dig->dig_encoder) dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; else dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; if (dig->linkb) dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B; else dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) dp_info.tp3_supported = true; else dp_info.tp3_supported = false; memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); dp_info.rdev = rdev; dp_info.encoder = encoder; dp_info.connector = connector; dp_info.radeon_connector = radeon_connector; dp_info.dp_lane_count = dig_connector->dp_lane_count; dp_info.dp_clock = dig_connector->dp_clock; if (radeon_dp_link_train_init(&dp_info)) goto done; if (radeon_dp_link_train_cr(&dp_info)) goto done; if (radeon_dp_link_train_ce(&dp_info)) goto done; done: if (radeon_dp_link_train_finish(&dp_info)) return; }
gpl-2.0
tcp209/kernel_samsung_epic4gtouch
net/mac80211/wpa.c
626
14966
/* * Copyright 2002-2004, Instant802 Networks, Inc. * Copyright 2008, Jouni Malinen <j@w1.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/compiler.h> #include <linux/ieee80211.h> #include <linux/gfp.h> #include <asm/unaligned.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "michael.h" #include "tkip.h" #include "aes_ccm.h" #include "aes_cmac.h" #include "wpa.h" ieee80211_tx_result ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) { u8 *data, *key, *mic, key_offset; size_t data_len; unsigned int hdrlen; struct ieee80211_hdr *hdr; struct sk_buff *skb = tx->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int authenticator; int tail; hdr = (struct ieee80211_hdr *)skb->data; if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control)) return TX_CONTINUE; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen) return TX_DROP; data = skb->data + hdrlen; data_len = skb->len - hdrlen; if (info->control.hw_key && !(tx->flags & IEEE80211_TX_FRAGMENTED) && !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { /* hwaccel - with no need for SW-generated MMIC */ return TX_CONTINUE; } tail = MICHAEL_MIC_LEN; if (!info->control.hw_key) tail += TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < TKIP_IV_LEN)) return TX_DROP; #if 0 authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */ #else authenticator = 1; #endif key_offset = authenticator ? 
NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY : NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY; key = &tx->key->conf.key[key_offset]; mic = skb_put(skb, MICHAEL_MIC_LEN); michael_mic(key, hdr, data, data_len, mic); return TX_CONTINUE; } ieee80211_rx_result ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) { u8 *data, *key = NULL, key_offset; size_t data_len; unsigned int hdrlen; u8 mic[MICHAEL_MIC_LEN]; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int authenticator = 1, wpa_test = 0; /* No way to verify the MIC if the hardware stripped it */ if (status->flag & RX_FLAG_MMIC_STRIPPED) return RX_CONTINUE; if (!rx->key || rx->key->conf.alg != ALG_TKIP || !ieee80211_has_protected(hdr->frame_control) || !ieee80211_is_data_present(hdr->frame_control)) return RX_CONTINUE; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen + MICHAEL_MIC_LEN) return RX_DROP_UNUSABLE; data = skb->data + hdrlen; data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; #if 0 authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */ #else authenticator = 1; #endif key_offset = authenticator ? 
NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY : NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY; key = &rx->key->conf.key[key_offset]; michael_mic(key, hdr, data, data_len, mic); if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { if (!(rx->flags & IEEE80211_RX_RA_MATCH)) return RX_DROP_UNUSABLE; mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx, (void *) skb->data, NULL, GFP_ATOMIC); return RX_DROP_UNUSABLE; } /* remove Michael MIC from payload */ skb_trim(skb, skb->len - MICHAEL_MIC_LEN); /* update IV in key information to be able to detect replays */ rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32; rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16; return RX_CONTINUE; } static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned int hdrlen; int len, tail; u8 *pos; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { /* hwaccel - with no need for software-generated IV */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < TKIP_IV_LEN)) return -1; pos = skb_push(skb, TKIP_IV_LEN); memmove(pos, pos + TKIP_IV_LEN, hdrlen); pos += hdrlen; /* Increase IV for the frame */ key->u.tkip.tx.iv16++; if (key->u.tkip.tx.iv16 == 0) key->u.tkip.tx.iv32++; pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16); /* hwaccel - with software IV */ if (info->control.hw_key) return 0; /* Add room for ICV */ skb_put(skb, TKIP_ICV_LEN); hdr = (struct ieee80211_hdr *) skb->data; ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, key, pos, len, hdr->addr2); return 0; } ieee80211_tx_result ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; 
ieee80211_tx_set_protected(tx); do { if (tkip_encrypt_skb(tx, skb) < 0) return TX_DROP; } while ((skb = skb->next)); return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; int hdrlen, res, hwaccel = 0, wpa_test = 0; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control)) return RX_CONTINUE; if (!rx->sta || skb->len - hdrlen < 12) return RX_DROP_UNUSABLE; if (status->flag & RX_FLAG_DECRYPTED) { if (status->flag & RX_FLAG_IV_STRIPPED) { /* * Hardware took care of all processing, including * replay protection, and stripped the ICV/IV so * we cannot do any checks here. */ return RX_CONTINUE; } /* let TKIP code verify IV, but skip decryption */ hwaccel = 1; } res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, key, skb->data + hdrlen, skb->len - hdrlen, rx->sta->sta.addr, hdr->addr1, hwaccel, rx->queue, &rx->tkip_iv32, &rx->tkip_iv16); if (res != TKIP_DECRYPT_OK || wpa_test) return RX_DROP_UNUSABLE; /* Trim ICV */ skb_trim(skb, skb->len - TKIP_ICV_LEN); /* Remove IV */ memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen); skb_pull(skb, TKIP_IV_LEN); return RX_CONTINUE; } static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch, int encrypted) { __le16 mask_fc; int a4_included, mgmt; u8 qos_tid; u8 *b_0, *aad; u16 data_len, len_a; unsigned int hdrlen; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; b_0 = scratch + 3 * AES_BLOCK_LEN; aad = scratch + 4 * AES_BLOCK_LEN; /* * Mask FC: zero subtype b4 b5 b6 (if not mgmt) * Retry, PwrMgt, MoreData; set Protected */ mgmt = ieee80211_is_mgmt(hdr->frame_control); mask_fc = hdr->frame_control; mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); if (!mgmt) mask_fc &= 
~cpu_to_le16(0x0070); mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); hdrlen = ieee80211_hdrlen(hdr->frame_control); len_a = hdrlen - 2; a4_included = ieee80211_has_a4(hdr->frame_control); if (ieee80211_is_data_qos(hdr->frame_control)) qos_tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; else qos_tid = 0; data_len = skb->len - hdrlen - CCMP_HDR_LEN; if (encrypted) data_len -= CCMP_MIC_LEN; /* First block, b_0 */ b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */ /* Nonce: Nonce Flags | A2 | PN * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7) */ b_0[1] = qos_tid | (mgmt << 4); memcpy(&b_0[2], hdr->addr2, ETH_ALEN); memcpy(&b_0[8], pn, CCMP_PN_LEN); /* l(m) */ put_unaligned_be16(data_len, &b_0[14]); /* AAD (extra authenticate-only data) / masked 802.11 header * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ put_unaligned_be16(len_a, &aad[0]); put_unaligned(mask_fc, (__le16 *)&aad[2]); memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN); /* Mask Seq#, leave Frag# */ aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; aad[23] = 0; if (a4_included) { memcpy(&aad[24], hdr->addr4, ETH_ALEN); aad[30] = qos_tid; aad[31] = 0; } else { memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN); aad[24] = qos_tid; } } static inline void ccmp_pn2hdr(u8 *hdr, u8 *pn, int key_id) { hdr[0] = pn[5]; hdr[1] = pn[4]; hdr[2] = 0; hdr[3] = 0x20 | (key_id << 6); hdr[4] = pn[3]; hdr[5] = pn[2]; hdr[6] = pn[1]; hdr[7] = pn[0]; } static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr) { pn[0] = hdr[7]; pn[1] = hdr[6]; pn[2] = hdr[5]; pn[3] = hdr[4]; pn[4] = hdr[1]; pn[5] = hdr[0]; } static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int hdrlen, len, tail; u8 *pos, *pn; int i; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { /* * hwaccel has no need for 
preallocated room for CCMP * header or MIC fields */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = CCMP_MIC_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < CCMP_HDR_LEN)) return -1; pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdrlen); hdr = (struct ieee80211_hdr *) pos; pos += hdrlen; /* PN = PN + 1 */ pn = key->u.ccmp.tx_pn; for (i = CCMP_PN_LEN - 1; i >= 0; i--) { pn[i]++; if (pn[i]) break; } ccmp_pn2hdr(pos, pn, key->conf.keyidx); /* hwaccel - with software CCMP header */ if (info->control.hw_key) return 0; pos += CCMP_HDR_LEN; ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0); ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, key->u.ccmp.tx_crypto_buf, pos, len, pos, skb_put(skb, CCMP_MIC_LEN)); return 0; } ieee80211_tx_result ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; ieee80211_tx_set_protected(tx); do { if (ccmp_encrypt_skb(tx, skb) < 0) return TX_DROP; } while ((skb = skb->next)); return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; int hdrlen; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 pn[CCMP_PN_LEN]; int data_len; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control) && !ieee80211_is_robust_mgmt_frame(hdr)) return RX_CONTINUE; data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; if (!rx->sta || data_len < 0) return RX_DROP_UNUSABLE; if ((status->flag & RX_FLAG_DECRYPTED) && (status->flag & RX_FLAG_IV_STRIPPED)) return RX_CONTINUE; ccmp_hdr2pn(pn, skb->data + hdrlen); if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { key->u.ccmp.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* 
hardware didn't decrypt/verify MIC */ ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1); if (ieee80211_aes_ccm_decrypt( key->u.ccmp.tfm, key->u.ccmp.rx_crypto_buf, skb->data + hdrlen + CCMP_HDR_LEN, data_len, skb->data + skb->len - CCMP_MIC_LEN, skb->data + hdrlen + CCMP_HDR_LEN)) return RX_DROP_UNUSABLE; } memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN); /* Remove CCMP header and MIC */ skb_trim(skb, skb->len - CCMP_MIC_LEN); memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); skb_pull(skb, CCMP_HDR_LEN); return RX_CONTINUE; } static void bip_aad(struct sk_buff *skb, u8 *aad) { /* BIP AAD: FC(masked) || A1 || A2 || A3 */ /* FC type/subtype */ aad[0] = skb->data[0]; /* Mask FC Retry, PwrMgt, MoreData flags to zero */ aad[1] = skb->data[1] & ~(BIT(4) | BIT(5) | BIT(6)); /* A1 || A2 || A3 */ memcpy(aad + 2, skb->data + 4, 3 * ETH_ALEN); } static inline void bip_ipn_swap(u8 *d, const u8 *s) { *d++ = s[5]; *d++ = s[4]; *d++ = s[3]; *d++ = s[2]; *d++ = s[1]; *d = s[0]; } ieee80211_tx_result ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_key *key = tx->key; struct ieee80211_mmie *mmie; u8 *pn, aad[20]; int i; if (info->control.hw_key) return 0; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; mmie = (struct ieee80211_mmie *) skb_put(skb, sizeof(*mmie)); mmie->element_id = WLAN_EID_MMIE; mmie->length = sizeof(*mmie) - 2; mmie->key_id = cpu_to_le16(key->conf.keyidx); /* PN = PN + 1 */ pn = key->u.aes_cmac.tx_pn; for (i = sizeof(key->u.aes_cmac.tx_pn) - 1; i >= 0; i--) { pn[i]++; if (pn[i]) break; } bip_ipn_swap(mmie->sequence_number, pn); bip_aad(skb, aad); /* * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */ ieee80211_aes_cmac(key->u.aes_cmac.tfm, key->u.aes_cmac.tx_crypto_buf, aad, skb->data + 24, skb->len - 24, mmie->mic); return TX_CONTINUE; } ieee80211_rx_result 
ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie *mmie; u8 aad[20], mic[8], ipn[6]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) return RX_CONTINUE; if ((status->flag & RX_FLAG_DECRYPTED) && (status->flag & RX_FLAG_IV_STRIPPED)) return RX_CONTINUE; if (skb->len < 24 + sizeof(*mmie)) return RX_DROP_UNUSABLE; mmie = (struct ieee80211_mmie *) (skb->data + skb->len - sizeof(*mmie)); if (mmie->element_id != WLAN_EID_MMIE || mmie->length != sizeof(*mmie) - 2) return RX_DROP_UNUSABLE; /* Invalid MMIE */ bip_ipn_swap(ipn, mmie->sequence_number); if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { key->u.aes_cmac.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* hardware didn't decrypt/verify MIC */ bip_aad(skb, aad); ieee80211_aes_cmac(key->u.aes_cmac.tfm, key->u.aes_cmac.rx_crypto_buf, aad, skb->data + 24, skb->len - 24, mic); if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { key->u.aes_cmac.icverrors++; return RX_DROP_UNUSABLE; } } memcpy(key->u.aes_cmac.rx_pn, ipn, 6); /* Remove MMIE */ skb_trim(skb, skb->len - sizeof(*mmie)); return RX_CONTINUE; }
gpl-2.0
carvsdriver/android_kernel_samsung_n5110-common
kernel/cpuset.c
882
75657
/* * kernel/cpuset.c * * Processor and Memory placement constraints for sets of tasks. * * Copyright (C) 2003 BULL SA. * Copyright (C) 2004-2007 Silicon Graphics, Inc. * Copyright (C) 2006 Google, Inc * * Portions derived from Patrick Mochel's sysfs code. * sysfs is Copyright (c) 2001-3 Patrick Mochel * * 2003-10-10 Written by Simon Derr. * 2003-10-22 Updates by Stephen Hemminger. * 2004 May-July Rework by Paul Jackson. * 2006 Rework by Paul Menage to use generic cgroups * 2008 Rework of the scheduler domains and CPU hotplug handling * by Max Krasnyansky * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of the Linux * distribution for more details. */ #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/cpuset.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/mempolicy.h> #include <linux/mm.h> #include <linux/memory.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/security.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/time.h> #include <linux/backing-dev.h> #include <linux/sort.h> #include <asm/uaccess.h> #include <asm/atomic.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/cgroup.h> /* * Workqueue for cpuset related tasks. * * Using kevent workqueue may cause deadlock when memory_migrate * is set. So we create a separate workqueue thread for cpuset. */ static struct workqueue_struct *cpuset_wq; /* * Tracks how many cpusets are currently defined in system. 
* When there is only one cpuset (the root cpuset) we can * short circuit some hooks. */ int number_of_cpusets __read_mostly; /* Forward declare cgroup structures */ struct cgroup_subsys cpuset_subsys; struct cpuset; /* See "Frequency meter" comments, below. */ struct fmeter { int cnt; /* unprocessed events count */ int val; /* most recent output value */ time_t time; /* clock (secs) when val computed */ spinlock_t lock; /* guards read or write of above */ }; struct cpuset { struct cgroup_subsys_state css; unsigned long flags; /* "unsigned long" so bitops work */ cpumask_var_t cpus_allowed; /* CPUs allowed to tasks in cpuset */ nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */ struct cpuset *parent; /* my parent */ struct fmeter fmeter; /* memory_pressure filter */ /* partition number for rebuild_sched_domains() */ int pn; /* for custom sched domain */ int relax_domain_level; /* used for walking a cpuset hierarchy */ struct list_head stack_list; }; /* Retrieve the cpuset for a cgroup */ static inline struct cpuset *cgroup_cs(struct cgroup *cont) { return container_of(cgroup_subsys_state(cont, cpuset_subsys_id), struct cpuset, css); } /* Retrieve the cpuset for a task */ static inline struct cpuset *task_cs(struct task_struct *task) { return container_of(task_subsys_state(task, cpuset_subsys_id), struct cpuset, css); } /* bits in struct cpuset flags field */ typedef enum { CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE, CS_MEM_HARDWALL, CS_MEMORY_MIGRATE, CS_SCHED_LOAD_BALANCE, CS_SPREAD_PAGE, CS_SPREAD_SLAB, } cpuset_flagbits_t; /* convenient tests for these bits */ static inline int is_cpu_exclusive(const struct cpuset *cs) { return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); } static inline int is_mem_exclusive(const struct cpuset *cs) { return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); } static inline int is_mem_hardwall(const struct cpuset *cs) { return test_bit(CS_MEM_HARDWALL, &cs->flags); } static inline int is_sched_load_balance(const struct cpuset *cs) { return 
test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); } static inline int is_memory_migrate(const struct cpuset *cs) { return test_bit(CS_MEMORY_MIGRATE, &cs->flags); } static inline int is_spread_page(const struct cpuset *cs) { return test_bit(CS_SPREAD_PAGE, &cs->flags); } static inline int is_spread_slab(const struct cpuset *cs) { return test_bit(CS_SPREAD_SLAB, &cs->flags); } static struct cpuset top_cpuset = { .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)), }; /* * There are two global mutexes guarding cpuset structures. The first * is the main control groups cgroup_mutex, accessed via * cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific * callback_mutex, below. They can nest. It is ok to first take * cgroup_mutex, then nest callback_mutex. We also require taking * task_lock() when dereferencing a task's cpuset pointer. See "The * task_lock() exception", at the end of this comment. * * A task must hold both mutexes to modify cpusets. If a task * holds cgroup_mutex, then it blocks others wanting that mutex, * ensuring that it is the only task able to also acquire callback_mutex * and be able to modify cpusets. It can perform various checks on * the cpuset structure first, knowing nothing will change. It can * also allocate memory while just holding cgroup_mutex. While it is * performing these checks, various callback routines can briefly * acquire callback_mutex to query cpusets. Once it is ready to make * the changes, it takes callback_mutex, blocking everyone else. * * Calls to the kernel memory allocator can not be made while holding * callback_mutex, as that would risk double tripping on callback_mutex * from one of the callbacks into the cpuset code from within * __alloc_pages(). * * If a task is only holding callback_mutex, then it has read-only * access to cpusets. * * Now, the task_struct fields mems_allowed and mempolicy may be changed * by other task, we use alloc_lock in the task_struct fields to protect * them. 
* * The cpuset_common_file_read() handlers only hold callback_mutex across * small pieces of code, such as when reading out possibly multi-word * cpumasks and nodemasks. * * Accessing a task's cpuset should be done in accordance with the * guidelines for accessing subsystem state in kernel/cgroup.c */ static DEFINE_MUTEX(callback_mutex); /* * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist * buffers. They are statically allocated to prevent using excess stack * when calling cpuset_print_task_mems_allowed(). */ #define CPUSET_NAME_LEN (128) #define CPUSET_NODELIST_LEN (256) static char cpuset_name[CPUSET_NAME_LEN]; static char cpuset_nodelist[CPUSET_NODELIST_LEN]; static DEFINE_SPINLOCK(cpuset_buffer_lock); /* * This is ugly, but preserves the userspace API for existing cpuset * users. If someone tries to mount the "cpuset" filesystem, we * silently switch it to mount "cgroup" instead */ static struct dentry *cpuset_mount(struct file_system_type *fs_type, int flags, const char *unused_dev_name, void *data) { struct file_system_type *cgroup_fs = get_fs_type("cgroup"); struct dentry *ret = ERR_PTR(-ENODEV); if (cgroup_fs) { char mountopts[] = "cpuset,noprefix," "release_agent=/sbin/cpuset_release_agent"; ret = cgroup_fs->mount(cgroup_fs, flags, unused_dev_name, mountopts); put_filesystem(cgroup_fs); } return ret; } static struct file_system_type cpuset_fs_type = { .name = "cpuset", .mount = cpuset_mount, }; /* * Return in pmask the portion of a cpusets's cpus_allowed that * are online. If none are online, walk up the cpuset hierarchy * until we find one that does have some online cpus. If we get * all the way to the top and still haven't found any online cpus, * return cpu_online_map. Or if passed a NULL cs from an exit'ing * task, return cpu_online_map. * * One way or another, we guarantee to return some non-empty subset * of cpu_online_map. * * Call with callback_mutex held. 
*/ static void guarantee_online_cpus(const struct cpuset *cs, struct cpumask *pmask) { while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask)) cs = cs->parent; if (cs) cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask); else cpumask_copy(pmask, cpu_online_mask); BUG_ON(!cpumask_intersects(pmask, cpu_online_mask)); } /* * Return in *pmask the portion of a cpusets's mems_allowed that * are online, with memory. If none are online with memory, walk * up the cpuset hierarchy until we find one that does have some * online mems. If we get all the way to the top and still haven't * found any online mems, return node_states[N_HIGH_MEMORY]. * * One way or another, we guarantee to return some non-empty subset * of node_states[N_HIGH_MEMORY]. * * Call with callback_mutex held. */ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) { while (cs && !nodes_intersects(cs->mems_allowed, node_states[N_HIGH_MEMORY])) cs = cs->parent; if (cs) nodes_and(*pmask, cs->mems_allowed, node_states[N_HIGH_MEMORY]); else *pmask = node_states[N_HIGH_MEMORY]; BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY])); } /* * update task's spread flag if cpuset's page/slab spread flag is set * * Called with callback_mutex/cgroup_mutex held */ static void cpuset_update_task_spread_flag(struct cpuset *cs, struct task_struct *tsk) { if (is_spread_page(cs)) tsk->flags |= PF_SPREAD_PAGE; else tsk->flags &= ~PF_SPREAD_PAGE; if (is_spread_slab(cs)) tsk->flags |= PF_SPREAD_SLAB; else tsk->flags &= ~PF_SPREAD_SLAB; } /* * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? * * One cpuset is a subset of another if all its allowed CPUs and * Memory Nodes are a subset of the other, and its exclusive flags * are only set if the other's are set. Call holding cgroup_mutex. 
*/ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) { return cpumask_subset(p->cpus_allowed, q->cpus_allowed) && nodes_subset(p->mems_allowed, q->mems_allowed) && is_cpu_exclusive(p) <= is_cpu_exclusive(q) && is_mem_exclusive(p) <= is_mem_exclusive(q); } /** * alloc_trial_cpuset - allocate a trial cpuset * @cs: the cpuset that the trial cpuset duplicates */ static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs) { struct cpuset *trial; trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); if (!trial) return NULL; if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) { kfree(trial); return NULL; } cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); return trial; } /** * free_trial_cpuset - free the trial cpuset * @trial: the trial cpuset to be freed */ static void free_trial_cpuset(struct cpuset *trial) { free_cpumask_var(trial->cpus_allowed); kfree(trial); } /* * validate_change() - Used to validate that any proposed cpuset change * follows the structural rules for cpusets. * * If we replaced the flag and mask values of the current cpuset * (cur) with those values in the trial cpuset (trial), would * our various subset and exclusive rules still be valid? Presumes * cgroup_mutex held. * * 'cur' is the address of an actual, in-use cpuset. Operations * such as list traversal that depend on the actual address of the * cpuset in the list must use cur below, not trial. * * 'trial' is the address of bulk structure copy of cur, with * perhaps one or more of the fields cpus_allowed, mems_allowed, * or flags changed to new, trial values. * * Return 0 if valid, -errno if not. 
*/ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) { struct cgroup *cont; struct cpuset *c, *par; /* Each of our child cpusets must be a subset of us */ list_for_each_entry(cont, &cur->css.cgroup->children, sibling) { if (!is_cpuset_subset(cgroup_cs(cont), trial)) return -EBUSY; } /* Remaining checks don't apply to root cpuset */ if (cur == &top_cpuset) return 0; par = cur->parent; /* We must be a subset of our parent cpuset */ if (!is_cpuset_subset(trial, par)) return -EACCES; /* * If either I or some sibling (!= me) is exclusive, we can't * overlap */ list_for_each_entry(cont, &par->css.cgroup->children, sibling) { c = cgroup_cs(cont); if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && c != cur && cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) return -EINVAL; if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && c != cur && nodes_intersects(trial->mems_allowed, c->mems_allowed)) return -EINVAL; } /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */ if (cgroup_task_count(cur->css.cgroup)) { if (cpumask_empty(trial->cpus_allowed) || nodes_empty(trial->mems_allowed)) { return -ENOSPC; } } return 0; } #ifdef CONFIG_SMP /* * Helper routine for generate_sched_domains(). * Do cpusets a, b have overlapping cpus_allowed masks? 
*/ static int cpusets_overlap(struct cpuset *a, struct cpuset *b) { return cpumask_intersects(a->cpus_allowed, b->cpus_allowed); } static void update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) { if (dattr->relax_domain_level < c->relax_domain_level) dattr->relax_domain_level = c->relax_domain_level; return; } static void update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) { LIST_HEAD(q); list_add(&c->stack_list, &q); while (!list_empty(&q)) { struct cpuset *cp; struct cgroup *cont; struct cpuset *child; cp = list_first_entry(&q, struct cpuset, stack_list); list_del(q.next); if (cpumask_empty(cp->cpus_allowed)) continue; if (is_sched_load_balance(cp)) update_domain_attr(dattr, cp); list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { child = cgroup_cs(cont); list_add_tail(&child->stack_list, &q); } } } /* * generate_sched_domains() * * This function builds a partial partition of the systems CPUs * A 'partial partition' is a set of non-overlapping subsets whose * union is a subset of that set. * The output of this function needs to be passed to kernel/sched.c * partition_sched_domains() routine, which will rebuild the scheduler's * load balancing domains (sched domains) as specified by that partial * partition. * * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt * for a background explanation of this. * * Does not return errors, on the theory that the callers of this * routine would rather not worry about failures to rebuild sched * domains when operating in the severe memory shortage situations * that could cause allocation failures below. * * Must be called with cgroup_lock held. * * The three key local variables below are: * q - a linked-list queue of cpuset pointers, used to implement a * top-down scan of all cpusets. This scan loads a pointer * to each cpuset marked is_sched_load_balance into the * array 'csa'. 
For our purposes, rebuilding the schedulers
 * sched domains, we can ignore !is_sched_load_balance cpusets.
 * csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	  that need to be load balanced, for convenient iterative
 *	  access by the subsequent code that finds the best partition,
 *	  i.e the set of domains (subsets) of CPUs such that the
 *	  cpus_allowed of every cpuset marked is_sched_load_balance
 *	  is a subset of one of these domains, while there are as
 *	  many such domains as possible, each as small as possible.
 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
 *	  the kernel/sched.c routine partition_sched_domains() in a
 *	  convenient format, that can be easily compared to the prior
 *	  value to determine what partition elements (sched domains)
 *	  were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed, but which don't have the same 'pn' partition
 *	number, and merges them into the same partition number.  It
 *	keeps looping on the 'restart' label until it can no longer
 *	find any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	LIST_HEAD(q);		/* queue of cpusets to be scanned */
	struct cpuset *cp;	/* scans q */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		/* dattr is optional; NULL is handled by the scheduler */
		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_copy(doms[0], top_cpuset.cpus_allowed);

		goto done;
	}

	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	/*
	 * Breadth-first walk of the cpuset hierarchy, collecting every
	 * load-balanced cpuset with a non-empty cpumask into csa[].
	 */
	list_add(&top_cpuset.stack_list, &q);
	while (!list_empty(&q)) {
		struct cgroup *cont;
		struct cpuset *child;	/* scans child cpusets of cp */

		cp = list_first_entry(&q, struct cpuset, stack_list);
		list_del(q.next);

		if (cpumask_empty(cp->cpus_allowed))
			continue;

		/*
		 * All child cpusets contain a subset of the parent's cpus, so
		 * just skip them, and then we call update_domain_attr_tree()
		 * to calc relax_domain_level of the corresponding sched
		 * domain.
		 */
		if (is_sched_load_balance(cp)) {
			csa[csn++] = cp;
			continue;
		}

		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
			child = cgroup_cs(cont);
			list_add_tail(&child->stack_list, &q);
		}
	}

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				/* Merge b's partition into a's */
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				printk(KERN_WARNING
				 "rebuild_sched_domains confused:"
				  " nslot %d, ndoms %d, csn %d, i %d,"
				  " apn %d\n",
				  nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		/* Union the cpumasks of every cpuset in this partition */
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->cpus_allowed);
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}

/*
 * Rebuild scheduler domains.
 *
 * Call with neither cgroup_mutex held nor within get_online_cpus().
 * Takes both cgroup_mutex and get_online_cpus().
 *
 * Cannot be directly called from cpuset code handling changes
 * to the cpuset pseudo-filesystem, because it cannot be called
 * from code that already holds cgroup_mutex.
*/ static void do_rebuild_sched_domains(struct work_struct *unused) { struct sched_domain_attr *attr; cpumask_var_t *doms; int ndoms; get_online_cpus(); /* Generate domain masks and attrs */ cgroup_lock(); ndoms = generate_sched_domains(&doms, &attr); cgroup_unlock(); /* Have scheduler rebuild the domains */ partition_sched_domains(ndoms, doms, attr); put_online_cpus(); } #else /* !CONFIG_SMP */ static void do_rebuild_sched_domains(struct work_struct *unused) { } static int generate_sched_domains(cpumask_var_t **domains, struct sched_domain_attr **attributes) { *domains = NULL; return 1; } #endif /* CONFIG_SMP */ static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains); /* * Rebuild scheduler domains, asynchronously via workqueue. * * If the flag 'sched_load_balance' of any cpuset with non-empty * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset * which has that flag enabled, or if any cpuset with a non-empty * 'cpus' is removed, then call this routine to rebuild the * scheduler's dynamic sched domains. * * The rebuild_sched_domains() and partition_sched_domains() * routines must nest cgroup_lock() inside get_online_cpus(), * but such cpuset changes as these must nest that locking the * other way, holding cgroup_lock() for much of the code. * * So in order to avoid an ABBA deadlock, the cpuset code handling * these user changes delegates the actual sched domain rebuilding * to a separate workqueue thread, which ends up processing the * above do_rebuild_sched_domains() function. */ static void async_rebuild_sched_domains(void) { queue_work(cpuset_wq, &rebuild_sched_domains_work); } /* * Accomplishes the same scheduler domain rebuild as the above * async_rebuild_sched_domains(), however it directly calls the * rebuild routine synchronously rather than calling it via an * asynchronous work thread. * * This can only be called from code that is not holding * cgroup_mutex (not nested in a cgroup_lock() call.) 
*/ void rebuild_sched_domains(void) { do_rebuild_sched_domains(NULL); } /** * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's * @tsk: task to test * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner * * Call with cgroup_mutex held. May take callback_mutex during call. * Called for each task in a cgroup by cgroup_scan_tasks(). * Return nonzero if this tasks's cpus_allowed mask should be changed (in other * words, if its mask is not equal to its cpuset's mask). */ static int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) { return !cpumask_equal(&tsk->cpus_allowed, (cgroup_cs(scan->cg))->cpus_allowed); } /** * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's * @tsk: task to test * @scan: struct cgroup_scanner containing the cgroup of the task * * Called by cgroup_scan_tasks() for each task in a cgroup whose * cpus_allowed mask needs to be changed. * * We don't need to re-check for the cgroup/cpuset membership, since we're * holding cgroup_lock() at this point. */ static void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) { set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed)); } /** * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() * * Called with cgroup_mutex held * * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, * calling callback functions for each. * * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0 * if @heap != NULL. 
*/ static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap) { struct cgroup_scanner scan; scan.cg = cs->css.cgroup; scan.test_task = cpuset_test_cpumask; scan.process_task = cpuset_change_cpumask; scan.heap = heap; cgroup_scan_tasks(&scan); } /** * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it * @cs: the cpuset to consider * @buf: buffer of cpu numbers written to this cpuset */ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, const char *buf) { struct ptr_heap heap; int retval; int is_load_balanced; /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ if (cs == &top_cpuset) return -EACCES; /* * An empty cpus_allowed is ok only if the cpuset has no tasks. * Since cpulist_parse() fails on an empty mask, we special case * that parsing. The validate_change() call ensures that cpusets * with tasks have cpus. */ if (!*buf) { cpumask_clear(trialcs->cpus_allowed); } else { retval = cpulist_parse(buf, trialcs->cpus_allowed); if (retval < 0) return retval; if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask)) return -EINVAL; } retval = validate_change(cs, trialcs); if (retval < 0) return retval; /* Nothing to do if the cpus didn't change */ if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) return 0; retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); if (retval) return retval; is_load_balanced = is_sched_load_balance(trialcs); mutex_lock(&callback_mutex); cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); mutex_unlock(&callback_mutex); /* * Scan tasks in the cpuset, and update the cpumasks of any * that need an update. */ update_tasks_cpumask(cs, &heap); heap_free(&heap); if (is_load_balanced) async_rebuild_sched_domains(); return 0; } /* * cpuset_migrate_mm * * Migrate memory region from one set of nodes to another. * * Temporarilly set tasks mems_allowed to target nodes of migration, * so that the migration code can allocate pages on these nodes. 
*
 *    Call holding cgroup_mutex, so current's cpuset won't change
 *    during this call, as manage_mutex holds off any cpuset_attach()
 *    calls.  Therefore we don't need to take task_lock around the
 *    call to guarantee_online_mems(), as we know no one is changing
 *    our task's cpuset.
 *
 *    While the mm_struct we are migrating is typically from some
 *    other task, the task_struct mems_allowed that we are hacking
 *    is for our current task, which must allocate new pages for that
 *    migrating memory region.
 */
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct task_struct *tsk = current;

	tsk->mems_allowed = *to;

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	/* Restore current's mems_allowed from its cpuset */
	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: new nodes that the task will be set
 *
 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 * we structure updates as setting all new allowed nodes, then clearing newly
 * disallowed ones.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
repeat:
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return;
	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return;

	task_lock(tsk);
	/* Step 1: grow mems_allowed to the union of old and new nodes */
	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);

	/*
	 * ensure checking ->mems_allowed_change_disable after setting all new
	 * allowed nodes.
	 *
	 * the read-side task can see an nodemask with new allowed nodes and
	 * old allowed nodes. and if it allocates page when cpuset clears newly
	 * disallowed ones continuous, it can see the new allowed bits.
	 *
	 * And if setting all new allowed nodes is after the checking, setting
	 * all new allowed nodes and clearing newly disallowed ones will be done
	 * continuous, and the read-side task may find no node to alloc page.
	 */
	smp_mb();

	/*
	 * Allocation of memory is very fast, we needn't sleep when waiting
	 * for the read-side.
	 */
	while (ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
		task_unlock(tsk);
		if (!task_curr(tsk))
			yield();
		goto repeat;
	}

	/*
	 * ensure checking ->mems_allowed_change_disable before clearing all new
	 * disallowed nodes.
	 *
	 * if clearing newly disallowed bits before the checking, the read-side
	 * task may find no node to alloc page.
	 */
	smp_mb();

	/* Step 2: shrink down to exactly the new nodes */
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
	tsk->mems_allowed = *newmems;
	task_unlock(tsk);
}

/*
 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
 * memory_migrate flag is set. Called with cgroup_mutex held.
 */
static void cpuset_change_nodemask(struct task_struct *p,
				   struct cgroup_scanner *scan)
{
	struct mm_struct *mm;
	struct cpuset *cs;
	int migrate;
	const nodemask_t *oldmem = scan->data;
	static nodemask_t newmems;	/* protected by cgroup_mutex */

	cs = cgroup_cs(scan->cg);
	guarantee_online_mems(cs, &newmems);

	cpuset_change_task_nodemask(p, &newmems);

	mm = get_task_mm(p);
	if (!mm)
		return;

	migrate = is_memory_migrate(cs);

	mpol_rebind_mm(mm, &cs->mems_allowed);
	if (migrate)
		cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
	mmput(mm);
}

static void *cpuset_being_rebound;

/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 * @oldmem: old mems_allowed of cpuset cs
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cgroup_mutex held
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
				  struct ptr_heap *heap)
{
	struct cgroup_scanner scan;

	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */

	scan.cg = cs->css.cgroup;
	scan.test_task = NULL;
	scan.process_task = cpuset_change_nodemask;
	scan.heap = heap;
	scan.data = (nodemask_t *)oldmem;

	/*
	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
	 * take while holding tasklist_lock.  Forks can happen - the
	 * mpol_dup() cpuset_being_rebound check will catch such forks,
	 * and rebind their vma mempolicies too.  Because we still hold
	 * the global cgroup_mutex, we know that no other rebind effort
	 * will be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	cgroup_scan_tasks(&scan);

	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
	cpuset_being_rebound = NULL;
}

/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpusets mems_allowed, and for each task in the cpuset,
 * update mems_allowed and rebind task's mempolicy and any vma
 * mempolicies and if the cpuset is marked 'memory_migrate',
 * migrate the tasks pages to the new memory.
 *
 * Call with cgroup_mutex held.  May take callback_mutex during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpusets new mems_allowed.
 */
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
			   const char *buf)
{
	NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
	int retval;
	struct ptr_heap heap;

	if (!oldmem)
		return -ENOMEM;

	/*
	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
	 * it's read-only
	 */
	if (cs == &top_cpuset) {
		retval = -EACCES;
		goto done;
	}

	/*
	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
	 * Since nodelist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have memory.
	 */
	if (!*buf) {
		nodes_clear(trialcs->mems_allowed);
	} else {
		retval = nodelist_parse(buf, trialcs->mems_allowed);
		if (retval < 0)
			goto done;

		if (!nodes_subset(trialcs->mems_allowed,
				node_states[N_HIGH_MEMORY])) {
			retval = -EINVAL;
			goto done;
		}
	}
	*oldmem = cs->mems_allowed;
	if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
		retval = 0;		/* Too easy - nothing to do */
		goto done;
	}
	retval = validate_change(cs, trialcs);
	if (retval < 0)
		goto done;

	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (retval < 0)
		goto done;

	mutex_lock(&callback_mutex);
	cs->mems_allowed = trialcs->mems_allowed;
	mutex_unlock(&callback_mutex);

	update_tasks_nodemask(cs, oldmem, &heap);

	heap_free(&heap);
done:
	NODEMASK_FREE(oldmem);
	return retval;
}

/* Nonzero iff the current task's cpuset is having its mems rebound */
int current_cpuset_is_being_rebound(void)
{
	return task_cs(current) == cpuset_being_rebound;
}

/* Validate and apply a new sched_relax_domain_level for cpuset @cs */
static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
#ifdef CONFIG_SMP
	if (val < -1 || val >= sched_domain_level_max)
		return -EINVAL;
#endif

	if (val != cs->relax_domain_level) {
		cs->relax_domain_level = val;
		if (!cpumask_empty(cs->cpus_allowed) &&
		    is_sched_load_balance(cs))
			async_rebuild_sched_domains();
	}

	return 0;
}

/*
 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
 * @tsk: task to be updated
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cgroup_lock() at this point.
 */
static void cpuset_change_flag(struct task_struct *tsk,
			       struct cgroup_scanner *scan)
{
	cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
}

/*
 * update_tasks_flags - update the spread flags of tasks in the cpuset.
* @cs: the cpuset in which each task's spread flags needs to be changed * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() * * Called with cgroup_mutex held * * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, * calling callback functions for each. * * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0 * if @heap != NULL. */ static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap) { struct cgroup_scanner scan; scan.cg = cs->css.cgroup; scan.test_task = NULL; scan.process_task = cpuset_change_flag; scan.heap = heap; cgroup_scan_tasks(&scan); } /* * update_flag - read a 0 or a 1 in a file and update associated flag * bit: the bit to update (see cpuset_flagbits_t) * cs: the cpuset to update * turning_on: whether the flag is being set or cleared * * Call with cgroup_mutex held. */ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on) { struct cpuset *trialcs; int balance_flag_changed; int spread_flag_changed; struct ptr_heap heap; int err; trialcs = alloc_trial_cpuset(cs); if (!trialcs) return -ENOMEM; if (turning_on) set_bit(bit, &trialcs->flags); else clear_bit(bit, &trialcs->flags); err = validate_change(cs, trialcs); if (err < 0) goto out; err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); if (err < 0) goto out; balance_flag_changed = (is_sched_load_balance(cs) != is_sched_load_balance(trialcs)); spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) || (is_spread_page(cs) != is_spread_page(trialcs))); mutex_lock(&callback_mutex); cs->flags = trialcs->flags; mutex_unlock(&callback_mutex); if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) async_rebuild_sched_domains(); if (spread_flag_changed) update_tasks_flags(cs, &heap); heap_free(&heap); out: free_trial_cpuset(trialcs); return err; } /* * Frequency meter - How fast is some event occurring? 
*
 * These routines manage a digitally filtered, constant time based,
 * event frequency meter.  There are four routines:
 *   fmeter_init() - initialize a frequency meter.
 *   fmeter_markevent() - called each time the event happens.
 *   fmeter_getrate() - returns the recent rate of such events.
 *   fmeter_update() - internal routine used to update fmeter.
 *
 * A common data structure is passed to each of these routines,
 * which is used to keep track of the state required to manage the
 * frequency meter and its digital filter.
 *
 * The filter works on the number of events marked per unit time.
 * The filter is single-pole low-pass recursive (IIR).  The time unit
 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 * simulate 3 decimal digits of precision (multiplied by 1000).
 *
 * With an FM_COEF of 933, and a time base of 1 second, the filter
 * has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from the fmeter_getrate()
 * will be cut in half each 10 seconds, until it converges to zero.
 *
 * It is not worth doing a real infinitely recursive filter.  If more
 * than FM_MAXTICKS ticks have elapsed since the last filter event,
 * just compute FM_MAXTICKS ticks worth, by which point the level
 * will be stable.
 *
 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 * arithmetic overflow in the fmeter_update() routine.
 *
 * Given the simple 32 bit integer arithmetic used, this meter works
 * best for reporting rates between one per millisecond (msec) and
 * one per 32 (approx) seconds.  At constant rates faster than one
 * per msec it maxes out at values just under 1,000,000.  At constant
 * rates between one per msec, and one per second it will stabilize
 * to a value N*1000, where N is the rate of events per second.
 * At constant rates between one per second and one per 32 seconds,
 * it will be choppy, moving up on the seconds that have an event,
 * and then decaying until the next event.  At rates slower than
 * about one in 32 seconds, it decays all the way back to zero between
 * each event.
 */

#define FM_COEF 933		/* coefficient for half-life of 10 secs */
#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
#define FM_SCALE 1000		/* faux fixed point scale */

/* Initialize a frequency meter */
static void fmeter_init(struct fmeter *fmp)
{
	fmp->cnt = 0;
	fmp->val = 0;
	fmp->time = 0;
	spin_lock_init(&fmp->lock);
}

/* Internal meter update - process cnt events and update value */
static void fmeter_update(struct fmeter *fmp)
{
	time_t now = get_seconds();
	time_t ticks = now - fmp->time;

	if (ticks == 0)
		return;

	/* Decay the filtered value once per elapsed second */
	ticks = min(FM_MAXTICKS, ticks);
	while (ticks-- > 0)
		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
	fmp->time = now;

	/* Fold the pending (scaled) event count into the value */
	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
	fmp->cnt = 0;
}

/* Process any previous ticks, then bump cnt by one (times scale). */
static void fmeter_markevent(struct fmeter *fmp)
{
	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
	spin_unlock(&fmp->lock);
}

/* Process any previous ticks, then return current value. */
static int fmeter_getrate(struct fmeter *fmp)
{
	int val;

	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	val = fmp->val;
	spin_unlock(&fmp->lock);
	return val;
}

/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
			     struct task_struct *tsk)
{
	struct cpuset *cs = cgroup_cs(cont);

	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
		return -ENOSPC;

	/*
	 * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
	 * cannot change their cpu affinity and isolating such threads by their
	 * set of allowed nodes is unnecessary.  Thus, cpusets are not
	 * applicable for such threads.
This prevents checking for success of * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may * be changed. */ if (tsk->flags & PF_THREAD_BOUND) return -EINVAL; return 0; } static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task) { return security_task_setscheduler(task); } /* * Protected by cgroup_lock. The nodemasks must be stored globally because * dynamically allocating them is not allowed in pre_attach, and they must * persist among pre_attach, attach_task, and attach. */ static cpumask_var_t cpus_attach; static nodemask_t cpuset_attach_nodemask_from; static nodemask_t cpuset_attach_nodemask_to; /* Set-up work for before attaching each task. */ static void cpuset_pre_attach(struct cgroup *cont) { struct cpuset *cs = cgroup_cs(cont); if (cs == &top_cpuset) cpumask_copy(cpus_attach, cpu_possible_mask); else guarantee_online_cpus(cs, cpus_attach); guarantee_online_mems(cs, &cpuset_attach_nodemask_to); } /* Per-thread attachment work. */ static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk) { int err; struct cpuset *cs = cgroup_cs(cont); /* * can_attach beforehand should guarantee that this doesn't fail. * TODO: have a better way to handle failure here */ err = set_cpus_allowed_ptr(tsk, cpus_attach); WARN_ON_ONCE(err); cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to); cpuset_update_task_spread_flag(cs, tsk); } static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont, struct cgroup *oldcont, struct task_struct *tsk) { struct mm_struct *mm; struct cpuset *cs = cgroup_cs(cont); struct cpuset *oldcs = cgroup_cs(oldcont); /* * Change mm, possibly for multiple threads in a threadgroup. This is * expensive and may sleep. 
*/ cpuset_attach_nodemask_from = oldcs->mems_allowed; cpuset_attach_nodemask_to = cs->mems_allowed; mm = get_task_mm(tsk); if (mm) { mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); if (is_memory_migrate(cs)) cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from, &cpuset_attach_nodemask_to); mmput(mm); } } /* The various types of files and directories in a cpuset file system */ typedef enum { FILE_MEMORY_MIGRATE, FILE_CPULIST, FILE_MEMLIST, FILE_CPU_EXCLUSIVE, FILE_MEM_EXCLUSIVE, FILE_MEM_HARDWALL, FILE_SCHED_LOAD_BALANCE, FILE_SCHED_RELAX_DOMAIN_LEVEL, FILE_MEMORY_PRESSURE_ENABLED, FILE_MEMORY_PRESSURE, FILE_SPREAD_PAGE, FILE_SPREAD_SLAB, } cpuset_filetype_t; static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) { int retval = 0; struct cpuset *cs = cgroup_cs(cgrp); cpuset_filetype_t type = cft->private; if (!cgroup_lock_live_group(cgrp)) return -ENODEV; switch (type) { case FILE_CPU_EXCLUSIVE: retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); break; case FILE_MEM_EXCLUSIVE: retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); break; case FILE_MEM_HARDWALL: retval = update_flag(CS_MEM_HARDWALL, cs, val); break; case FILE_SCHED_LOAD_BALANCE: retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); break; case FILE_MEMORY_MIGRATE: retval = update_flag(CS_MEMORY_MIGRATE, cs, val); break; case FILE_MEMORY_PRESSURE_ENABLED: cpuset_memory_pressure_enabled = !!val; break; case FILE_MEMORY_PRESSURE: retval = -EACCES; break; case FILE_SPREAD_PAGE: retval = update_flag(CS_SPREAD_PAGE, cs, val); break; case FILE_SPREAD_SLAB: retval = update_flag(CS_SPREAD_SLAB, cs, val); break; default: retval = -EINVAL; break; } cgroup_unlock(); return retval; } static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val) { int retval = 0; struct cpuset *cs = cgroup_cs(cgrp); cpuset_filetype_t type = cft->private; if (!cgroup_lock_live_group(cgrp)) return -ENODEV; switch (type) { case FILE_SCHED_RELAX_DOMAIN_LEVEL: retval = update_relax_domain_level(cs, val); 
break; default: retval = -EINVAL; break; } cgroup_unlock(); return retval; } /* * Common handling for a write to a "cpus" or "mems" file. */ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, const char *buf) { int retval = 0; struct cpuset *cs = cgroup_cs(cgrp); struct cpuset *trialcs; if (!cgroup_lock_live_group(cgrp)) return -ENODEV; trialcs = alloc_trial_cpuset(cs); if (!trialcs) { retval = -ENOMEM; goto out; } switch (cft->private) { case FILE_CPULIST: retval = update_cpumask(cs, trialcs, buf); break; case FILE_MEMLIST: retval = update_nodemask(cs, trialcs, buf); break; default: retval = -EINVAL; break; } free_trial_cpuset(trialcs); out: cgroup_unlock(); return retval; } /* * These ascii lists should be read in a single call, by using a user * buffer large enough to hold the entire map. If read in smaller * chunks, there is no guarantee of atomicity. Since the display format * used, list of ranges of sequential numbers, is variable length, * and since these maps can change value dynamically, one could read * gibberish by doing partial reads while a list was changing. * A single large read to a buffer that crosses a page boundary is * ok, because the result being copied to user land is not recomputed * across a page fault. 
*/ static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs) { size_t count; mutex_lock(&callback_mutex); count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed); mutex_unlock(&callback_mutex); return count; } static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs) { size_t count; mutex_lock(&callback_mutex); count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed); mutex_unlock(&callback_mutex); return count; } static ssize_t cpuset_common_file_read(struct cgroup *cont, struct cftype *cft, struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct cpuset *cs = cgroup_cs(cont); cpuset_filetype_t type = cft->private; char *page; ssize_t retval = 0; char *s; if (!(page = (char *)__get_free_page(GFP_TEMPORARY))) return -ENOMEM; s = page; switch (type) { case FILE_CPULIST: s += cpuset_sprintf_cpulist(s, cs); break; case FILE_MEMLIST: s += cpuset_sprintf_memlist(s, cs); break; default: retval = -EINVAL; goto out; } *s++ = '\n'; retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); out: free_page((unsigned long)page); return retval; } static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft) { struct cpuset *cs = cgroup_cs(cont); cpuset_filetype_t type = cft->private; switch (type) { case FILE_CPU_EXCLUSIVE: return is_cpu_exclusive(cs); case FILE_MEM_EXCLUSIVE: return is_mem_exclusive(cs); case FILE_MEM_HARDWALL: return is_mem_hardwall(cs); case FILE_SCHED_LOAD_BALANCE: return is_sched_load_balance(cs); case FILE_MEMORY_MIGRATE: return is_memory_migrate(cs); case FILE_MEMORY_PRESSURE_ENABLED: return cpuset_memory_pressure_enabled; case FILE_MEMORY_PRESSURE: return fmeter_getrate(&cs->fmeter); case FILE_SPREAD_PAGE: return is_spread_page(cs); case FILE_SPREAD_SLAB: return is_spread_slab(cs); default: BUG(); } /* Unreachable but makes gcc happy */ return 0; } static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) { struct cpuset *cs = cgroup_cs(cont); cpuset_filetype_t type = 
cft->private; switch (type) { case FILE_SCHED_RELAX_DOMAIN_LEVEL: return cs->relax_domain_level; default: BUG(); } /* Unrechable but makes gcc happy */ return 0; } /* * for the common functions, 'private' gives the type of file */ static struct cftype files[] = { { .name = "cpus", .read = cpuset_common_file_read, .write_string = cpuset_write_resmask, .max_write_len = (100U + 6 * NR_CPUS), .private = FILE_CPULIST, }, { .name = "mems", .read = cpuset_common_file_read, .write_string = cpuset_write_resmask, .max_write_len = (100U + 6 * MAX_NUMNODES), .private = FILE_MEMLIST, }, { .name = "cpu_exclusive", .read_u64 = cpuset_read_u64, .write_u64 = cpuset_write_u64, .private = FILE_CPU_EXCLUSIVE, }, { .name = "mem_exclusive", .read_u64 = cpuset_read_u64, .write_u64 = cpuset_write_u64, .private = FILE_MEM_EXCLUSIVE, }, { .name = "mem_hardwall", .read_u64 = cpuset_read_u64, .write_u64 = cpuset_write_u64, .private = FILE_MEM_HARDWALL, }, { .name = "sched_load_balance", .read_u64 = cpuset_read_u64, .write_u64 = cpuset_write_u64, .private = FILE_SCHED_LOAD_BALANCE, }, { .name = "sched_relax_domain_level", .read_s64 = cpuset_read_s64, .write_s64 = cpuset_write_s64, .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, }, { .name = "memory_migrate", .read_u64 = cpuset_read_u64, .write_u64 = cpuset_write_u64, .private = FILE_MEMORY_MIGRATE, }, { .name = "memory_pressure", .read_u64 = cpuset_read_u64, .write_u64 = cpuset_write_u64, .private = FILE_MEMORY_PRESSURE, .mode = S_IRUGO, }, { .name = "memory_spread_page", .read_u64 = cpuset_read_u64, .write_u64 = cpuset_write_u64, .private = FILE_SPREAD_PAGE, }, { .name = "memory_spread_slab", .read_u64 = cpuset_read_u64, .write_u64 = cpuset_write_u64, .private = FILE_SPREAD_SLAB, }, }; static struct cftype cft_memory_pressure_enabled = { .name = "memory_pressure_enabled", .read_u64 = cpuset_read_u64, .write_u64 = cpuset_write_u64, .private = FILE_MEMORY_PRESSURE_ENABLED, }; static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) { 
int err; err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files)); if (err) return err; /* memory_pressure_enabled is in root cpuset only */ if (!cont->parent) err = cgroup_add_file(cont, ss, &cft_memory_pressure_enabled); return err; } /* * post_clone() is called during cgroup_create() when the * clone_children mount argument was specified. The cgroup * can not yet have any tasks. * * Currently we refuse to set up the cgroup - thereby * refusing the task to be entered, and as a result refusing * the sys_unshare() or clone() which initiated it - if any * sibling cpusets have exclusive cpus or mem. * * If this becomes a problem for some users who wish to * allow that scenario, then cpuset_post_clone() could be * changed to grant parent->cpus_allowed-sibling_cpus_exclusive * (and likewise for mems) to the new cgroup. Called with cgroup_mutex * held. */ static void cpuset_post_clone(struct cgroup_subsys *ss, struct cgroup *cgroup) { struct cgroup *parent, *child; struct cpuset *cs, *parent_cs; parent = cgroup->parent; list_for_each_entry(child, &parent->children, sibling) { cs = cgroup_cs(child); if (is_mem_exclusive(cs) || is_cpu_exclusive(cs)) return; } cs = cgroup_cs(cgroup); parent_cs = cgroup_cs(parent); mutex_lock(&callback_mutex); cs->mems_allowed = parent_cs->mems_allowed; cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed); mutex_unlock(&callback_mutex); return; } /* * cpuset_create - create a cpuset * ss: cpuset cgroup subsystem * cont: control group that the new cpuset will be part of */ static struct cgroup_subsys_state *cpuset_create( struct cgroup_subsys *ss, struct cgroup *cont) { struct cpuset *cs; struct cpuset *parent; if (!cont->parent) { return &top_cpuset.css; } parent = cgroup_cs(cont->parent); cs = kmalloc(sizeof(*cs), GFP_KERNEL); if (!cs) return ERR_PTR(-ENOMEM); if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) { kfree(cs); return ERR_PTR(-ENOMEM); } cs->flags = 0; if (is_spread_page(parent)) set_bit(CS_SPREAD_PAGE, &cs->flags); if 
(is_spread_slab(parent)) set_bit(CS_SPREAD_SLAB, &cs->flags); set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); cpumask_clear(cs->cpus_allowed); nodes_clear(cs->mems_allowed); fmeter_init(&cs->fmeter); cs->relax_domain_level = -1; cs->parent = parent; number_of_cpusets++; return &cs->css ; } /* * If the cpuset being removed has its flag 'sched_load_balance' * enabled, then simulate turning sched_load_balance off, which * will call async_rebuild_sched_domains(). */ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { struct cpuset *cs = cgroup_cs(cont); if (is_sched_load_balance(cs)) update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); number_of_cpusets--; free_cpumask_var(cs->cpus_allowed); kfree(cs); } struct cgroup_subsys cpuset_subsys = { .name = "cpuset", .create = cpuset_create, .destroy = cpuset_destroy, .can_attach = cpuset_can_attach, .can_attach_task = cpuset_can_attach_task, .pre_attach = cpuset_pre_attach, .attach_task = cpuset_attach_task, .attach = cpuset_attach, .populate = cpuset_populate, .post_clone = cpuset_post_clone, .subsys_id = cpuset_subsys_id, .early_init = 1, }; /** * cpuset_init - initialize cpusets at system boot * * Description: Initialize top_cpuset and the cpuset internal file system, **/ int __init cpuset_init(void) { int err = 0; if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)) BUG(); cpumask_setall(top_cpuset.cpus_allowed); nodes_setall(top_cpuset.mems_allowed); fmeter_init(&top_cpuset.fmeter); set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); top_cpuset.relax_domain_level = -1; err = register_filesystem(&cpuset_fs_type); if (err < 0) return err; if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)) BUG(); number_of_cpusets = 1; return 0; } /** * cpuset_do_move_task - move a given task to another cpuset * @tsk: pointer to task_struct the task to move * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner * * Called by cgroup_scan_tasks() for each task in a cgroup. 
* Return nonzero to stop the walk through the tasks. */ static void cpuset_do_move_task(struct task_struct *tsk, struct cgroup_scanner *scan) { struct cgroup *new_cgroup = scan->data; cgroup_attach_task(new_cgroup, tsk); } /** * move_member_tasks_to_cpuset - move tasks from one cpuset to another * @from: cpuset in which the tasks currently reside * @to: cpuset to which the tasks will be moved * * Called with cgroup_mutex held * callback_mutex must not be held, as cpuset_attach() will take it. * * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, * calling callback functions for each. */ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to) { struct cgroup_scanner scan; scan.cg = from->css.cgroup; scan.test_task = NULL; /* select all tasks in cgroup */ scan.process_task = cpuset_do_move_task; scan.heap = NULL; scan.data = to->css.cgroup; if (cgroup_scan_tasks(&scan)) printk(KERN_ERR "move_member_tasks_to_cpuset: " "cgroup_scan_tasks failed\n"); } /* * If CPU and/or memory hotplug handlers, below, unplug any CPUs * or memory nodes, we need to walk over the cpuset hierarchy, * removing that CPU or node from all cpusets. If this removes the * last CPU or node from a cpuset, then move the tasks in the empty * cpuset to its next-highest non-empty parent. * * Called with cgroup_mutex held * callback_mutex must not be held, as cpuset_attach() will take it. */ static void remove_tasks_in_empty_cpuset(struct cpuset *cs) { struct cpuset *parent; /* * The cgroup's css_sets list is in use if there are tasks * in the cpuset; the list is empty if there are none; * the cs->css.refcnt seems always 0. */ if (list_empty(&cs->css.cgroup->css_sets)) return; /* * Find its next-highest non-empty parent, (top cpuset * has online cpus, so can't be empty). 
*/ parent = cs->parent; while (cpumask_empty(parent->cpus_allowed) || nodes_empty(parent->mems_allowed)) parent = parent->parent; move_member_tasks_to_cpuset(cs, parent); } /* * Walk the specified cpuset subtree and look for empty cpusets. * The tasks of such cpuset must be moved to a parent cpuset. * * Called with cgroup_mutex held. We take callback_mutex to modify * cpus_allowed and mems_allowed. * * This walk processes the tree from top to bottom, completing one layer * before dropping down to the next. It always processes a node before * any of its children. * * For now, since we lack memory hot unplug, we'll never see a cpuset * that has tasks along with an empty 'mems'. But if we did see such * a cpuset, we'd handle it just like we do if its 'cpus' was empty. */ static void scan_for_empty_cpusets(struct cpuset *root) { LIST_HEAD(queue); struct cpuset *cp; /* scans cpusets being updated */ struct cpuset *child; /* scans child cpusets of cp */ struct cgroup *cont; static nodemask_t oldmems; /* protected by cgroup_mutex */ list_add_tail((struct list_head *)&root->stack_list, &queue); while (!list_empty(&queue)) { cp = list_first_entry(&queue, struct cpuset, stack_list); list_del(queue.next); list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { child = cgroup_cs(cont); list_add_tail(&child->stack_list, &queue); } /* Continue past cpusets with all cpus, mems online */ if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) && nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) continue; oldmems = cp->mems_allowed; /* Remove offline cpus and mems from this cpuset. 
*/ mutex_lock(&callback_mutex); cpumask_and(cp->cpus_allowed, cp->cpus_allowed, cpu_active_mask); nodes_and(cp->mems_allowed, cp->mems_allowed, node_states[N_HIGH_MEMORY]); mutex_unlock(&callback_mutex); /* Move tasks from the empty cpuset to a parent */ if (cpumask_empty(cp->cpus_allowed) || nodes_empty(cp->mems_allowed)) remove_tasks_in_empty_cpuset(cp); else { update_tasks_cpumask(cp, NULL); update_tasks_nodemask(cp, &oldmems, NULL); } } } /* * The top_cpuset tracks what CPUs and Memory Nodes are online, * period. This is necessary in order to make cpusets transparent * (of no affect) on systems that are actively using CPU hotplug * but making no active use of cpusets. * * This routine ensures that top_cpuset.cpus_allowed tracks * cpu_active_mask on each CPU hotplug (cpuhp) event. * * Called within get_online_cpus(). Needs to call cgroup_lock() * before calling generate_sched_domains(). */ void cpuset_update_active_cpus(void) { struct sched_domain_attr *attr; cpumask_var_t *doms; int ndoms; cgroup_lock(); mutex_lock(&callback_mutex); cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); mutex_unlock(&callback_mutex); scan_for_empty_cpusets(&top_cpuset); ndoms = generate_sched_domains(&doms, &attr); cgroup_unlock(); /* Have scheduler rebuild the domains */ partition_sched_domains(ndoms, doms, attr); } #ifdef CONFIG_MEMORY_HOTPLUG /* * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY]. * Call this routine anytime after node_states[N_HIGH_MEMORY] changes. * See also the previous routine cpuset_track_online_cpus(). 
*/ static int cpuset_track_online_nodes(struct notifier_block *self, unsigned long action, void *arg) { static nodemask_t oldmems; /* protected by cgroup_mutex */ cgroup_lock(); switch (action) { case MEM_ONLINE: oldmems = top_cpuset.mems_allowed; mutex_lock(&callback_mutex); top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; mutex_unlock(&callback_mutex); update_tasks_nodemask(&top_cpuset, &oldmems, NULL); break; case MEM_OFFLINE: /* * needn't update top_cpuset.mems_allowed explicitly because * scan_for_empty_cpusets() will update it. */ scan_for_empty_cpusets(&top_cpuset); break; default: break; } cgroup_unlock(); return NOTIFY_OK; } #endif /** * cpuset_init_smp - initialize cpus_allowed * * Description: Finish top cpuset after cpu, node maps are initialized **/ void __init cpuset_init_smp(void) { cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; hotplug_memory_notifier(cpuset_track_online_nodes, 10); cpuset_wq = create_singlethread_workqueue("cpuset"); BUG_ON(!cpuset_wq); } /** * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. * * Description: Returns the cpumask_var_t cpus_allowed of the cpuset * attached to the specified @tsk. Guaranteed to return some non-empty * subset of cpu_online_map, even if this means going outside the * tasks cpuset. **/ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) { mutex_lock(&callback_mutex); task_lock(tsk); guarantee_online_cpus(task_cs(tsk), pmask); task_unlock(tsk); mutex_unlock(&callback_mutex); } int cpuset_cpus_allowed_fallback(struct task_struct *tsk) { const struct cpuset *cs; int cpu; rcu_read_lock(); cs = task_cs(tsk); if (cs) do_set_cpus_allowed(tsk, cs->cpus_allowed); rcu_read_unlock(); /* * We own tsk->cpus_allowed, nobody can change it under us. 
* * But we used cs && cs->cpus_allowed lockless and thus can * race with cgroup_attach_task() or update_cpumask() and get * the wrong tsk->cpus_allowed. However, both cases imply the * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() * which takes task_rq_lock(). * * If we are called after it dropped the lock we must see all * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary * set any mask even if it is not right from task_cs() pov, * the pending set_cpus_allowed_ptr() will fix things. */ cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask); if (cpu >= nr_cpu_ids) { /* * Either tsk->cpus_allowed is wrong (see above) or it * is actually empty. The latter case is only possible * if we are racing with remove_tasks_in_empty_cpuset(). * Like above we can temporary set any mask and rely on * set_cpus_allowed_ptr() as synchronization point. */ do_set_cpus_allowed(tsk, cpu_possible_mask); cpu = cpumask_any(cpu_active_mask); } return cpu; } void cpuset_init_current_mems_allowed(void) { nodes_setall(current->mems_allowed); } /** * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. * * Description: Returns the nodemask_t mems_allowed of the cpuset * attached to the specified @tsk. Guaranteed to return some non-empty * subset of node_states[N_HIGH_MEMORY], even if this means going outside the * tasks cpuset. **/ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) { nodemask_t mask; mutex_lock(&callback_mutex); task_lock(tsk); guarantee_online_mems(task_cs(tsk), &mask); task_unlock(tsk); mutex_unlock(&callback_mutex); return mask; } /** * cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed * @nodemask: the nodemask to be checked * * Are any of the nodes in the nodemask allowed in current->mems_allowed? 
*/ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) { return nodes_intersects(*nodemask, current->mems_allowed); } /* * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or * mem_hardwall ancestor to the specified cpuset. Call holding * callback_mutex. If no ancestor is mem_exclusive or mem_hardwall * (an unusual configuration), then returns the root cpuset. */ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs) { while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent) cs = cs->parent; return cs; } /** * cpuset_node_allowed_softwall - Can we allocate on a memory node? * @node: is this an allowed node? * @gfp_mask: memory allocation flags * * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is * set, yes, we can always allocate. If node is in our task's mems_allowed, * yes. If it's not a __GFP_HARDWALL request and this node is in the nearest * hardwalled cpuset ancestor to this task's cpuset, yes. If the task has been * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE * flag, yes. * Otherwise, no. * * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to * cpuset_node_allowed_hardwall(). Otherwise, cpuset_node_allowed_softwall() * might sleep, and might allow a node from an enclosing cpuset. * * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall * cpusets, and never sleeps. * * The __GFP_THISNODE placement logic is really handled elsewhere, * by forcibly using a zonelist starting at a specified node, and by * (in get_page_from_freelist()) refusing to consider the zones for * any node on the zonelist except the first. By the time any such * calls get to this routine, we should just shut up and say 'yes'. * * GFP_USER allocations are marked with the __GFP_HARDWALL bit, * and do not allow allocations outside the current tasks cpuset * unless the task has been OOM killed as is marked TIF_MEMDIE. 
* GFP_KERNEL allocations are not so marked, so can escape to the * nearest enclosing hardwalled ancestor cpuset. * * Scanning up parent cpusets requires callback_mutex. The * __alloc_pages() routine only calls here with __GFP_HARDWALL bit * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the * current tasks mems_allowed came up empty on the first pass over * the zonelist. So only GFP_KERNEL allocations, if all nodes in the * cpuset are short of memory, might require taking the callback_mutex * mutex. * * The first call here from mm/page_alloc:get_page_from_freelist() * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, * so no allocation on a node outside the cpuset is allowed (unless * in interrupt, of course). * * The second pass through get_page_from_freelist() doesn't even call * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set * in alloc_flags. That logic and the checks below have the combined * affect that: * in_interrupt - any node ok (current task context irrelevant) * GFP_ATOMIC - any node ok * TIF_MEMDIE - any node ok * GFP_KERNEL - any node in enclosing hardwalled cpuset ok * GFP_USER - only nodes in current tasks mems allowed ok. * * Rule: * Don't call cpuset_node_allowed_softwall if you can't sleep, unless you * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables * the code that might scan up ancestor cpusets and sleep. */ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) { const struct cpuset *cs; /* current cpuset ancestors */ int allowed; /* is allocation in zone z allowed? */ if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) return 1; might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); if (node_isset(node, current->mems_allowed)) return 1; /* * Allow tasks that have access to memory reserves because they have * been OOM killed to get memory anywhere. 
*/ if (unlikely(test_thread_flag(TIF_MEMDIE))) return 1; if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ return 0; if (current->flags & PF_EXITING) /* Let dying task have memory */ return 1; /* Not hardwall and node outside mems_allowed: scan up cpusets */ mutex_lock(&callback_mutex); task_lock(current); cs = nearest_hardwall_ancestor(task_cs(current)); task_unlock(current); allowed = node_isset(node, cs->mems_allowed); mutex_unlock(&callback_mutex); return allowed; } /* * cpuset_node_allowed_hardwall - Can we allocate on a memory node? * @node: is this an allowed node? * @gfp_mask: memory allocation flags * * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is * set, yes, we can always allocate. If node is in our task's mems_allowed, * yes. If the task has been OOM killed and has access to memory reserves as * specified by the TIF_MEMDIE flag, yes. * Otherwise, no. * * The __GFP_THISNODE placement logic is really handled elsewhere, * by forcibly using a zonelist starting at a specified node, and by * (in get_page_from_freelist()) refusing to consider the zones for * any node on the zonelist except the first. By the time any such * calls get to this routine, we should just shut up and say 'yes'. * * Unlike the cpuset_node_allowed_softwall() variant, above, * this variant requires that the node be in the current task's * mems_allowed or that we're in interrupt. It does not scan up the * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset. * It never sleeps. */ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) { if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) return 1; if (node_isset(node, current->mems_allowed)) return 1; /* * Allow tasks that have access to memory reserves because they have * been OOM killed to get memory anywhere. 
*/ if (unlikely(test_thread_flag(TIF_MEMDIE))) return 1; return 0; } /** * cpuset_unlock - release lock on cpuset changes * * Undo the lock taken in a previous cpuset_lock() call. */ void cpuset_unlock(void) { mutex_unlock(&callback_mutex); } /** * cpuset_mem_spread_node() - On which node to begin search for a file page * cpuset_slab_spread_node() - On which node to begin search for a slab page * * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for * tasks in a cpuset with is_spread_page or is_spread_slab set), * and if the memory allocation used cpuset_mem_spread_node() * to determine on which node to start looking, as it will for * certain page cache or slab cache pages such as used for file * system buffers and inode caches, then instead of starting on the * local node to look for a free page, rather spread the starting * node around the tasks mems_allowed nodes. * * We don't have to worry about the returned node being offline * because "it can't happen", and even if it did, it would be ok. * * The routines calling guarantee_online_mems() are careful to * only set nodes in task->mems_allowed that are online. So it * should not be possible for the following code to return an * offline node. But if it did, that would be ok, as this routine * is not returning the node where the allocation must be, only * the node where the search should start. The zonelist passed to * __alloc_pages() will include all nodes. If the slab allocator * is passed an offline node, it will fall back to the local node. * See kmem_cache_alloc_node(). 
*/ static int cpuset_spread_node(int *rotor) { int node; node = next_node(*rotor, current->mems_allowed); if (node == MAX_NUMNODES) node = first_node(current->mems_allowed); *rotor = node; return node; } int cpuset_mem_spread_node(void) { return cpuset_spread_node(&current->cpuset_mem_spread_rotor); } int cpuset_slab_spread_node(void) { return cpuset_spread_node(&current->cpuset_slab_spread_rotor); } EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); /** * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? * @tsk1: pointer to task_struct of some task. * @tsk2: pointer to task_struct of some other task. * * Description: Return true if @tsk1's mems_allowed intersects the * mems_allowed of @tsk2. Used by the OOM killer to determine if * one of the task's memory usage might impact the memory available * to the other. **/ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, const struct task_struct *tsk2) { return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); } /** * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed * @task: pointer to task_struct of some task. * * Description: Prints @task's name, cpuset name, and cached copy of its * mems_allowed to the kernel log. Must hold task_lock(task) to allow * dereferencing task_cs(task). */ void cpuset_print_task_mems_allowed(struct task_struct *tsk) { struct dentry *dentry; dentry = task_cs(tsk)->css.cgroup->dentry; spin_lock(&cpuset_buffer_lock); snprintf(cpuset_name, CPUSET_NAME_LEN, dentry ? (const char *)dentry->d_name.name : "/"); nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, tsk->mems_allowed); printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n", tsk->comm, cpuset_name, cpuset_nodelist); spin_unlock(&cpuset_buffer_lock); } /* * Collection of memory_pressure is suppressed unless * this flag is enabled by writing "1" to the special * cpuset file 'memory_pressure_enabled' in the root cpuset. 
*/ int cpuset_memory_pressure_enabled __read_mostly; /** * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. * * Keep a running average of the rate of synchronous (direct) * page reclaim efforts initiated by tasks in each cpuset. * * This represents the rate at which some task in the cpuset * ran low on memory on all nodes it was allowed to use, and * had to enter the kernels page reclaim code in an effort to * create more free memory by tossing clean pages or swapping * or writing dirty pages. * * Display to user space in the per-cpuset read-only file * "memory_pressure". Value displayed is an integer * representing the recent rate of entry into the synchronous * (direct) page reclaim by any task attached to the cpuset. **/ void __cpuset_memory_pressure_bump(void) { task_lock(current); fmeter_markevent(&task_cs(current)->fmeter); task_unlock(current); } #ifdef CONFIG_PROC_PID_CPUSET /* * proc_cpuset_show() * - Print tasks cpuset path into seq_file. * - Used for /proc/<pid>/cpuset. * - No need to task_lock(tsk) on this tsk->cpuset reference, as it * doesn't really matter if tsk->cpuset changes after we read it, * and we take cgroup_mutex, keeping cpuset_attach() from changing it * anyway. 
*/ static int proc_cpuset_show(struct seq_file *m, void *unused_v) { struct pid *pid; struct task_struct *tsk; char *buf; struct cgroup_subsys_state *css; int retval; retval = -ENOMEM; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) goto out; retval = -ESRCH; pid = m->private; tsk = get_pid_task(pid, PIDTYPE_PID); if (!tsk) goto out_free; retval = -EINVAL; cgroup_lock(); css = task_subsys_state(tsk, cpuset_subsys_id); retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); if (retval < 0) goto out_unlock; seq_puts(m, buf); seq_putc(m, '\n'); out_unlock: cgroup_unlock(); put_task_struct(tsk); out_free: kfree(buf); out: return retval; } static int cpuset_open(struct inode *inode, struct file *file) { struct pid *pid = PROC_I(inode)->pid; return single_open(file, proc_cpuset_show, pid); } const struct file_operations proc_cpuset_operations = { .open = cpuset_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif /* CONFIG_PROC_PID_CPUSET */ /* Display task mems_allowed in /proc/<pid>/status file. */ void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) { seq_printf(m, "Mems_allowed:\t"); seq_nodemask(m, &task->mems_allowed); seq_printf(m, "\n"); seq_printf(m, "Mems_allowed_list:\t"); seq_nodemask_list(m, &task->mems_allowed); seq_printf(m, "\n"); }
gpl-2.0
ChiefzReloaded/lge-kernel-startablet-new
arch/x86/kernel/uv_irq.c
882
7291
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * SGI UV IRQ functions * * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/rbtree.h> #include <linux/slab.h> #include <linux/irq.h> #include <asm/apic.h> #include <asm/uv/uv_irq.h> #include <asm/uv/uv_hub.h> /* MMR offset and pnode of hub sourcing interrupts for a given irq */ struct uv_irq_2_mmr_pnode{ struct rb_node list; unsigned long offset; int pnode; int irq; }; static spinlock_t uv_irq_lock; static struct rb_root uv_irq_root; static int uv_set_irq_affinity(unsigned int, const struct cpumask *); static void uv_noop(unsigned int irq) { } static unsigned int uv_noop_ret(unsigned int irq) { return 0; } static void uv_ack_apic(unsigned int irq) { ack_APIC_irq(); } static struct irq_chip uv_irq_chip = { .name = "UV-CORE", .startup = uv_noop_ret, .shutdown = uv_noop, .enable = uv_noop, .disable = uv_noop, .ack = uv_noop, .mask = uv_noop, .unmask = uv_noop, .eoi = uv_ack_apic, .end = uv_noop, .set_affinity = uv_set_irq_affinity, }; /* * Add offset and pnode information of the hub sourcing interrupts to the * rb tree for a specific irq. 
*/ static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade) { struct rb_node **link = &uv_irq_root.rb_node; struct rb_node *parent = NULL; struct uv_irq_2_mmr_pnode *n; struct uv_irq_2_mmr_pnode *e; unsigned long irqflags; n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL, uv_blade_to_memory_nid(blade)); if (!n) return -ENOMEM; n->irq = irq; n->offset = offset; n->pnode = uv_blade_to_pnode(blade); spin_lock_irqsave(&uv_irq_lock, irqflags); /* Find the right place in the rbtree: */ while (*link) { parent = *link; e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list); if (unlikely(irq == e->irq)) { /* irq entry exists */ e->pnode = uv_blade_to_pnode(blade); e->offset = offset; spin_unlock_irqrestore(&uv_irq_lock, irqflags); kfree(n); return 0; } if (irq < e->irq) link = &(*link)->rb_left; else link = &(*link)->rb_right; } /* Insert the node into the rbtree. */ rb_link_node(&n->list, parent, link); rb_insert_color(&n->list, &uv_irq_root); spin_unlock_irqrestore(&uv_irq_lock, irqflags); return 0; } /* Retrieve offset and pnode information from the rb tree for a specific irq */ int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode) { struct uv_irq_2_mmr_pnode *e; struct rb_node *n; unsigned long irqflags; spin_lock_irqsave(&uv_irq_lock, irqflags); n = uv_irq_root.rb_node; while (n) { e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); if (e->irq == irq) { *offset = e->offset; *pnode = e->pnode; spin_unlock_irqrestore(&uv_irq_lock, irqflags); return 0; } if (irq < e->irq) n = n->rb_left; else n = n->rb_right; } spin_unlock_irqrestore(&uv_irq_lock, irqflags); return -1; } /* * Re-target the irq to the specified CPU and enable the specified MMR located * on the specified blade to allow the sending of MSIs to the specified CPU. 
*/ static int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, unsigned long mmr_offset, int limit) { const struct cpumask *eligible_cpu = cpumask_of(cpu); struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; int mmr_pnode; unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; int err; BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); cfg = irq_cfg(irq); err = assign_irq_vector(irq, cfg, eligible_cpu); if (err != 0) return err; if (limit == UV_AFFINITY_CPU) desc->status |= IRQ_NO_BALANCING; else desc->status |= IRQ_MOVE_PCNTXT; set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, irq_name); mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; entry->vector = cfg->vector; entry->delivery_mode = apic->irq_delivery_mode; entry->dest_mode = apic->irq_dest_mode; entry->polarity = 0; entry->trigger = 0; entry->mask = 0; entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); mmr_pnode = uv_blade_to_pnode(mmr_blade); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); if (cfg->move_in_progress) send_cleanup_vector(cfg); return irq; } /* * Disable the specified MMR located on the specified blade so that MSIs are * longer allowed to be sent. 
*/ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset) { unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; entry->mask = 1; uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); } static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg = desc->chip_data; unsigned int dest; unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; unsigned long mmr_offset; int mmr_pnode; if (set_desc_affinity(desc, mask, &dest)) return -1; mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; entry->vector = cfg->vector; entry->delivery_mode = apic->irq_delivery_mode; entry->dest_mode = apic->irq_dest_mode; entry->polarity = 0; entry->trigger = 0; entry->mask = 0; entry->dest = dest; /* Get previously stored MMR and pnode of hub sourcing interrupts */ if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) return -1; uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); if (cfg->move_in_progress) send_cleanup_vector(cfg); return 0; } /* * Set up a mapping of an available irq and vector, and enable the specified * MMR that defines the MSI that is to be sent to the specified CPU when an * interrupt is raised. */ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, unsigned long mmr_offset, int limit) { int irq, ret; irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade)); if (irq <= 0) return -EBUSY; ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset, limit); if (ret == irq) uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade); else destroy_irq(irq); return ret; } EXPORT_SYMBOL_GPL(uv_setup_irq); /* * Tear down a mapping of an irq and vector, and disable the specified MMR that * defined the MSI that was to be sent to the specified CPU when an interrupt * was raised. 
* * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq(). */ void uv_teardown_irq(unsigned int irq) { struct uv_irq_2_mmr_pnode *e; struct rb_node *n; unsigned long irqflags; spin_lock_irqsave(&uv_irq_lock, irqflags); n = uv_irq_root.rb_node; while (n) { e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); if (e->irq == irq) { arch_disable_uv_irq(e->pnode, e->offset); rb_erase(n, &uv_irq_root); kfree(e); break; } if (irq < e->irq) n = n->rb_left; else n = n->rb_right; } spin_unlock_irqrestore(&uv_irq_lock, irqflags); destroy_irq(irq); } EXPORT_SYMBOL_GPL(uv_teardown_irq);
gpl-2.0
singleman/linux
tools/testing/selftests/timers/threadtest.c
1138
4481
/* threadtest.c * by: john stultz (johnstul@us.ibm.com) * (C) Copyright IBM 2004, 2005, 2006, 2012 * Licensed under the GPLv2 * * To build: * $ gcc threadtest.c -o threadtest -lrt * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <sys/time.h> #include <pthread.h> #ifdef KTEST #include "../kselftest.h" #else static inline int ksft_exit_pass(void) { exit(0); } static inline int ksft_exit_fail(void) { exit(1); } #endif /* serializes shared list access */ pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* serializes console output */ pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER; #define MAX_THREADS 128 #define LISTSIZE 128 int done = 0; struct timespec global_list[LISTSIZE]; int listcount = 0; void checklist(struct timespec *list, int size) { int i, j; struct timespec *a, *b; /* scan the list */ for (i = 0; i < size-1; i++) { a = &list[i]; b = &list[i+1]; /* look for any time inconsistencies */ if ((b->tv_sec <= a->tv_sec) && (b->tv_nsec < a->tv_nsec)) { /* flag other threads */ done = 1; /*serialize printing to avoid junky output*/ pthread_mutex_lock(&print_lock); /* dump the list */ printf("\n"); for (j = 0; j < size; j++) { if (j == i) printf("---------------\n"); printf("%lu:%lu\n", list[j].tv_sec, list[j].tv_nsec); if (j == i+1) printf("---------------\n"); } printf("[FAILED]\n"); pthread_mutex_unlock(&print_lock); } } } /* The shared thread shares a global list * that each thread fills while holding the lock. 
* This stresses clock syncronization across cpus. */ void *shared_thread(void *arg) { while (!done) { /* protect the list */ pthread_mutex_lock(&list_lock); /* see if we're ready to check the list */ if (listcount >= LISTSIZE) { checklist(global_list, LISTSIZE); listcount = 0; } clock_gettime(CLOCK_MONOTONIC, &global_list[listcount++]); pthread_mutex_unlock(&list_lock); } return NULL; } /* Each independent thread fills in its own * list. This stresses clock_gettime() lock contention. */ void *independent_thread(void *arg) { struct timespec my_list[LISTSIZE]; int count; while (!done) { /* fill the list */ for (count = 0; count < LISTSIZE; count++) clock_gettime(CLOCK_MONOTONIC, &my_list[count]); checklist(my_list, LISTSIZE); } return NULL; } #define DEFAULT_THREAD_COUNT 8 #define DEFAULT_RUNTIME 30 int main(int argc, char **argv) { int thread_count, i; time_t start, now, runtime; char buf[255]; pthread_t pth[MAX_THREADS]; int opt; void *tret; int ret = 0; void *(*thread)(void *) = shared_thread; thread_count = DEFAULT_THREAD_COUNT; runtime = DEFAULT_RUNTIME; /* Process arguments */ while ((opt = getopt(argc, argv, "t:n:i")) != -1) { switch (opt) { case 't': runtime = atoi(optarg); break; case 'n': thread_count = atoi(optarg); break; case 'i': thread = independent_thread; printf("using independent threads\n"); break; default: printf("Usage: %s [-t <secs>] [-n <numthreads>] [-i]\n", argv[0]); printf(" -t: time to run\n"); printf(" -n: number of threads\n"); printf(" -i: use independent threads\n"); return -1; } } if (thread_count > MAX_THREADS) thread_count = MAX_THREADS; setbuf(stdout, NULL); start = time(0); strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start)); printf("%s\n", buf); printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime); /* spawn */ for (i = 0; i < thread_count; i++) pthread_create(&pth[i], 0, thread, 0); while (time(&now) < start + runtime) { sleep(1); if (done) { ret = 1; strftime(buf, 255, "%a, %d %b %Y %T 
%z", localtime(&now)); printf("%s\n", buf); goto out; } } printf("[OK]\n"); done = 1; out: /* wait */ for (i = 0; i < thread_count; i++) pthread_join(pth[i], &tret); /* die */ if (ret) ksft_exit_fail(); return ksft_exit_pass(); }
gpl-2.0
abeobk/sam9x35
drivers/ssb/driver_gpio.c
1394
12059
/* * Sonics Silicon Backplane * GPIO driver * * Copyright 2011, Broadcom Corporation * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de> * * Licensed under the GNU/GPL. See COPYING for details. */ #include <linux/gpio.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/export.h> #include <linux/ssb/ssb.h> #include "ssb_private.h" /************************************************** * Shared **************************************************/ static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip) { return container_of(chip, struct ssb_bus, gpio); } #if IS_ENABLED(CONFIG_SSB_EMBEDDED) static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); if (bus->bustype == SSB_BUSTYPE_SSB) return irq_find_mapping(bus->irq_domain, gpio); else return -EINVAL; } #endif /************************************************** * ChipCommon **************************************************/ static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio); } static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio, int value) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0); } static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 0); return 0; } static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 1 << gpio); ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 
1 << gpio : 0); return 0; } static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_chipco_gpio_control(&bus->chipco, 1 << gpio, 0); /* clear pulldown */ ssb_chipco_gpio_pulldown(&bus->chipco, 1 << gpio, 0); /* Set pullup */ ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 1 << gpio); return 0; } static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); /* clear pullup */ ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0); } #if IS_ENABLED(CONFIG_SSB_EMBEDDED) static void ssb_gpio_irq_chipco_mask(struct irq_data *d) { struct ssb_bus *bus = irq_data_get_irq_chip_data(d); int gpio = irqd_to_hwirq(d); ssb_chipco_gpio_intmask(&bus->chipco, BIT(gpio), 0); } static void ssb_gpio_irq_chipco_unmask(struct irq_data *d) { struct ssb_bus *bus = irq_data_get_irq_chip_data(d); int gpio = irqd_to_hwirq(d); u32 val = ssb_chipco_gpio_in(&bus->chipco, BIT(gpio)); ssb_chipco_gpio_polarity(&bus->chipco, BIT(gpio), val); ssb_chipco_gpio_intmask(&bus->chipco, BIT(gpio), BIT(gpio)); } static struct irq_chip ssb_gpio_irq_chipco_chip = { .name = "SSB-GPIO-CC", .irq_mask = ssb_gpio_irq_chipco_mask, .irq_unmask = ssb_gpio_irq_chipco_unmask, }; static irqreturn_t ssb_gpio_irq_chipco_handler(int irq, void *dev_id) { struct ssb_bus *bus = dev_id; struct ssb_chipcommon *chipco = &bus->chipco; u32 val = chipco_read32(chipco, SSB_CHIPCO_GPIOIN); u32 mask = chipco_read32(chipco, SSB_CHIPCO_GPIOIRQ); u32 pol = chipco_read32(chipco, SSB_CHIPCO_GPIOPOL); unsigned long irqs = (val ^ pol) & mask; int gpio; if (!irqs) return IRQ_NONE; for_each_set_bit(gpio, &irqs, bus->gpio.ngpio) generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio)); ssb_chipco_gpio_polarity(chipco, irqs, val & irqs); return IRQ_HANDLED; } static int ssb_gpio_irq_chipco_domain_init(struct ssb_bus *bus) { struct ssb_chipcommon *chipco = &bus->chipco; struct gpio_chip *chip = &bus->gpio; int gpio, hwirq, err; 
if (bus->bustype != SSB_BUSTYPE_SSB) return 0; bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio, &irq_domain_simple_ops, chipco); if (!bus->irq_domain) { err = -ENODEV; goto err_irq_domain; } for (gpio = 0; gpio < chip->ngpio; gpio++) { int irq = irq_create_mapping(bus->irq_domain, gpio); irq_set_chip_data(irq, bus); irq_set_chip_and_handler(irq, &ssb_gpio_irq_chipco_chip, handle_simple_irq); } hwirq = ssb_mips_irq(bus->chipco.dev) + 2; err = request_irq(hwirq, ssb_gpio_irq_chipco_handler, IRQF_SHARED, "gpio", bus); if (err) goto err_req_irq; ssb_chipco_gpio_intmask(&bus->chipco, ~0, 0); chipco_set32(chipco, SSB_CHIPCO_IRQMASK, SSB_CHIPCO_IRQ_GPIO); return 0; err_req_irq: for (gpio = 0; gpio < chip->ngpio; gpio++) { int irq = irq_find_mapping(bus->irq_domain, gpio); irq_dispose_mapping(irq); } irq_domain_remove(bus->irq_domain); err_irq_domain: return err; } static void ssb_gpio_irq_chipco_domain_exit(struct ssb_bus *bus) { struct ssb_chipcommon *chipco = &bus->chipco; struct gpio_chip *chip = &bus->gpio; int gpio; if (bus->bustype != SSB_BUSTYPE_SSB) return; chipco_mask32(chipco, SSB_CHIPCO_IRQMASK, ~SSB_CHIPCO_IRQ_GPIO); free_irq(ssb_mips_irq(bus->chipco.dev) + 2, chipco); for (gpio = 0; gpio < chip->ngpio; gpio++) { int irq = irq_find_mapping(bus->irq_domain, gpio); irq_dispose_mapping(irq); } irq_domain_remove(bus->irq_domain); } #else static int ssb_gpio_irq_chipco_domain_init(struct ssb_bus *bus) { return 0; } static void ssb_gpio_irq_chipco_domain_exit(struct ssb_bus *bus) { } #endif static int ssb_gpio_chipco_init(struct ssb_bus *bus) { struct gpio_chip *chip = &bus->gpio; int err; chip->label = "ssb_chipco_gpio"; chip->owner = THIS_MODULE; chip->request = ssb_gpio_chipco_request; chip->free = ssb_gpio_chipco_free; chip->get = ssb_gpio_chipco_get_value; chip->set = ssb_gpio_chipco_set_value; chip->direction_input = ssb_gpio_chipco_direction_input; chip->direction_output = ssb_gpio_chipco_direction_output; #if IS_ENABLED(CONFIG_SSB_EMBEDDED) 
chip->to_irq = ssb_gpio_to_irq; #endif chip->ngpio = 16; /* There is just one SoC in one device and its GPIO addresses should be * deterministic to address them more easily. The other buses could get * a random base number. */ if (bus->bustype == SSB_BUSTYPE_SSB) chip->base = 0; else chip->base = -1; err = ssb_gpio_irq_chipco_domain_init(bus); if (err) return err; err = gpiochip_add(chip); if (err) { ssb_gpio_irq_chipco_domain_exit(bus); return err; } return 0; } /************************************************** * EXTIF **************************************************/ #ifdef CONFIG_SSB_DRIVER_EXTIF static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio); } static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio, int value) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0); } static int ssb_gpio_extif_direction_input(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 0); return 0; } static int ssb_gpio_extif_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 1 << gpio); ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 
1 << gpio : 0); return 0; } #if IS_ENABLED(CONFIG_SSB_EMBEDDED) static void ssb_gpio_irq_extif_mask(struct irq_data *d) { struct ssb_bus *bus = irq_data_get_irq_chip_data(d); int gpio = irqd_to_hwirq(d); ssb_extif_gpio_intmask(&bus->extif, BIT(gpio), 0); } static void ssb_gpio_irq_extif_unmask(struct irq_data *d) { struct ssb_bus *bus = irq_data_get_irq_chip_data(d); int gpio = irqd_to_hwirq(d); u32 val = ssb_extif_gpio_in(&bus->extif, BIT(gpio)); ssb_extif_gpio_polarity(&bus->extif, BIT(gpio), val); ssb_extif_gpio_intmask(&bus->extif, BIT(gpio), BIT(gpio)); } static struct irq_chip ssb_gpio_irq_extif_chip = { .name = "SSB-GPIO-EXTIF", .irq_mask = ssb_gpio_irq_extif_mask, .irq_unmask = ssb_gpio_irq_extif_unmask, }; static irqreturn_t ssb_gpio_irq_extif_handler(int irq, void *dev_id) { struct ssb_bus *bus = dev_id; struct ssb_extif *extif = &bus->extif; u32 val = ssb_read32(extif->dev, SSB_EXTIF_GPIO_IN); u32 mask = ssb_read32(extif->dev, SSB_EXTIF_GPIO_INTMASK); u32 pol = ssb_read32(extif->dev, SSB_EXTIF_GPIO_INTPOL); unsigned long irqs = (val ^ pol) & mask; int gpio; if (!irqs) return IRQ_NONE; for_each_set_bit(gpio, &irqs, bus->gpio.ngpio) generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio)); ssb_extif_gpio_polarity(extif, irqs, val & irqs); return IRQ_HANDLED; } static int ssb_gpio_irq_extif_domain_init(struct ssb_bus *bus) { struct ssb_extif *extif = &bus->extif; struct gpio_chip *chip = &bus->gpio; int gpio, hwirq, err; if (bus->bustype != SSB_BUSTYPE_SSB) return 0; bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio, &irq_domain_simple_ops, extif); if (!bus->irq_domain) { err = -ENODEV; goto err_irq_domain; } for (gpio = 0; gpio < chip->ngpio; gpio++) { int irq = irq_create_mapping(bus->irq_domain, gpio); irq_set_chip_data(irq, bus); irq_set_chip_and_handler(irq, &ssb_gpio_irq_extif_chip, handle_simple_irq); } hwirq = ssb_mips_irq(bus->extif.dev) + 2; err = request_irq(hwirq, ssb_gpio_irq_extif_handler, IRQF_SHARED, "gpio", bus); if (err) goto 
err_req_irq; ssb_extif_gpio_intmask(&bus->extif, ~0, 0); return 0; err_req_irq: for (gpio = 0; gpio < chip->ngpio; gpio++) { int irq = irq_find_mapping(bus->irq_domain, gpio); irq_dispose_mapping(irq); } irq_domain_remove(bus->irq_domain); err_irq_domain: return err; } static void ssb_gpio_irq_extif_domain_exit(struct ssb_bus *bus) { struct ssb_extif *extif = &bus->extif; struct gpio_chip *chip = &bus->gpio; int gpio; if (bus->bustype != SSB_BUSTYPE_SSB) return; free_irq(ssb_mips_irq(bus->extif.dev) + 2, extif); for (gpio = 0; gpio < chip->ngpio; gpio++) { int irq = irq_find_mapping(bus->irq_domain, gpio); irq_dispose_mapping(irq); } irq_domain_remove(bus->irq_domain); } #else static int ssb_gpio_irq_extif_domain_init(struct ssb_bus *bus) { return 0; } static void ssb_gpio_irq_extif_domain_exit(struct ssb_bus *bus) { } #endif static int ssb_gpio_extif_init(struct ssb_bus *bus) { struct gpio_chip *chip = &bus->gpio; int err; chip->label = "ssb_extif_gpio"; chip->owner = THIS_MODULE; chip->get = ssb_gpio_extif_get_value; chip->set = ssb_gpio_extif_set_value; chip->direction_input = ssb_gpio_extif_direction_input; chip->direction_output = ssb_gpio_extif_direction_output; #if IS_ENABLED(CONFIG_SSB_EMBEDDED) chip->to_irq = ssb_gpio_to_irq; #endif chip->ngpio = 5; /* There is just one SoC in one device and its GPIO addresses should be * deterministic to address them more easily. The other buses could get * a random base number. 
*/ if (bus->bustype == SSB_BUSTYPE_SSB) chip->base = 0; else chip->base = -1; err = ssb_gpio_irq_extif_domain_init(bus); if (err) return err; err = gpiochip_add(chip); if (err) { ssb_gpio_irq_extif_domain_exit(bus); return err; } return 0; } #else static int ssb_gpio_extif_init(struct ssb_bus *bus) { return -ENOTSUPP; } #endif /************************************************** * Init **************************************************/ int ssb_gpio_init(struct ssb_bus *bus) { if (ssb_chipco_available(&bus->chipco)) return ssb_gpio_chipco_init(bus); else if (ssb_extif_available(&bus->extif)) return ssb_gpio_extif_init(bus); else SSB_WARN_ON(1); return -1; } int ssb_gpio_unregister(struct ssb_bus *bus) { if (ssb_chipco_available(&bus->chipco) || ssb_extif_available(&bus->extif)) { gpiochip_remove(&bus->gpio); return 0; } else { SSB_WARN_ON(1); } return -1; }
gpl-2.0
giorgio130/linux-2.6.35-kobo-multitouch
arch/mips/txx9/rbtx4939/irq.c
1650
2259
/*
 * Toshiba RBTX4939 interrupt routines
 * Based on linux/arch/mips/txx9/rbtx4938/irq.c,
 * and RBTX49xx patch from CELF patch archive.
 *
 * Copyright (C) 2000-2001,2005-2006 Toshiba Corporation
 * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
 * terms of the GNU General Public License version 2. This program is
 * licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/mipsregs.h>
#include <asm/txx9/rbtx4939.h>

/*
 * RBTX4939 IOC controller definition
 */

/* Enable an IOC interrupt line by setting its bit in the IEN register. */
static void rbtx4939_ioc_irq_unmask(unsigned int irq)
{
	int ioc_nr = irq - RBTX4939_IRQ_IOC;

	writeb(readb(rbtx4939_ien_addr) | (1 << ioc_nr), rbtx4939_ien_addr);
}

/* Disable an IOC interrupt line by clearing its bit in the IEN register. */
static void rbtx4939_ioc_irq_mask(unsigned int irq)
{
	int ioc_nr = irq - RBTX4939_IRQ_IOC;

	writeb(readb(rbtx4939_ien_addr) & ~(1 << ioc_nr), rbtx4939_ien_addr);
	/* ensure the MMIO write is posted before returning */
	mmiowb();
}

/* ack/mask_ack reuse mask: the IOC has no separate acknowledge register. */
static struct irq_chip rbtx4939_ioc_irq_chip = {
	.name		= "IOC",
	.ack		= rbtx4939_ioc_irq_mask,
	.mask		= rbtx4939_ioc_irq_mask,
	.mask_ack	= rbtx4939_ioc_irq_mask,
	.unmask		= rbtx4939_ioc_irq_unmask,
};

/*
 * Read the IOC interrupt-status register and translate the highest set
 * bit into its virtual IRQ number; -1 if no IOC interrupt is pending.
 */
static inline int rbtx4939_ioc_irqroute(void)
{
	unsigned char istat = readb(rbtx4939_ifac2_addr);

	if (unlikely(istat == 0))
		return -1;
	return RBTX4939_IRQ_IOC + __fls8(istat);
}

/*
 * Board-level dispatch: map the pending CP0 cause bits / TX4939 IRC state
 * to one IRQ number, redirecting the cascaded IOC interrupt to its source.
 * Returns -1 when nothing is pending.
 */
static int rbtx4939_irq_dispatch(int pending)
{
	int irq;

	if (pending & CAUSEF_IP7)
		return MIPS_CPU_IRQ_BASE + 7;
	irq = tx4939_irq();
	if (likely(irq >= 0)) {
		/* redirect IOC interrupts */
		switch (irq) {
		case RBTX4939_IRQ_IOCINT:
			irq = rbtx4939_ioc_irqroute();
			break;
		}
	} else if (pending & CAUSEF_IP0)
		irq = MIPS_CPU_IRQ_BASE + 0;
	else if (pending & CAUSEF_IP1)
		irq = MIPS_CPU_IRQ_BASE + 1;
	else
		irq = -1;
	return irq;
}

/*
 * Board IRQ setup: quiesce the IOC, install the dispatch hook, then
 * register the IOC irq_chip for each cascaded line behind IOCINT.
 */
void __init rbtx4939_irq_setup(void)
{
	int i;

	/* mask all IOC interrupts */
	writeb(0, rbtx4939_ien_addr);
	/* clear SoftInt interrupts */
	writeb(0, rbtx4939_softint_addr);
	txx9_irq_dispatch = rbtx4939_irq_dispatch;
	tx4939_irq_init();
	for (i = RBTX4939_IRQ_IOC;
	     i < RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC; i++)
		set_irq_chip_and_handler(i, &rbtx4939_ioc_irq_chip,
					 handle_level_irq);
	set_irq_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq);
}
gpl-2.0
robacklin/ts4700
arch/mips/txx9/rbtx4939/irq.c
1650
2259
/*
 * Toshiba RBTX4939 interrupt routines
 * Based on linux/arch/mips/txx9/rbtx4938/irq.c,
 * and RBTX49xx patch from CELF patch archive.
 *
 * Copyright (C) 2000-2001,2005-2006 Toshiba Corporation
 * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
 * terms of the GNU General Public License version 2. This program is
 * licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/mipsregs.h>
#include <asm/txx9/rbtx4939.h>

/*
 * RBTX4939 IOC controller definition
 */

/* Enable an IOC interrupt line by setting its bit in the IEN register. */
static void rbtx4939_ioc_irq_unmask(unsigned int irq)
{
	int ioc_nr = irq - RBTX4939_IRQ_IOC;

	writeb(readb(rbtx4939_ien_addr) | (1 << ioc_nr), rbtx4939_ien_addr);
}

/* Disable an IOC interrupt line by clearing its bit in the IEN register. */
static void rbtx4939_ioc_irq_mask(unsigned int irq)
{
	int ioc_nr = irq - RBTX4939_IRQ_IOC;

	writeb(readb(rbtx4939_ien_addr) & ~(1 << ioc_nr), rbtx4939_ien_addr);
	/* ensure the MMIO write is posted before returning */
	mmiowb();
}

/* ack/mask_ack reuse mask: the IOC has no separate acknowledge register. */
static struct irq_chip rbtx4939_ioc_irq_chip = {
	.name		= "IOC",
	.ack		= rbtx4939_ioc_irq_mask,
	.mask		= rbtx4939_ioc_irq_mask,
	.mask_ack	= rbtx4939_ioc_irq_mask,
	.unmask		= rbtx4939_ioc_irq_unmask,
};

/*
 * Read the IOC interrupt-status register and translate the highest set
 * bit into its virtual IRQ number; -1 if no IOC interrupt is pending.
 */
static inline int rbtx4939_ioc_irqroute(void)
{
	unsigned char istat = readb(rbtx4939_ifac2_addr);

	if (unlikely(istat == 0))
		return -1;
	return RBTX4939_IRQ_IOC + __fls8(istat);
}

/*
 * Board-level dispatch: map the pending CP0 cause bits / TX4939 IRC state
 * to one IRQ number, redirecting the cascaded IOC interrupt to its source.
 * Returns -1 when nothing is pending.
 */
static int rbtx4939_irq_dispatch(int pending)
{
	int irq;

	if (pending & CAUSEF_IP7)
		return MIPS_CPU_IRQ_BASE + 7;
	irq = tx4939_irq();
	if (likely(irq >= 0)) {
		/* redirect IOC interrupts */
		switch (irq) {
		case RBTX4939_IRQ_IOCINT:
			irq = rbtx4939_ioc_irqroute();
			break;
		}
	} else if (pending & CAUSEF_IP0)
		irq = MIPS_CPU_IRQ_BASE + 0;
	else if (pending & CAUSEF_IP1)
		irq = MIPS_CPU_IRQ_BASE + 1;
	else
		irq = -1;
	return irq;
}

/*
 * Board IRQ setup: quiesce the IOC, install the dispatch hook, then
 * register the IOC irq_chip for each cascaded line behind IOCINT.
 */
void __init rbtx4939_irq_setup(void)
{
	int i;

	/* mask all IOC interrupts */
	writeb(0, rbtx4939_ien_addr);
	/* clear SoftInt interrupts */
	writeb(0, rbtx4939_softint_addr);
	txx9_irq_dispatch = rbtx4939_irq_dispatch;
	tx4939_irq_init();
	for (i = RBTX4939_IRQ_IOC;
	     i < RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC; i++)
		set_irq_chip_and_handler(i, &rbtx4939_ioc_irq_chip,
					 handle_level_irq);
	set_irq_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq);
}
gpl-2.0
zhjwpku/gsoc
drivers/mmc/host/of_mmc_spi.c
1906
3891
/* * OpenFirmware bindings for the MMC-over-SPI driver * * Copyright (c) MontaVista Software, Inc. 2008. * * Author: Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/spi/spi.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/core.h> #include <linux/mmc/host.h> /* For archs that don't support NO_IRQ (such as mips), provide a dummy value */ #ifndef NO_IRQ #define NO_IRQ 0 #endif MODULE_LICENSE("GPL"); enum { CD_GPIO = 0, WP_GPIO, NUM_GPIOS, }; struct of_mmc_spi { int gpios[NUM_GPIOS]; bool alow_gpios[NUM_GPIOS]; int detect_irq; struct mmc_spi_platform_data pdata; }; static struct of_mmc_spi *to_of_mmc_spi(struct device *dev) { return container_of(dev->platform_data, struct of_mmc_spi, pdata); } static int of_mmc_spi_init(struct device *dev, irqreturn_t (*irqhandler)(int, void *), void *mmc) { struct of_mmc_spi *oms = to_of_mmc_spi(dev); return request_threaded_irq(oms->detect_irq, NULL, irqhandler, 0, dev_name(dev), mmc); } static void of_mmc_spi_exit(struct device *dev, void *mmc) { struct of_mmc_spi *oms = to_of_mmc_spi(dev); free_irq(oms->detect_irq, mmc); } struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) { struct device *dev = &spi->dev; struct device_node *np = dev->of_node; struct of_mmc_spi *oms; const u32 *voltage_ranges; int num_ranges; int i; int ret = -EINVAL; if (dev->platform_data || !np) return dev->platform_data; oms = kzalloc(sizeof(*oms), GFP_KERNEL); if (!oms) return NULL; voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); 
num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; if (!voltage_ranges || !num_ranges) { dev_err(dev, "OF: voltage-ranges unspecified\n"); goto err_ocr; } for (i = 0; i < num_ranges; i++) { const int j = i * 2; u32 mask; mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]), be32_to_cpu(voltage_ranges[j + 1])); if (!mask) { ret = -EINVAL; dev_err(dev, "OF: voltage-range #%d is invalid\n", i); goto err_ocr; } oms->pdata.ocr_mask |= mask; } for (i = 0; i < ARRAY_SIZE(oms->gpios); i++) { enum of_gpio_flags gpio_flags; oms->gpios[i] = of_get_gpio_flags(np, i, &gpio_flags); if (!gpio_is_valid(oms->gpios[i])) continue; if (gpio_flags & OF_GPIO_ACTIVE_LOW) oms->alow_gpios[i] = true; } if (gpio_is_valid(oms->gpios[CD_GPIO])) { oms->pdata.cd_gpio = oms->gpios[CD_GPIO]; oms->pdata.flags |= MMC_SPI_USE_CD_GPIO; if (!oms->alow_gpios[CD_GPIO]) oms->pdata.caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; } if (gpio_is_valid(oms->gpios[WP_GPIO])) { oms->pdata.ro_gpio = oms->gpios[WP_GPIO]; oms->pdata.flags |= MMC_SPI_USE_RO_GPIO; if (!oms->alow_gpios[WP_GPIO]) oms->pdata.caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; } oms->detect_irq = irq_of_parse_and_map(np, 0); if (oms->detect_irq != 0) { oms->pdata.init = of_mmc_spi_init; oms->pdata.exit = of_mmc_spi_exit; } else { oms->pdata.caps |= MMC_CAP_NEEDS_POLL; } dev->platform_data = &oms->pdata; return dev->platform_data; err_ocr: kfree(oms); return NULL; } EXPORT_SYMBOL(mmc_spi_get_pdata); void mmc_spi_put_pdata(struct spi_device *spi) { struct device *dev = &spi->dev; struct device_node *np = dev->of_node; struct of_mmc_spi *oms = to_of_mmc_spi(dev); if (!dev->platform_data || !np) return; kfree(oms); dev->platform_data = NULL; } EXPORT_SYMBOL(mmc_spi_put_pdata);
gpl-2.0
h8rift/android_kernel_htc_msm8960-evita
arch/arm/plat-iop/time.c
2674
4408
/* * arch/arm/plat-iop/time.c * * Timer code for IOP32x and IOP33x based systems * * Author: Deepak Saxena <dsaxena@mvista.com> * * Copyright 2002-2003 MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/init.h> #include <linux/timex.h> #include <linux/sched.h> #include <linux/io.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/sched_clock.h> #include <asm/uaccess.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> #include <mach/time.h> /* * Minimum clocksource/clockevent timer range in seconds */ #define IOP_MIN_RANGE 4 /* * IOP clocksource (free-running timer 1). */ static cycle_t notrace iop_clocksource_read(struct clocksource *unused) { return 0xffffffffu - read_tcr1(); } static struct clocksource iop_clocksource = { .name = "iop_timer1", .rating = 300, .read = iop_clocksource_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static DEFINE_CLOCK_DATA(cd); /* * IOP sched_clock() implementation via its clocksource. */ unsigned long long notrace sched_clock(void) { u32 cyc = 0xffffffffu - read_tcr1(); return cyc_to_sched_clock(&cd, cyc, (u32)~0); } static void notrace iop_update_sched_clock(void) { u32 cyc = 0xffffffffu - read_tcr1(); update_sched_clock(&cd, cyc, (u32)~0); } /* * IOP clockevents (interrupting timer 0). 
*/ static int iop_set_next_event(unsigned long delta, struct clock_event_device *unused) { u32 tmr = IOP_TMR_PRIVILEGED | IOP_TMR_RATIO_1_1; BUG_ON(delta == 0); write_tmr0(tmr & ~(IOP_TMR_EN | IOP_TMR_RELOAD)); write_tcr0(delta); write_tmr0((tmr & ~IOP_TMR_RELOAD) | IOP_TMR_EN); return 0; } static unsigned long ticks_per_jiffy; static void iop_set_mode(enum clock_event_mode mode, struct clock_event_device *unused) { u32 tmr = read_tmr0(); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: write_tmr0(tmr & ~IOP_TMR_EN); write_tcr0(ticks_per_jiffy - 1); write_trr0(ticks_per_jiffy - 1); tmr |= (IOP_TMR_RELOAD | IOP_TMR_EN); break; case CLOCK_EVT_MODE_ONESHOT: /* ->set_next_event sets period and enables timer */ tmr &= ~(IOP_TMR_RELOAD | IOP_TMR_EN); break; case CLOCK_EVT_MODE_RESUME: tmr |= IOP_TMR_EN; break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: default: tmr &= ~IOP_TMR_EN; break; } write_tmr0(tmr); } static struct clock_event_device iop_clockevent = { .name = "iop_timer0", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .rating = 300, .set_next_event = iop_set_next_event, .set_mode = iop_set_mode, }; static irqreturn_t iop_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = dev_id; write_tisr(1); evt->event_handler(evt); return IRQ_HANDLED; } static struct irqaction iop_timer_irq = { .name = "IOP Timer Tick", .handler = iop_timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .dev_id = &iop_clockevent, }; static unsigned long iop_tick_rate; unsigned long get_iop_tick_rate(void) { return iop_tick_rate; } EXPORT_SYMBOL(get_iop_tick_rate); void __init iop_init_time(unsigned long tick_rate) { u32 timer_ctl; init_sched_clock(&cd, iop_update_sched_clock, 32, tick_rate); ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ); iop_tick_rate = tick_rate; timer_ctl = IOP_TMR_EN | IOP_TMR_PRIVILEGED | IOP_TMR_RELOAD | IOP_TMR_RATIO_1_1; /* * Set up interrupting clockevent timer 0. 
*/ write_tmr0(timer_ctl & ~IOP_TMR_EN); write_tisr(1); setup_irq(IRQ_IOP_TIMER0, &iop_timer_irq); clockevents_calc_mult_shift(&iop_clockevent, tick_rate, IOP_MIN_RANGE); iop_clockevent.max_delta_ns = clockevent_delta2ns(0xfffffffe, &iop_clockevent); iop_clockevent.min_delta_ns = clockevent_delta2ns(0xf, &iop_clockevent); iop_clockevent.cpumask = cpumask_of(0); clockevents_register_device(&iop_clockevent); /* * Set up free-running clocksource timer 1. */ write_trr1(0xffffffff); write_tcr1(0xffffffff); write_tmr1(timer_ctl); clocksource_register_hz(&iop_clocksource, tick_rate); }
gpl-2.0
ziqiaozhou/cachebar
source/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
2674
46867
/****************************************************************************** * rtl871x_mp_ioctl.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #include <linux/rndis.h> #include "osdep_service.h" #include "drv_types.h" #include "mlme_osdep.h" #include "rtl871x_mp.h" #include "rtl871x_mp_ioctl.h" uint oid_null_function(struct oid_par_priv *poid_par_priv) { return RNDIS_STATUS_SUCCESS; } uint oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid == SET_OID) { if (poid_par_priv->information_buf_len >= sizeof(u8)) Adapter->registrypriv.wireless_mode = *(u8 *)poid_par_priv->information_buf; else status = RNDIS_STATUS_INVALID_LENGTH; } else if (poid_par_priv->type_of_oid == QUERY_OID) { if (poid_par_priv->information_buf_len >= sizeof(u8)) { *(u8 *)poid_par_priv->information_buf = Adapter->registrypriv.wireless_mode; 
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; } else status = RNDIS_STATUS_INVALID_LENGTH; } else { status = RNDIS_STATUS_NOT_ACCEPTED; } return status; } uint oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); struct bb_reg_param *pbbreg; u16 offset; u32 value; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param)) return RNDIS_STATUS_INVALID_LENGTH; pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf); offset = (u16)(pbbreg->offset) & 0xFFF; /*0ffset :0x800~0xfff*/ if (offset < BB_REG_BASE_ADDR) offset |= BB_REG_BASE_ADDR; value = pbbreg->value; r8712_bb_reg_write(Adapter, offset, value); return status; } uint oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); struct bb_reg_param *pbbreg; u16 offset; u32 value; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param)) return RNDIS_STATUS_INVALID_LENGTH; pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf); offset = (u16)(pbbreg->offset) & 0xFFF; /*0ffset :0x800~0xfff*/ if (offset < BB_REG_BASE_ADDR) offset |= BB_REG_BASE_ADDR; value = r8712_bb_reg_read(Adapter, offset); pbbreg->value = value; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return status; } uint oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); struct rf_reg_param *pbbreg; u8 path; u8 offset; u32 value; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < 
sizeof(struct rf_reg_param)) return RNDIS_STATUS_INVALID_LENGTH; pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf); path = (u8)pbbreg->path; if (path > RF_PATH_B) return RNDIS_STATUS_NOT_ACCEPTED; offset = (u8)pbbreg->offset; value = pbbreg->value; r8712_rf_reg_write(Adapter, path, offset, value); return status; } uint oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct rf_reg_param *pbbreg; u8 path; u8 offset; u32 value; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(struct rf_reg_param)) return RNDIS_STATUS_INVALID_LENGTH; pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf); path = (u8)pbbreg->path; if (path > RF_PATH_B) /* 1T2R path_a /path_b */ return RNDIS_STATUS_NOT_ACCEPTED; offset = (u8)pbbreg->offset; value = r8712_rf_reg_read(Adapter, path, offset); pbbreg->value = value; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return status; } /*This function initializes the DUT to the MP test mode*/ static int mp_start_test(struct _adapter *padapter) { struct mp_priv *pmppriv = &padapter->mppriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct wlan_network *tgt_network = &pmlmepriv->cur_network; struct ndis_wlan_bssid_ex bssid; struct sta_info *psta; unsigned long length; unsigned long irqL; int res = _SUCCESS; /* 3 1. 
initialize a new struct ndis_wlan_bssid_ex */ memcpy(bssid.MacAddress, pmppriv->network_macaddr, ETH_ALEN); bssid.Ssid.SsidLength = 16; memcpy(bssid.Ssid.Ssid, (unsigned char *)"mp_pseudo_adhoc", bssid.Ssid.SsidLength); bssid.InfrastructureMode = Ndis802_11IBSS; bssid.NetworkTypeInUse = Ndis802_11DS; bssid.IELength = 0; length = r8712_get_ndis_wlan_bssid_ex_sz(&bssid); if (length % 4) { /*round up to multiple of 4 bytes.*/ bssid.Length = ((length >> 2) + 1) << 2; } else bssid.Length = length; spin_lock_irqsave(&pmlmepriv->lock, irqL); if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) goto end_of_mp_start_test; /*init mp_start_test status*/ pmppriv->prev_fw_state = get_fwstate(pmlmepriv); pmlmepriv->fw_state = WIFI_MP_STATE; if (pmppriv->mode == _LOOPBOOK_MODE_) set_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE); /*append txdesc*/ set_fwstate(pmlmepriv, _FW_UNDER_LINKING); /* 3 2. create a new psta for mp driver */ /* clear psta in the cur_network, if any */ psta = r8712_get_stainfo(&padapter->stapriv, tgt_network->network.MacAddress); if (psta) r8712_free_stainfo(padapter, psta); psta = r8712_alloc_stainfo(&padapter->stapriv, bssid.MacAddress); if (psta == NULL) { res = _FAIL; goto end_of_mp_start_test; } /* 3 3. 
join psudo AdHoc */ tgt_network->join_res = 1; tgt_network->aid = psta->aid = 1; memcpy(&tgt_network->network, &bssid, length); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); r8712_os_indicate_connect(padapter); /* Set to LINKED STATE for MP TRX Testing */ set_fwstate(pmlmepriv, _FW_LINKED); end_of_mp_start_test: spin_unlock_irqrestore(&pmlmepriv->lock, irqL); return res; } /*This function change the DUT from the MP test mode into normal mode */ static int mp_stop_test(struct _adapter *padapter) { struct mp_priv *pmppriv = &padapter->mppriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct wlan_network *tgt_network = &pmlmepriv->cur_network; struct sta_info *psta; unsigned long irqL; spin_lock_irqsave(&pmlmepriv->lock, irqL); if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false) goto end_of_mp_stop_test; /* 3 1. disconnect psudo AdHoc */ r8712_os_indicate_disconnect(padapter); /* 3 2. clear psta used in mp test mode. */ psta = r8712_get_stainfo(&padapter->stapriv, tgt_network->network.MacAddress); if (psta) r8712_free_stainfo(padapter, psta); /* 3 3. 
return to normal state (default:station mode) */ pmlmepriv->fw_state = pmppriv->prev_fw_state; /* WIFI_STATION_STATE;*/ /*flush the cur_network*/ memset(tgt_network, 0, sizeof(struct wlan_network)); end_of_mp_stop_test: spin_unlock_irqrestore(&pmlmepriv->lock, irqL); return _SUCCESS; } int mp_start_joinbss(struct _adapter *padapter, struct ndis_802_11_ssid *pssid) { struct mp_priv *pmppriv = &padapter->mppriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; unsigned char res = _SUCCESS; if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false) return _FAIL; if (check_fwstate(pmlmepriv, _FW_LINKED) == false) return _FAIL; _clr_fwstate_(pmlmepriv, _FW_LINKED); res = r8712_setassocsta_cmd(padapter, pmppriv->network_macaddr); set_fwstate(pmlmepriv, _FW_UNDER_LINKING); return res; } uint oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; u32 ratevalue; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len != sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; ratevalue = *((u32 *)poid_par_priv->information_buf); if (ratevalue >= MPT_RATE_LAST) return RNDIS_STATUS_INVALID_DATA; Adapter->mppriv.curr_rateidx = ratevalue; r8712_SetDataRate(Adapter); return status; } uint oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; u32 mode; u8 val8; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; mode = *((u32 *)poid_par_priv->information_buf); Adapter->mppriv.mode = mode;/* 1 for loopback*/ if (mp_start_test(Adapter) == _FAIL) status = RNDIS_STATUS_NOT_ACCEPTED; r8712_write8(Adapter, MSR, 1); /* Link in ad hoc network, 0x1025004C */ r8712_write8(Adapter, RCR, 0); /* RCR : disable all pkt, 0x10250048 */ /* RCR disable Check BSSID, 
0x1025004a */ r8712_write8(Adapter, RCR+2, 0x57); /* disable RX filter map , mgt frames will put in RX FIFO 0 */ r8712_write16(Adapter, RXFLTMAP0, 0x0); val8 = r8712_read8(Adapter, EE_9346CR); if (!(val8 & _9356SEL)) { /*boot from EFUSE*/ r8712_efuse_reg_init(Adapter); r8712_efuse_change_max_size(Adapter); r8712_efuse_reg_uninit(Adapter); } return status; } uint oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (mp_stop_test(Adapter) == _FAIL) status = RNDIS_STATUS_NOT_ACCEPTED; return status; } uint oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; u32 Channel; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len != sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; Channel = *((u32 *)poid_par_priv->information_buf); if (Channel > 14) return RNDIS_STATUS_NOT_ACCEPTED; Adapter->mppriv.curr_ch = Channel; r8712_SetChannel(Adapter); return status; } uint oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; u32 antenna; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len != sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; antenna = *((u32 *)poid_par_priv->information_buf); Adapter->mppriv.antenna_tx = (u16)((antenna & 0xFFFF0000) >> 16); Adapter->mppriv.antenna_rx = (u16)(antenna & 0x0000FFFF); r8712_SwitchAntenna(Adapter); return status; } uint oid_rt_pro_set_tx_power_control_hdl( struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) 
(poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; u32 tx_pwr_idx; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len != sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; tx_pwr_idx = *((u32 *)poid_par_priv->information_buf); if (tx_pwr_idx > MAX_TX_PWR_INDEX_N_MODE) return RNDIS_STATUS_NOT_ACCEPTED; Adapter->mppriv.curr_txpoweridx = (u8)tx_pwr_idx; r8712_SetTxPower(Adapter); return status; } uint oid_rt_pro_query_tx_packet_sent_hdl( struct oid_par_priv *poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != QUERY_OID) { status = RNDIS_STATUS_NOT_ACCEPTED; return status; } if (poid_par_priv->information_buf_len == sizeof(u32)) { *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.tx_pktcount; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; } else status = RNDIS_STATUS_INVALID_LENGTH; return status; } uint oid_rt_pro_query_rx_packet_received_hdl( struct oid_par_priv *poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != QUERY_OID) { status = RNDIS_STATUS_NOT_ACCEPTED; return status; } if (poid_par_priv->information_buf_len == sizeof(u32)) { *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.rx_pktcount; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; } else status = RNDIS_STATUS_INVALID_LENGTH; return status; } uint oid_rt_pro_query_rx_packet_crc32_error_hdl( struct oid_par_priv *poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != QUERY_OID) { status = RNDIS_STATUS_NOT_ACCEPTED; return status; } if (poid_par_priv->information_buf_len == sizeof(u32)) { *(u32 *)poid_par_priv->information_buf = 
Adapter->mppriv.rx_crcerrpktcount; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; } else status = RNDIS_STATUS_INVALID_LENGTH; return status; } uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; Adapter->mppriv.tx_pktcount = 0; return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len == sizeof(u32)) { Adapter->mppriv.rx_pktcount = 0; Adapter->mppriv.rx_crcerrpktcount = 0; } else status = RNDIS_STATUS_INVALID_LENGTH; return status; } uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; r8712_ResetPhyRxPktCount(Adapter); return RNDIS_STATUS_SUCCESS; } uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len != sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; *(u32 *)poid_par_priv->information_buf = r8712_GetPhyRxPktReceived(Adapter); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return RNDIS_STATUS_SUCCESS; } uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if 
(poid_par_priv->information_buf_len != sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; *(u32 *)poid_par_priv->information_buf = r8712_GetPhyRxPktCRC32Error(Adapter); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; Adapter->mppriv.curr_modem = *((u8 *)poid_par_priv->information_buf); return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); u32 bStartTest; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; bStartTest = *((u32 *)poid_par_priv->information_buf); r8712_SetContinuousTx(Adapter, (u8)bStartTest); return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); u32 bStartTest; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; bStartTest = *((u32 *)poid_par_priv->information_buf); r8712_SetSingleCarrierTx(Adapter, (u8)bStartTest); return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); u32 bStartTest; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; bStartTest = *((u32 *)poid_par_priv->information_buf); r8712_SetCarrierSuppressionTx(Adapter, (u8)bStartTest); return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); u32 bStartTest; if (poid_par_priv->type_of_oid != SET_OID) return 
RNDIS_STATUS_NOT_ACCEPTED; bStartTest = *((u32 *)poid_par_priv->information_buf); r8712_SetSingleToneTx(Adapter, (u8)bStartTest); return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct ndis_802_11_ssid *pssid; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_needed = (u32)sizeof(struct ndis_802_11_ssid); *poid_par_priv->bytes_rw = 0; if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed) return RNDIS_STATUS_INVALID_LENGTH; pssid = (struct ndis_802_11_ssid *)poid_par_priv->information_buf; if (mp_start_joinbss(Adapter, pssid) == _FAIL) status = RNDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_rw = sizeof(struct ndis_802_11_ssid); return status; } uint oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct mp_rw_reg *RegRWStruct; u16 offset; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; RegRWStruct = (struct mp_rw_reg *)poid_par_priv->information_buf; if ((RegRWStruct->offset >= 0x10250800) && (RegRWStruct->offset <= 0x10250FFF)) { /*baseband register*/ /*0ffset :0x800~0xfff*/ offset = (u16)(RegRWStruct->offset) & 0xFFF; RegRWStruct->value = r8712_bb_reg_read(Adapter, offset); } else { switch (RegRWStruct->width) { case 1: RegRWStruct->value = r8712_read8(Adapter, RegRWStruct->offset); break; case 2: RegRWStruct->value = r8712_read16(Adapter, RegRWStruct->offset); break; case 4: RegRWStruct->value = r8712_read32(Adapter, RegRWStruct->offset); break; default: status = RNDIS_STATUS_NOT_ACCEPTED; break; } } *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return status; } uint oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv) { struct 
_adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct mp_rw_reg *RegRWStruct; u16 offset; u32 value; u32 oldValue = 0; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; RegRWStruct = (struct mp_rw_reg *)poid_par_priv->information_buf; if ((RegRWStruct->offset >= 0x10250800) && (RegRWStruct->offset <= 0x10250FFF)) { /*baseband register*/ offset = (u16)(RegRWStruct->offset) & 0xFFF; value = RegRWStruct->value; switch (RegRWStruct->width) { case 1: oldValue = r8712_bb_reg_read(Adapter, offset); oldValue &= 0xFFFFFF00; value &= 0x000000FF; value |= oldValue; break; case 2: oldValue = r8712_bb_reg_read(Adapter, offset); oldValue &= 0xFFFF0000; value &= 0x0000FFFF; value |= oldValue; break; } r8712_bb_reg_write(Adapter, offset, value); } else { switch (RegRWStruct->width) { case 1: r8712_write8(Adapter, RegRWStruct->offset, (unsigned char)RegRWStruct->value); break; case 2: r8712_write16(Adapter, RegRWStruct->offset, (unsigned short)RegRWStruct->value); break; case 4: r8712_write32(Adapter, RegRWStruct->offset, (unsigned int)RegRWStruct->value); break; default: status = RNDIS_STATUS_NOT_ACCEPTED; break; } if ((status == RNDIS_STATUS_SUCCESS) && (RegRWStruct->offset == HIMR) && (RegRWStruct->width == 4)) Adapter->ImrContent = RegRWStruct->value; } return status; } uint oid_rt_pro_burst_read_register_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); struct burst_rw_reg *pBstRwReg; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; pBstRwReg = (struct burst_rw_reg *)poid_par_priv->information_buf; r8712_read_mem(Adapter, pBstRwReg->offset, (u32)pBstRwReg->len, pBstRwReg->Data); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_burst_write_register_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct 
_adapter *) (poid_par_priv->adapter_context); struct burst_rw_reg *pBstRwReg; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; pBstRwReg = (struct burst_rw_reg *)poid_par_priv->information_buf; r8712_write_mem(Adapter, pBstRwReg->offset, (u32)pBstRwReg->len, pBstRwReg->Data); return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv) { return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); struct eeprom_rw_param *pEEPROM; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; pEEPROM = (struct eeprom_rw_param *)poid_par_priv->information_buf; pEEPROM->value = r8712_eeprom_read16(Adapter, (u16)(pEEPROM->offset >> 1)); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); struct eeprom_rw_param *pEEPROM; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; pEEPROM = (struct eeprom_rw_param *)poid_par_priv->information_buf; r8712_eeprom_write16(Adapter, (u16)(pEEPROM->offset >> 1), pEEPROM->value); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); struct mp_wiparam *pwi_param; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(struct mp_wiparam)) return RNDIS_STATUS_INVALID_LENGTH; if (Adapter->mppriv.workparam.bcompleted == false) return RNDIS_STATUS_NOT_ACCEPTED; pwi_param = (struct mp_wiparam *)poid_par_priv->information_buf; memcpy(pwi_param, 
&Adapter->mppriv.workparam, sizeof(struct mp_wiparam)); Adapter->mppriv.act_in_progress = false; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(uint) * 2) return RNDIS_STATUS_INVALID_LENGTH; if (*(uint *)poid_par_priv->information_buf == 1) Adapter->mppriv.rx_pktloss = 0; *((uint *)poid_par_priv->information_buf+1) = Adapter->mppriv.rx_pktloss; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return RNDIS_STATUS_SUCCESS; } uint oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv) { if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; return RNDIS_STATUS_SUCCESS; } uint oid_rt_wr_attrib_mem_hdl(struct oid_par_priv *poid_par_priv) { if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (r8712_setrfintfs_cmd(Adapter, *(unsigned char *) poid_par_priv->information_buf) == _FAIL) status = RNDIS_STATUS_NOT_ACCEPTED; return status; } uint oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; memcpy(poid_par_priv->information_buf, (unsigned char *)&Adapter->mppriv.rxstat, sizeof(struct recv_stat)); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return status; } uint 
oid_rt_pro_cfg_debug_message_hdl(struct oid_par_priv *poid_par_priv) { return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (r8712_setdatarate_cmd(Adapter, poid_par_priv->information_buf) != _SUCCESS) status = RNDIS_STATUS_NOT_ACCEPTED; return status; } uint oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (Adapter->mppriv.act_in_progress == true) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u8)) return RNDIS_STATUS_INVALID_LENGTH; /*init workparam*/ Adapter->mppriv.act_in_progress = true; Adapter->mppriv.workparam.bcompleted = false; Adapter->mppriv.workparam.act_type = MPT_GET_THERMAL_METER; Adapter->mppriv.workparam.io_offset = 0; Adapter->mppriv.workparam.io_value = 0xFFFFFFFF; r8712_GetThermalMeter(Adapter, &Adapter->mppriv.workparam.io_value); Adapter->mppriv.workparam.bcompleted = true; Adapter->mppriv.act_in_progress = false; *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.workparam.io_value; *poid_par_priv->bytes_rw = sizeof(u32); return status; } uint oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u8)) return RNDIS_STATUS_INVALID_LENGTH; if (!r8712_setptm_cmd(Adapter, *((u8 *)poid_par_priv->information_buf))) status = RNDIS_STATUS_NOT_ACCEPTED; return status; } uint 
oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff}; uint status = RNDIS_STATUS_SUCCESS; u32 ratevalue; u8 datarates[NumRates]; int i; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; ratevalue = *((u32 *)poid_par_priv->information_buf); for (i = 0; i < NumRates; i++) { if (ratevalue == mpdatarate[i]) datarates[i] = mpdatarate[i]; else datarates[i] = 0xff; } if (r8712_setbasicrate_cmd(Adapter, datarates) != _SUCCESS) status = RNDIS_STATUS_NOT_ACCEPTED; return status; } uint oid_rt_pro_qry_pwrstate_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < 8) return RNDIS_STATUS_INVALID_LENGTH; *poid_par_priv->bytes_rw = 8; memcpy(poid_par_priv->information_buf, &(Adapter->pwrctrlpriv.pwr_mode), 8); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint pwr_mode, smart_ps; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_rw = 0; *poid_par_priv->bytes_needed = 8; if (poid_par_priv->information_buf_len < 8) return RNDIS_STATUS_INVALID_LENGTH; pwr_mode = *(uint *)(poid_par_priv->information_buf); smart_ps = *(uint *)((addr_t)poid_par_priv->information_buf + 4); if (pwr_mode != Adapter->pwrctrlpriv.pwr_mode || smart_ps != Adapter->pwrctrlpriv.smart_ps) r8712_set_ps_mode(Adapter, pwr_mode, smart_ps); *poid_par_priv->bytes_rw = 8; return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter 
*Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct setratable_parm *prate_table; u8 res; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_needed = sizeof(struct setratable_parm); if (poid_par_priv->information_buf_len < sizeof(struct setratable_parm)) return RNDIS_STATUS_INVALID_LENGTH; prate_table = (struct setratable_parm *)poid_par_priv->information_buf; res = r8712_setrttbl_cmd(Adapter, prate_table); if (res == _FAIL) status = RNDIS_STATUS_FAILURE; return status; } uint oid_rt_pro_h2c_get_rate_table_hdl(struct oid_par_priv *poid_par_priv) { if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); struct security_priv *psecuritypriv = &Adapter->securitypriv; enum ENCRY_CTRL_STATE encry_mode = 0; *poid_par_priv->bytes_needed = sizeof(u8); if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed) return RNDIS_STATUS_INVALID_LENGTH; if (poid_par_priv->type_of_oid == SET_OID) { encry_mode = *((u8 *)poid_par_priv->information_buf); switch (encry_mode) { case HW_CONTROL: psecuritypriv->sw_decrypt = false; psecuritypriv->sw_encrypt = false; break; case SW_CONTROL: psecuritypriv->sw_decrypt = true; psecuritypriv->sw_encrypt = true; break; case HW_ENCRY_SW_DECRY: psecuritypriv->sw_decrypt = true; psecuritypriv->sw_encrypt = false; break; case SW_ENCRY_HW_DECRY: psecuritypriv->sw_decrypt = false; psecuritypriv->sw_encrypt = true; break; } } else { if ((psecuritypriv->sw_encrypt == false) && (psecuritypriv->sw_decrypt == false)) encry_mode = HW_CONTROL; else if ((psecuritypriv->sw_encrypt == false) && (psecuritypriv->sw_decrypt == true)) encry_mode = HW_ENCRY_SW_DECRY; else if ((psecuritypriv->sw_encrypt == true) && (psecuritypriv->sw_decrypt == 
false)) encry_mode = SW_ENCRY_HW_DECRY; else if ((psecuritypriv->sw_encrypt == true) && (psecuritypriv->sw_decrypt == true)) encry_mode = SW_CONTROL; *(u8 *)poid_par_priv->information_buf = encry_mode; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; } return RNDIS_STATUS_SUCCESS; } /*----------------------------------------------------------------------*/ uint oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct sta_info *psta = NULL; u8 *macaddr; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_needed = ETH_ALEN; if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed) return RNDIS_STATUS_INVALID_LENGTH; macaddr = (u8 *) poid_par_priv->information_buf; psta = r8712_get_stainfo(&Adapter->stapriv, macaddr); if (psta == NULL) { /* the sta in sta_info_queue => do nothing*/ psta = r8712_alloc_stainfo(&Adapter->stapriv, macaddr); if (psta == NULL) status = RNDIS_STATUS_FAILURE; } return status; } /*-------------------------------------------------------------------------*/ uint oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); unsigned long irqL; uint status = RNDIS_STATUS_SUCCESS; struct sta_info *psta = NULL; u8 *macaddr; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_needed = ETH_ALEN; if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed) return RNDIS_STATUS_INVALID_LENGTH; macaddr = (u8 *)poid_par_priv->information_buf; psta = r8712_get_stainfo(&Adapter->stapriv, macaddr); if (psta != NULL) { spin_lock_irqsave(&(Adapter->stapriv.sta_hash_lock), irqL); r8712_free_stainfo(Adapter, psta); spin_unlock_irqrestore(&(Adapter->stapriv.sta_hash_lock), irqL); } return status; } 
/*--------------------------------------------------------------------------*/ static u32 mp_query_drv_var(struct _adapter *padapter, u8 offset, u32 var) { return var; } uint oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct DR_VARIABLE_STRUCT *pdrv_var; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; *poid_par_priv->bytes_needed = sizeof(struct DR_VARIABLE_STRUCT); if (poid_par_priv->information_buf_len < *poid_par_priv->bytes_needed) return RNDIS_STATUS_INVALID_LENGTH; pdrv_var = (struct DR_VARIABLE_STRUCT *)poid_par_priv->information_buf; pdrv_var->variable = mp_query_drv_var(Adapter, pdrv_var->offset, pdrv_var->variable); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return status; } /*--------------------------------------------------------------------------*/ uint oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv) { return RNDIS_STATUS_SUCCESS; } /*------------------------------------------------------------------------*/ uint oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct EFUSE_ACCESS_STRUCT *pefuse; u8 *data; u16 addr = 0, cnts = 0; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(struct EFUSE_ACCESS_STRUCT)) return RNDIS_STATUS_INVALID_LENGTH; pefuse = (struct EFUSE_ACCESS_STRUCT *)poid_par_priv->information_buf; addr = pefuse->start_addr; cnts = pefuse->cnts; data = pefuse->data; memset(data, 0xFF, cnts); if ((addr > 511) || (cnts < 1) || (cnts > 512) || (addr + cnts) > EFUSE_MAX_SIZE) return RNDIS_STATUS_NOT_ACCEPTED; if (r8712_efuse_access(Adapter, true, addr, cnts, data) == false) status = RNDIS_STATUS_FAILURE; 
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return status; } /*------------------------------------------------------------------------*/ uint oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct EFUSE_ACCESS_STRUCT *pefuse; u8 *data; u16 addr = 0, cnts = 0; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; pefuse = (struct EFUSE_ACCESS_STRUCT *)poid_par_priv->information_buf; addr = pefuse->start_addr; cnts = pefuse->cnts; data = pefuse->data; if ((addr > 511) || (cnts < 1) || (cnts > 512) || (addr + cnts) > r8712_efuse_get_max_size(Adapter)) return RNDIS_STATUS_NOT_ACCEPTED; if (r8712_efuse_access(Adapter, false, addr, cnts, data) == false) status = RNDIS_STATUS_FAILURE; return status; } /*----------------------------------------------------------------------*/ uint oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct PGPKT_STRUCT *ppgpkt; *poid_par_priv->bytes_rw = 0; if (poid_par_priv->information_buf_len < sizeof(struct PGPKT_STRUCT)) return RNDIS_STATUS_INVALID_LENGTH; ppgpkt = (struct PGPKT_STRUCT *)poid_par_priv->information_buf; if (poid_par_priv->type_of_oid == QUERY_OID) { if (r8712_efuse_pg_packet_read(Adapter, ppgpkt->offset, ppgpkt->data) == true) *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; else status = RNDIS_STATUS_FAILURE; } else { if (r8712_efuse_reg_init(Adapter) == true) { if (r8712_efuse_pg_packet_write(Adapter, ppgpkt->offset, ppgpkt->word_en, ppgpkt->data) == true) *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; else status = RNDIS_STATUS_FAILURE; r8712_efuse_reg_uninit(Adapter); } else status = RNDIS_STATUS_FAILURE; } return status; } uint oid_rt_get_efuse_current_size_hdl(struct oid_par_priv 
*poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(int)) return RNDIS_STATUS_INVALID_LENGTH; r8712_efuse_reg_init(Adapter); *(int *)poid_par_priv->information_buf = r8712_efuse_get_current_size(Adapter); r8712_efuse_reg_uninit(Adapter); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return status; } uint oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; *(int *)poid_par_priv->information_buf = r8712_efuse_get_max_size(Adapter); *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return status; } uint oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; if (poid_par_priv->type_of_oid == QUERY_OID) status = oid_rt_pro_read_efuse_hdl(poid_par_priv); else status = oid_rt_pro_write_efuse_hdl(poid_par_priv); return status; } uint oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; u8 *data; *poid_par_priv->bytes_rw = 0; if (poid_par_priv->information_buf_len < EFUSE_MAP_MAX_SIZE) return RNDIS_STATUS_INVALID_LENGTH; data = (u8 *)poid_par_priv->information_buf; if (poid_par_priv->type_of_oid == QUERY_OID) { if (r8712_efuse_map_read(Adapter, 0, EFUSE_MAP_MAX_SIZE, data)) *poid_par_priv->bytes_rw = EFUSE_MAP_MAX_SIZE; else status = RNDIS_STATUS_FAILURE; } else { /* SET_OID */ if (r8712_efuse_reg_init(Adapter) == true) { if (r8712_efuse_map_write(Adapter, 0, EFUSE_MAP_MAX_SIZE, 
data)) *poid_par_priv->bytes_rw = EFUSE_MAP_MAX_SIZE; else status = RNDIS_STATUS_FAILURE; r8712_efuse_reg_uninit(Adapter); } else { status = RNDIS_STATUS_FAILURE; } } return status; } uint oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; u32 bandwidth; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; bandwidth = *((u32 *)poid_par_priv->information_buf);/*4*/ if (bandwidth != HT_CHANNEL_WIDTH_20) bandwidth = HT_CHANNEL_WIDTH_40; Adapter->mppriv.curr_bandwidth = (u8)bandwidth; r8712_SwitchBandwidth(Adapter); return status; } uint oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; u32 crystal_cap = 0; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; crystal_cap = *((u32 *)poid_par_priv->information_buf);/*4*/ if (crystal_cap > 0xf) return RNDIS_STATUS_NOT_ACCEPTED; Adapter->mppriv.curr_crystalcap = crystal_cap; r8712_SetCrystalCap(Adapter); return status; } uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); u8 rx_pkt_type; u32 rcr_val32; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u8)) return RNDIS_STATUS_INVALID_LENGTH; rx_pkt_type = *((u8 *)poid_par_priv->information_buf);/*4*/ rcr_val32 = r8712_read32(Adapter, RCR);/*RCR = 0x10250048*/ rcr_val32 &= ~(RCR_CBSSID | RCR_AB | RCR_AM | RCR_APM | RCR_AAP); switch (rx_pkt_type) { case RX_PKT_BROADCAST: rcr_val32 |= (RCR_AB | RCR_AM | RCR_APM | RCR_AAP | 
RCR_ACRC32); break; case RX_PKT_DEST_ADDR: rcr_val32 |= (RCR_AB | RCR_AM | RCR_APM | RCR_AAP | RCR_ACRC32); break; case RX_PKT_PHY_MATCH: rcr_val32 |= (RCR_APM|RCR_ACRC32); break; default: rcr_val32 &= ~(RCR_AAP | RCR_APM | RCR_AM | RCR_AB | RCR_ACRC32); break; } if (rx_pkt_type == RX_PKT_DEST_ADDR) Adapter->mppriv.check_mp_pkt = 1; else Adapter->mppriv.check_mp_pkt = 0; r8712_write32(Adapter, RCR, rcr_val32); return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); u32 txagc; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; txagc = *(u32 *)poid_par_priv->information_buf; r8712_SetTxAGCOffset(Adapter, txagc); return RNDIS_STATUS_SUCCESS; } uint oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); uint status = RNDIS_STATUS_SUCCESS; struct mlme_priv *pmlmepriv = &Adapter->mlmepriv; struct mp_priv *pmppriv = &Adapter->mppriv; u32 type; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; type = *(u32 *)poid_par_priv->information_buf; if (_LOOPBOOK_MODE_ == type) { pmppriv->mode = type; set_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE); /*append txdesc*/ } else if (_2MAC_MODE_ == type) { pmppriv->mode = type; _clr_fwstate_(pmlmepriv, WIFI_MP_LPBK_STATE); } else status = RNDIS_STATUS_NOT_ACCEPTED; return status; } /*--------------------------------------------------------------------------*/ /*Linux*/ unsigned int mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv) { return _SUCCESS; } /*-------------------------------------------------------------------------*/ uint oid_rt_set_power_down_hdl(struct oid_par_priv 
*poid_par_priv) { u8 bpwrup; if (poid_par_priv->type_of_oid != SET_OID) return RNDIS_STATUS_NOT_ACCEPTED; bpwrup = *(u8 *)poid_par_priv->information_buf; /*CALL the power_down function*/ return RNDIS_STATUS_SUCCESS; } /*-------------------------------------------------------------------------- */ uint oid_rt_get_power_mode_hdl(struct oid_par_priv *poid_par_priv) { struct _adapter *Adapter = (struct _adapter *) (poid_par_priv->adapter_context); if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; if (poid_par_priv->information_buf_len < sizeof(u32)) return RNDIS_STATUS_INVALID_LENGTH; *(int *)poid_par_priv->information_buf = Adapter->registrypriv.low_power ? POWER_LOW : POWER_NORMAL; *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len; return RNDIS_STATUS_SUCCESS; }
gpl-2.0
drewis/android_kernel_htc_ruby
drivers/net/wireless/iwlegacy/iwl-led.c
2930
5932
/****************************************************************************** * * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/wireless.h> #include <net/mac80211.h> #include <linux/etherdevice.h> #include <asm/unaligned.h> #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" /* default: IWL_LED_BLINK(0) using blinking index table */ static int led_mode; module_param(led_mode, int, S_IRUGO); MODULE_PARM_DESC(led_mode, "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); /* Throughput OFF time(ms) ON time (ms) * >300 25 25 * >200 to 300 40 40 * >100 to 200 55 55 * >70 to 100 65 65 * >50 to 70 75 75 * >20 to 50 85 85 * >10 to 20 95 95 * >5 to 10 110 110 * >1 to 5 130 130 * >0 to 1 167 167 * <=0 SOLID ON */ static const struct ieee80211_tpt_blink iwl_blink[] 
= { { .throughput = 0, .blink_time = 334 }, { .throughput = 1 * 1024 - 1, .blink_time = 260 }, { .throughput = 5 * 1024 - 1, .blink_time = 220 }, { .throughput = 10 * 1024 - 1, .blink_time = 190 }, { .throughput = 20 * 1024 - 1, .blink_time = 170 }, { .throughput = 50 * 1024 - 1, .blink_time = 150 }, { .throughput = 70 * 1024 - 1, .blink_time = 130 }, { .throughput = 100 * 1024 - 1, .blink_time = 110 }, { .throughput = 200 * 1024 - 1, .blink_time = 80 }, { .throughput = 300 * 1024 - 1, .blink_time = 50 }, }; /* * Adjust led blink rate to compensate on a MAC Clock difference on every HW * Led blink rate analysis showed an average deviation of 0% on 3945, * 5% on 4965 HW. * Need to compensate on the led on/off time per HW according to the deviation * to achieve the desired led frequency * The calculation is: (100-averageDeviation)/100 * blinkTime * For code efficiency the calculation will be: * compensation = (100 - averageDeviation) * 64 / 100 * NewBlinkTime = (compensation * BlinkTime) / 64 */ static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv, u8 time, u16 compensation) { if (!compensation) { IWL_ERR(priv, "undefined blink compensation: " "use pre-defined blinking time\n"); return time; } return (u8)((time * compensation) >> 6); } /* Set led pattern command */ static int iwl_legacy_led_cmd(struct iwl_priv *priv, unsigned long on, unsigned long off) { struct iwl_led_cmd led_cmd = { .id = IWL_LED_LINK, .interval = IWL_DEF_LED_INTRVL }; int ret; if (!test_bit(STATUS_READY, &priv->status)) return -EBUSY; if (priv->blink_on == on && priv->blink_off == off) return 0; if (off == 0) { /* led is SOLID_ON */ on = IWL_LED_SOLID; } IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", priv->cfg->base_params->led_compensation); led_cmd.on = iwl_legacy_blink_compensation(priv, on, priv->cfg->base_params->led_compensation); led_cmd.off = iwl_legacy_blink_compensation(priv, off, priv->cfg->base_params->led_compensation); ret = priv->cfg->ops->led->cmd(priv, 
&led_cmd); if (!ret) { priv->blink_on = on; priv->blink_off = off; } return ret; } static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); unsigned long on = 0; if (brightness > 0) on = IWL_LED_SOLID; iwl_legacy_led_cmd(priv, on, 0); } static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); return iwl_legacy_led_cmd(priv, *delay_on, *delay_off); } void iwl_legacy_leds_init(struct iwl_priv *priv) { int mode = led_mode; int ret; if (mode == IWL_LED_DEFAULT) mode = priv->cfg->led_mode; priv->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(priv->hw->wiphy)); priv->led.brightness_set = iwl_legacy_led_brightness_set; priv->led.blink_set = iwl_legacy_led_blink_set; priv->led.max_brightness = 1; switch (mode) { case IWL_LED_DEFAULT: WARN_ON(1); break; case IWL_LED_BLINK: priv->led.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw, IEEE80211_TPT_LEDTRIG_FL_CONNECTED, iwl_blink, ARRAY_SIZE(iwl_blink)); break; case IWL_LED_RF_STATE: priv->led.default_trigger = ieee80211_get_radio_led_name(priv->hw); break; } ret = led_classdev_register(&priv->pci_dev->dev, &priv->led); if (ret) { kfree(priv->led.name); return; } priv->led_registered = true; } EXPORT_SYMBOL(iwl_legacy_leds_init); void iwl_legacy_leds_exit(struct iwl_priv *priv) { if (!priv->led_registered) return; led_classdev_unregister(&priv->led); kfree(priv->led.name); } EXPORT_SYMBOL(iwl_legacy_leds_exit);
gpl-2.0
UniqueDroid/lge-kernel-x3-p880
drivers/mtd/chips/cfi_util.c
3186
6868
/* * Common Flash Interface support: * Generic utility functions not dependent on command set * * Copyright (C) 2002 Red Hat * Copyright (C) 2003 STMicroelectronics Limited * * This code is covered by the GPL. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <asm/io.h> #include <asm/byteorder.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/mtd/xip.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> int __xipram cfi_qry_present(struct map_info *map, __u32 base, struct cfi_private *cfi) { int osf = cfi->interleave * cfi->device_type; /* scale factor */ map_word val[3]; map_word qry[3]; qry[0] = cfi_build_cmd('Q', map, cfi); qry[1] = cfi_build_cmd('R', map, cfi); qry[2] = cfi_build_cmd('Y', map, cfi); val[0] = map_read(map, base + osf*0x10); val[1] = map_read(map, base + osf*0x11); val[2] = map_read(map, base + osf*0x12); if (!map_word_equal(map, qry[0], val[0])) return 0; if (!map_word_equal(map, qry[1], val[1])) return 0; if (!map_word_equal(map, qry[2], val[2])) return 0; return 1; /* "QRY" found */ } EXPORT_SYMBOL_GPL(cfi_qry_present); int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map, struct cfi_private *cfi) { cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; /* QRY not found probably we deal with some odd CFI chips */ /* Some revisions of some old Intel chips? 
*/ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; /* ST M29DW chips */ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; /* some old SST chips, e.g. 39VF160x/39VF320x */ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; /* SST 39VF640xB */ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; /* QRY not found */ return 0; } EXPORT_SYMBOL_GPL(cfi_qry_mode_on); void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map, struct cfi_private *cfi) { cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); /* M29W128G flashes require an additional reset command when exit qry mode */ if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E)) cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); } EXPORT_SYMBOL_GPL(cfi_qry_mode_off); struct cfi_extquery * __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name) { struct cfi_private *cfi = map->fldrv_priv; __u32 base = 0; // cfi->chips[0].start; int ofs_factor = cfi->interleave * cfi->device_type; int i; struct 
cfi_extquery *extp = NULL; if (!adr) goto out; printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr); extp = kmalloc(size, GFP_KERNEL); if (!extp) { printk(KERN_ERR "Failed to allocate memory\n"); goto out; } #ifdef CONFIG_MTD_XIP local_irq_disable(); #endif /* Switch it into Query Mode */ cfi_qry_mode_on(base, map, cfi); /* Read in the Extended Query Table */ for (i=0; i<size; i++) { ((unsigned char *)extp)[i] = cfi_read_query(map, base+((adr+i)*ofs_factor)); } /* Make sure it returns to read mode */ cfi_qry_mode_off(base, map, cfi); #ifdef CONFIG_MTD_XIP (void) map_read(map, base); xip_iprefetch(); local_irq_enable(); #endif out: return extp; } EXPORT_SYMBOL(cfi_read_pri); void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; struct cfi_fixup *f; for (f=fixups; f->fixup; f++) { if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) && ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) { f->fixup(mtd); } } } EXPORT_SYMBOL(cfi_fixup); int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob, loff_t ofs, size_t len, void *thunk) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; unsigned long adr; int chipnum, ret = 0; int i, first; struct mtd_erase_region_info *regions = mtd->eraseregions; if (ofs > mtd->size) return -EINVAL; if ((len + ofs) > mtd->size) return -EINVAL; /* Check that both start and end of the requested erase are * aligned with the erasesize at the appropriate addresses. */ i = 0; /* Skip all erase regions which are ended before the start of the requested erase. Actually, to save on the calculations, we skip to the first erase region which starts after the start of the requested erase, and then go back one. */ while (i < mtd->numeraseregions && ofs >= regions[i].offset) i++; i--; /* OK, now i is pointing at the erase region in which this erase request starts. 
Check the start of the requested erase range is aligned with the erase size which is in effect here. */ if (ofs & (regions[i].erasesize-1)) return -EINVAL; /* Remember the erase region we start on */ first = i; /* Next, check that the end of the requested erase is aligned * with the erase region at that address. */ while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset) i++; /* As before, drop back one to point at the region in which the address actually falls */ i--; if ((ofs + len) & (regions[i].erasesize-1)) return -EINVAL; chipnum = ofs >> cfi->chipshift; adr = ofs - (chipnum << cfi->chipshift); i=first; while(len) { int size = regions[i].erasesize; ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk); if (ret) return ret; adr += size; ofs += size; len -= size; if (ofs == regions[i].offset + size * regions[i].numblocks) i++; if (adr >> cfi->chipshift) { adr = 0; chipnum++; if (chipnum >= cfi->numchips) break; } } return 0; } EXPORT_SYMBOL(cfi_varsize_frob); MODULE_LICENSE("GPL");
gpl-2.0
vasudev-33/os
Documentation/vm/page-types.c
3186
23683
/* * page-types: Tool for querying page flags * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; version 2. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should find a copy of v2 of the GNU General Public License somewhere on * your Linux system; if not, write to the Free Software Foundation, Inc., 59 * Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * Copyright (C) 2009 Intel corporation * * Authors: Wu Fengguang <fengguang.wu@intel.com> */ #define _LARGEFILE64_SOURCE #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdint.h> #include <stdarg.h> #include <string.h> #include <getopt.h> #include <limits.h> #include <assert.h> #include <sys/types.h> #include <sys/errno.h> #include <sys/fcntl.h> #include <sys/mount.h> #include <sys/statfs.h> #include "../../include/linux/magic.h" #ifndef MAX_PATH # define MAX_PATH 256 #endif #ifndef STR # define _STR(x) #x # define STR(x) _STR(x) #endif /* * pagemap kernel ABI bits */ #define PM_ENTRY_BYTES sizeof(uint64_t) #define PM_STATUS_BITS 3 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) #define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK) #define PM_PSHIFT_BITS 6 #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) #define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) #define PM_PRESENT PM_STATUS(4LL) #define PM_SWAP PM_STATUS(2LL) /* * kernel page flags */ #define KPF_BYTES 8 #define 
PROC_KPAGEFLAGS "/proc/kpageflags" /* copied from kpageflags_read() */ #define KPF_LOCKED 0 #define KPF_ERROR 1 #define KPF_REFERENCED 2 #define KPF_UPTODATE 3 #define KPF_DIRTY 4 #define KPF_LRU 5 #define KPF_ACTIVE 6 #define KPF_SLAB 7 #define KPF_WRITEBACK 8 #define KPF_RECLAIM 9 #define KPF_BUDDY 10 /* [11-20] new additions in 2.6.31 */ #define KPF_MMAP 11 #define KPF_ANON 12 #define KPF_SWAPCACHE 13 #define KPF_SWAPBACKED 14 #define KPF_COMPOUND_HEAD 15 #define KPF_COMPOUND_TAIL 16 #define KPF_HUGE 17 #define KPF_UNEVICTABLE 18 #define KPF_HWPOISON 19 #define KPF_NOPAGE 20 #define KPF_KSM 21 /* [32-] kernel hacking assistances */ #define KPF_RESERVED 32 #define KPF_MLOCKED 33 #define KPF_MAPPEDTODISK 34 #define KPF_PRIVATE 35 #define KPF_PRIVATE_2 36 #define KPF_OWNER_PRIVATE 37 #define KPF_ARCH 38 #define KPF_UNCACHED 39 /* [48-] take some arbitrary free slots for expanding overloaded flags * not part of kernel API */ #define KPF_READAHEAD 48 #define KPF_SLOB_FREE 49 #define KPF_SLUB_FROZEN 50 #define KPF_SLUB_DEBUG 51 #define KPF_ALL_BITS ((uint64_t)~0ULL) #define KPF_HACKERS_BITS (0xffffULL << 32) #define KPF_OVERLOADED_BITS (0xffffULL << 48) #define BIT(name) (1ULL << KPF_##name) #define BITS_COMPOUND (BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL)) static const char *page_flag_names[] = { [KPF_LOCKED] = "L:locked", [KPF_ERROR] = "E:error", [KPF_REFERENCED] = "R:referenced", [KPF_UPTODATE] = "U:uptodate", [KPF_DIRTY] = "D:dirty", [KPF_LRU] = "l:lru", [KPF_ACTIVE] = "A:active", [KPF_SLAB] = "S:slab", [KPF_WRITEBACK] = "W:writeback", [KPF_RECLAIM] = "I:reclaim", [KPF_BUDDY] = "B:buddy", [KPF_MMAP] = "M:mmap", [KPF_ANON] = "a:anonymous", [KPF_SWAPCACHE] = "s:swapcache", [KPF_SWAPBACKED] = "b:swapbacked", [KPF_COMPOUND_HEAD] = "H:compound_head", [KPF_COMPOUND_TAIL] = "T:compound_tail", [KPF_HUGE] = "G:huge", [KPF_UNEVICTABLE] = "u:unevictable", [KPF_HWPOISON] = "X:hwpoison", [KPF_NOPAGE] = "n:nopage", [KPF_KSM] = "x:ksm", [KPF_RESERVED] = "r:reserved", [KPF_MLOCKED] 
= "m:mlocked", [KPF_MAPPEDTODISK] = "d:mappedtodisk", [KPF_PRIVATE] = "P:private", [KPF_PRIVATE_2] = "p:private_2", [KPF_OWNER_PRIVATE] = "O:owner_private", [KPF_ARCH] = "h:arch", [KPF_UNCACHED] = "c:uncached", [KPF_READAHEAD] = "I:readahead", [KPF_SLOB_FREE] = "P:slob_free", [KPF_SLUB_FROZEN] = "A:slub_frozen", [KPF_SLUB_DEBUG] = "E:slub_debug", }; static const char *debugfs_known_mountpoints[] = { "/sys/kernel/debug", "/debug", 0, }; /* * data structures */ static int opt_raw; /* for kernel developers */ static int opt_list; /* list pages (in ranges) */ static int opt_no_summary; /* don't show summary */ static pid_t opt_pid; /* process to walk */ #define MAX_ADDR_RANGES 1024 static int nr_addr_ranges; static unsigned long opt_offset[MAX_ADDR_RANGES]; static unsigned long opt_size[MAX_ADDR_RANGES]; #define MAX_VMAS 10240 static int nr_vmas; static unsigned long pg_start[MAX_VMAS]; static unsigned long pg_end[MAX_VMAS]; #define MAX_BIT_FILTERS 64 static int nr_bit_filters; static uint64_t opt_mask[MAX_BIT_FILTERS]; static uint64_t opt_bits[MAX_BIT_FILTERS]; static int page_size; static int pagemap_fd; static int kpageflags_fd; static int opt_hwpoison; static int opt_unpoison; static char hwpoison_debug_fs[MAX_PATH+1]; static int hwpoison_inject_fd; static int hwpoison_forget_fd; #define HASH_SHIFT 13 #define HASH_SIZE (1 << HASH_SHIFT) #define HASH_MASK (HASH_SIZE - 1) #define HASH_KEY(flags) (flags & HASH_MASK) static unsigned long total_pages; static unsigned long nr_pages[HASH_SIZE]; static uint64_t page_flags[HASH_SIZE]; /* * helper functions */ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define min_t(type, x, y) ({ \ type __min1 = (x); \ type __min2 = (y); \ __min1 < __min2 ? __min1 : __min2; }) #define max_t(type, x, y) ({ \ type __max1 = (x); \ type __max2 = (y); \ __max1 > __max2 ? __max1 : __max2; }) static unsigned long pages2mb(unsigned long pages) { return (pages * page_size) >> 20; } static void fatal(const char *x, ...) 
{ va_list ap; va_start(ap, x); vfprintf(stderr, x, ap); va_end(ap); exit(EXIT_FAILURE); } static int checked_open(const char *pathname, int flags) { int fd = open(pathname, flags); if (fd < 0) { perror(pathname); exit(EXIT_FAILURE); } return fd; } /* * pagemap/kpageflags routines */ static unsigned long do_u64_read(int fd, char *name, uint64_t *buf, unsigned long index, unsigned long count) { long bytes; if (index > ULONG_MAX / 8) fatal("index overflow: %lu\n", index); if (lseek(fd, index * 8, SEEK_SET) < 0) { perror(name); exit(EXIT_FAILURE); } bytes = read(fd, buf, count * 8); if (bytes < 0) { perror(name); exit(EXIT_FAILURE); } if (bytes % 8) fatal("partial read: %lu bytes\n", bytes); return bytes / 8; } static unsigned long kpageflags_read(uint64_t *buf, unsigned long index, unsigned long pages) { return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages); } static unsigned long pagemap_read(uint64_t *buf, unsigned long index, unsigned long pages) { return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages); } static unsigned long pagemap_pfn(uint64_t val) { unsigned long pfn; if (val & PM_PRESENT) pfn = PM_PFRAME(val); else pfn = 0; return pfn; } /* * page flag names */ static char *page_flag_name(uint64_t flags) { static char buf[65]; int present; int i, j; for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) { present = (flags >> i) & 1; if (!page_flag_names[i]) { if (present) fatal("unknown flag bit %d\n", i); continue; } buf[j++] = present ? 
page_flag_names[i][0] : '_'; } return buf; } static char *page_flag_longname(uint64_t flags) { static char buf[1024]; int i, n; for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) { if (!page_flag_names[i]) continue; if ((flags >> i) & 1) n += snprintf(buf + n, sizeof(buf) - n, "%s,", page_flag_names[i] + 2); } if (n) n--; buf[n] = '\0'; return buf; } /* * page list and summary */ static void show_page_range(unsigned long voffset, unsigned long offset, uint64_t flags) { static uint64_t flags0; static unsigned long voff; static unsigned long index; static unsigned long count; if (flags == flags0 && offset == index + count && (!opt_pid || voffset == voff + count)) { count++; return; } if (count) { if (opt_pid) printf("%lx\t", voff); printf("%lx\t%lx\t%s\n", index, count, page_flag_name(flags0)); } flags0 = flags; index = offset; voff = voffset; count = 1; } static void show_page(unsigned long voffset, unsigned long offset, uint64_t flags) { if (opt_pid) printf("%lx\t", voffset); printf("%lx\t%s\n", offset, page_flag_name(flags)); } static void show_summary(void) { int i; printf(" flags\tpage-count MB" " symbolic-flags\t\t\tlong-symbolic-flags\n"); for (i = 0; i < ARRAY_SIZE(nr_pages); i++) { if (nr_pages[i]) printf("0x%016llx\t%10lu %8lu %s\t%s\n", (unsigned long long)page_flags[i], nr_pages[i], pages2mb(nr_pages[i]), page_flag_name(page_flags[i]), page_flag_longname(page_flags[i])); } printf(" total\t%10lu %8lu\n", total_pages, pages2mb(total_pages)); } /* * page flag filters */ static int bit_mask_ok(uint64_t flags) { int i; for (i = 0; i < nr_bit_filters; i++) { if (opt_bits[i] == KPF_ALL_BITS) { if ((flags & opt_mask[i]) == 0) return 0; } else { if ((flags & opt_mask[i]) != opt_bits[i]) return 0; } } return 1; } static uint64_t expand_overloaded_flags(uint64_t flags) { /* SLOB/SLUB overload several page flags */ if (flags & BIT(SLAB)) { if (flags & BIT(PRIVATE)) flags ^= BIT(PRIVATE) | BIT(SLOB_FREE); if (flags & BIT(ACTIVE)) flags ^= BIT(ACTIVE) | 
BIT(SLUB_FROZEN); if (flags & BIT(ERROR)) flags ^= BIT(ERROR) | BIT(SLUB_DEBUG); } /* PG_reclaim is overloaded as PG_readahead in the read path */ if ((flags & (BIT(RECLAIM) | BIT(WRITEBACK))) == BIT(RECLAIM)) flags ^= BIT(RECLAIM) | BIT(READAHEAD); return flags; } static uint64_t well_known_flags(uint64_t flags) { /* hide flags intended only for kernel hacker */ flags &= ~KPF_HACKERS_BITS; /* hide non-hugeTLB compound pages */ if ((flags & BITS_COMPOUND) && !(flags & BIT(HUGE))) flags &= ~BITS_COMPOUND; return flags; } static uint64_t kpageflags_flags(uint64_t flags) { flags = expand_overloaded_flags(flags); if (!opt_raw) flags = well_known_flags(flags); return flags; } /* verify that a mountpoint is actually a debugfs instance */ static int debugfs_valid_mountpoint(const char *debugfs) { struct statfs st_fs; if (statfs(debugfs, &st_fs) < 0) return -ENOENT; else if (st_fs.f_type != (long) DEBUGFS_MAGIC) return -ENOENT; return 0; } /* find the path to the mounted debugfs */ static const char *debugfs_find_mountpoint(void) { const char **ptr; char type[100]; FILE *fp; ptr = debugfs_known_mountpoints; while (*ptr) { if (debugfs_valid_mountpoint(*ptr) == 0) { strcpy(hwpoison_debug_fs, *ptr); return hwpoison_debug_fs; } ptr++; } /* give up and parse /proc/mounts */ fp = fopen("/proc/mounts", "r"); if (fp == NULL) perror("Can't open /proc/mounts for read"); while (fscanf(fp, "%*s %" STR(MAX_PATH) "s %99s %*s %*d %*d\n", hwpoison_debug_fs, type) == 2) { if (strcmp(type, "debugfs") == 0) break; } fclose(fp); if (strcmp(type, "debugfs") != 0) return NULL; return hwpoison_debug_fs; } /* mount the debugfs somewhere if it's not mounted */ static void debugfs_mount(void) { const char **ptr; /* see if it's already mounted */ if (debugfs_find_mountpoint()) return; ptr = debugfs_known_mountpoints; while (*ptr) { if (mount(NULL, *ptr, "debugfs", 0, NULL) == 0) { /* save the mountpoint */ strcpy(hwpoison_debug_fs, *ptr); break; } ptr++; } if (*ptr == NULL) { perror("mount 
debugfs"); exit(EXIT_FAILURE); } } /* * page actions */ static void prepare_hwpoison_fd(void) { char buf[MAX_PATH + 1]; debugfs_mount(); if (opt_hwpoison && !hwpoison_inject_fd) { snprintf(buf, MAX_PATH, "%s/hwpoison/corrupt-pfn", hwpoison_debug_fs); hwpoison_inject_fd = checked_open(buf, O_WRONLY); } if (opt_unpoison && !hwpoison_forget_fd) { snprintf(buf, MAX_PATH, "%s/hwpoison/unpoison-pfn", hwpoison_debug_fs); hwpoison_forget_fd = checked_open(buf, O_WRONLY); } } static int hwpoison_page(unsigned long offset) { char buf[100]; int len; len = sprintf(buf, "0x%lx\n", offset); len = write(hwpoison_inject_fd, buf, len); if (len < 0) { perror("hwpoison inject"); return len; } return 0; } static int unpoison_page(unsigned long offset) { char buf[100]; int len; len = sprintf(buf, "0x%lx\n", offset); len = write(hwpoison_forget_fd, buf, len); if (len < 0) { perror("hwpoison forget"); return len; } return 0; } /* * page frame walker */ static int hash_slot(uint64_t flags) { int k = HASH_KEY(flags); int i; /* Explicitly reserve slot 0 for flags 0: the following logic * cannot distinguish an unoccupied slot from slot (flags==0). 
*/ if (flags == 0) return 0; /* search through the remaining (HASH_SIZE-1) slots */ for (i = 1; i < ARRAY_SIZE(page_flags); i++, k++) { if (!k || k >= ARRAY_SIZE(page_flags)) k = 1; if (page_flags[k] == 0) { page_flags[k] = flags; return k; } if (page_flags[k] == flags) return k; } fatal("hash table full: bump up HASH_SHIFT?\n"); exit(EXIT_FAILURE); } static void add_page(unsigned long voffset, unsigned long offset, uint64_t flags) { flags = kpageflags_flags(flags); if (!bit_mask_ok(flags)) return; if (opt_hwpoison) hwpoison_page(offset); if (opt_unpoison) unpoison_page(offset); if (opt_list == 1) show_page_range(voffset, offset, flags); else if (opt_list == 2) show_page(voffset, offset, flags); nr_pages[hash_slot(flags)]++; total_pages++; } #define KPAGEFLAGS_BATCH (64 << 10) /* 64k pages */ static void walk_pfn(unsigned long voffset, unsigned long index, unsigned long count) { uint64_t buf[KPAGEFLAGS_BATCH]; unsigned long batch; long pages; unsigned long i; while (count) { batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH); pages = kpageflags_read(buf, index, batch); if (pages == 0) break; for (i = 0; i < pages; i++) add_page(voffset + i, index + i, buf[i]); index += pages; count -= pages; } } #define PAGEMAP_BATCH (64 << 10) static void walk_vma(unsigned long index, unsigned long count) { uint64_t buf[PAGEMAP_BATCH]; unsigned long batch; unsigned long pages; unsigned long pfn; unsigned long i; while (count) { batch = min_t(unsigned long, count, PAGEMAP_BATCH); pages = pagemap_read(buf, index, batch); if (pages == 0) break; for (i = 0; i < pages; i++) { pfn = pagemap_pfn(buf[i]); if (pfn) walk_pfn(index + i, pfn, 1); } index += pages; count -= pages; } } static void walk_task(unsigned long index, unsigned long count) { const unsigned long end = index + count; unsigned long start; int i = 0; while (index < end) { while (pg_end[i] <= index) if (++i >= nr_vmas) return; if (pg_start[i] >= end) return; start = max_t(unsigned long, pg_start[i], index); index = 
min_t(unsigned long, pg_end[i], end); assert(start < index); walk_vma(start, index - start); } } static void add_addr_range(unsigned long offset, unsigned long size) { if (nr_addr_ranges >= MAX_ADDR_RANGES) fatal("too many addr ranges\n"); opt_offset[nr_addr_ranges] = offset; opt_size[nr_addr_ranges] = min_t(unsigned long, size, ULONG_MAX-offset); nr_addr_ranges++; } static void walk_addr_ranges(void) { int i; kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY); if (!nr_addr_ranges) add_addr_range(0, ULONG_MAX); for (i = 0; i < nr_addr_ranges; i++) if (!opt_pid) walk_pfn(0, opt_offset[i], opt_size[i]); else walk_task(opt_offset[i], opt_size[i]); close(kpageflags_fd); } /* * user interface */ static const char *page_flag_type(uint64_t flag) { if (flag & KPF_HACKERS_BITS) return "(r)"; if (flag & KPF_OVERLOADED_BITS) return "(o)"; return " "; } static void usage(void) { int i, j; printf( "page-types [options]\n" " -r|--raw Raw mode, for kernel developers\n" " -d|--describe flags Describe flags\n" " -a|--addr addr-spec Walk a range of pages\n" " -b|--bits bits-spec Walk pages with specified bits\n" " -p|--pid pid Walk process address space\n" #if 0 /* planned features */ " -f|--file filename Walk file address space\n" #endif " -l|--list Show page details in ranges\n" " -L|--list-each Show page details one by one\n" " -N|--no-summary Don't show summary info\n" " -X|--hwpoison hwpoison pages\n" " -x|--unpoison unpoison pages\n" " -h|--help Show this usage message\n" "flags:\n" " 0x10 bitfield format, e.g.\n" " anon bit-name, e.g.\n" " 0x10,anon comma-separated list, e.g.\n" "addr-spec:\n" " N one page at offset N (unit: pages)\n" " N+M pages range from N to N+M-1\n" " N,M pages range from N to M-1\n" " N, pages range from N to end\n" " ,M pages range from 0 to M-1\n" "bits-spec:\n" " bit1,bit2 (flags & (bit1|bit2)) != 0\n" " bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n" " bit1,~bit2 (flags & (bit1|bit2)) == bit1\n" " =bit1,bit2 flags == (bit1|bit2)\n" 
"bit-names:\n" ); for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) { if (!page_flag_names[i]) continue; printf("%16s%s", page_flag_names[i] + 2, page_flag_type(1ULL << i)); if (++j > 3) { j = 0; putchar('\n'); } } printf("\n " "(r) raw mode bits (o) overloaded bits\n"); } static unsigned long long parse_number(const char *str) { unsigned long long n; n = strtoll(str, NULL, 0); if (n == 0 && str[0] != '0') fatal("invalid name or number: %s\n", str); return n; } static void parse_pid(const char *str) { FILE *file; char buf[5000]; opt_pid = parse_number(str); sprintf(buf, "/proc/%d/pagemap", opt_pid); pagemap_fd = checked_open(buf, O_RDONLY); sprintf(buf, "/proc/%d/maps", opt_pid); file = fopen(buf, "r"); if (!file) { perror(buf); exit(EXIT_FAILURE); } while (fgets(buf, sizeof(buf), file) != NULL) { unsigned long vm_start; unsigned long vm_end; unsigned long long pgoff; int major, minor; char r, w, x, s; unsigned long ino; int n; n = sscanf(buf, "%lx-%lx %c%c%c%c %llx %x:%x %lu", &vm_start, &vm_end, &r, &w, &x, &s, &pgoff, &major, &minor, &ino); if (n < 10) { fprintf(stderr, "unexpected line: %s\n", buf); continue; } pg_start[nr_vmas] = vm_start / page_size; pg_end[nr_vmas] = vm_end / page_size; if (++nr_vmas >= MAX_VMAS) { fprintf(stderr, "too many VMAs\n"); break; } } fclose(file); } static void parse_file(const char *name) { } static void parse_addr_range(const char *optarg) { unsigned long offset; unsigned long size; char *p; p = strchr(optarg, ','); if (!p) p = strchr(optarg, '+'); if (p == optarg) { offset = 0; size = parse_number(p + 1); } else if (p) { offset = parse_number(optarg); if (p[1] == '\0') size = ULONG_MAX; else { size = parse_number(p + 1); if (*p == ',') { if (size < offset) fatal("invalid range: %lu,%lu\n", offset, size); size -= offset; } } } else { offset = parse_number(optarg); size = 1; } add_addr_range(offset, size); } static void add_bits_filter(uint64_t mask, uint64_t bits) { if (nr_bit_filters >= MAX_BIT_FILTERS) fatal("too much 
bit filters\n"); opt_mask[nr_bit_filters] = mask; opt_bits[nr_bit_filters] = bits; nr_bit_filters++; } static uint64_t parse_flag_name(const char *str, int len) { int i; if (!*str || !len) return 0; if (len <= 8 && !strncmp(str, "compound", len)) return BITS_COMPOUND; for (i = 0; i < ARRAY_SIZE(page_flag_names); i++) { if (!page_flag_names[i]) continue; if (!strncmp(str, page_flag_names[i] + 2, len)) return 1ULL << i; } return parse_number(str); } static uint64_t parse_flag_names(const char *str, int all) { const char *p = str; uint64_t flags = 0; while (1) { if (*p == ',' || *p == '=' || *p == '\0') { if ((*str != '~') || (*str == '~' && all && *++str)) flags |= parse_flag_name(str, p - str); if (*p != ',') break; str = p + 1; } p++; } return flags; } static void parse_bits_mask(const char *optarg) { uint64_t mask; uint64_t bits; const char *p; p = strchr(optarg, '='); if (p == optarg) { mask = KPF_ALL_BITS; bits = parse_flag_names(p + 1, 0); } else if (p) { mask = parse_flag_names(optarg, 0); bits = parse_flag_names(p + 1, 0); } else if (strchr(optarg, '~')) { mask = parse_flag_names(optarg, 1); bits = parse_flag_names(optarg, 0); } else { mask = parse_flag_names(optarg, 0); bits = KPF_ALL_BITS; } add_bits_filter(mask, bits); } static void describe_flags(const char *optarg) { uint64_t flags = parse_flag_names(optarg, 0); printf("0x%016llx\t%s\t%s\n", (unsigned long long)flags, page_flag_name(flags), page_flag_longname(flags)); } static const struct option opts[] = { { "raw" , 0, NULL, 'r' }, { "pid" , 1, NULL, 'p' }, { "file" , 1, NULL, 'f' }, { "addr" , 1, NULL, 'a' }, { "bits" , 1, NULL, 'b' }, { "describe" , 1, NULL, 'd' }, { "list" , 0, NULL, 'l' }, { "list-each" , 0, NULL, 'L' }, { "no-summary", 0, NULL, 'N' }, { "hwpoison" , 0, NULL, 'X' }, { "unpoison" , 0, NULL, 'x' }, { "help" , 0, NULL, 'h' }, { NULL , 0, NULL, 0 } }; int main(int argc, char *argv[]) { int c; page_size = getpagesize(); while ((c = getopt_long(argc, argv, "rp:f:a:b:d:lLNXxh", opts, 
NULL)) != -1) { switch (c) { case 'r': opt_raw = 1; break; case 'p': parse_pid(optarg); break; case 'f': parse_file(optarg); break; case 'a': parse_addr_range(optarg); break; case 'b': parse_bits_mask(optarg); break; case 'd': describe_flags(optarg); exit(0); case 'l': opt_list = 1; break; case 'L': opt_list = 2; break; case 'N': opt_no_summary = 1; break; case 'X': opt_hwpoison = 1; prepare_hwpoison_fd(); break; case 'x': opt_unpoison = 1; prepare_hwpoison_fd(); break; case 'h': usage(); exit(0); default: usage(); exit(1); } } if (opt_list && opt_pid) printf("voffset\t"); if (opt_list == 1) printf("offset\tlen\tflags\n"); if (opt_list == 2) printf("offset\tflags\n"); walk_addr_ranges(); if (opt_list == 1) show_page_range(0, 0, 0); /* drain the buffer */ if (opt_no_summary) return 0; if (opt_list) printf("\n\n"); show_summary(); return 0; }
gpl-2.0
crimeofheart/n7000_tw_jb_kernel
drivers/video/backlight/corgi_lcd.c
3186
17057
/* * LCD/Backlight Driver for Sharp Zaurus Handhelds (various models) * * Copyright (c) 2004-2006 Richard Purdie * * Based on Sharp's 2.4 Backlight Driver * * Copyright (c) 2008 Marvell International Ltd. * Converted to SPI device based LCD/Backlight device driver * by Eric Miao <eric.miao@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/fb.h> #include <linux/lcd.h> #include <linux/spi/spi.h> #include <linux/spi/corgi_lcd.h> #include <linux/slab.h> #include <asm/mach/sharpsl_param.h> #define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) /* Register Addresses */ #define RESCTL_ADRS 0x00 #define PHACTRL_ADRS 0x01 #define DUTYCTRL_ADRS 0x02 #define POWERREG0_ADRS 0x03 #define POWERREG1_ADRS 0x04 #define GPOR3_ADRS 0x05 #define PICTRL_ADRS 0x06 #define POLCTRL_ADRS 0x07 /* Register Bit Definitions */ #define RESCTL_QVGA 0x01 #define RESCTL_VGA 0x00 #define POWER1_VW_ON 0x01 /* VW Supply FET ON */ #define POWER1_GVSS_ON 0x02 /* GVSS(-8V) Power Supply ON */ #define POWER1_VDD_ON 0x04 /* VDD(8V),SVSS(-4V) Power Supply ON */ #define POWER1_VW_OFF 0x00 /* VW Supply FET OFF */ #define POWER1_GVSS_OFF 0x00 /* GVSS(-8V) Power Supply OFF */ #define POWER1_VDD_OFF 0x00 /* VDD(8V),SVSS(-4V) Power Supply OFF */ #define POWER0_COM_DCLK 0x01 /* COM Voltage DC Bias DAC Serial Data Clock */ #define POWER0_COM_DOUT 0x02 /* COM Voltage DC Bias DAC Serial Data Out */ #define POWER0_DAC_ON 0x04 /* DAC Power Supply ON */ #define POWER0_COM_ON 0x08 /* COM Power Supply ON */ #define POWER0_VCC5_ON 0x10 /* VCC5 Power Supply ON */ #define POWER0_DAC_OFF 0x00 /* DAC Power Supply OFF */ #define POWER0_COM_OFF 0x00 /* COM Power Supply OFF */ #define POWER0_VCC5_OFF 0x00 /* VCC5 Power Supply OFF */ #define 
PICTRL_INIT_STATE 0x01 #define PICTRL_INIOFF 0x02 #define PICTRL_POWER_DOWN 0x04 #define PICTRL_COM_SIGNAL_OFF 0x08 #define PICTRL_DAC_SIGNAL_OFF 0x10 #define POLCTRL_SYNC_POL_FALL 0x01 #define POLCTRL_EN_POL_FALL 0x02 #define POLCTRL_DATA_POL_FALL 0x04 #define POLCTRL_SYNC_ACT_H 0x08 #define POLCTRL_EN_ACT_L 0x10 #define POLCTRL_SYNC_POL_RISE 0x00 #define POLCTRL_EN_POL_RISE 0x00 #define POLCTRL_DATA_POL_RISE 0x00 #define POLCTRL_SYNC_ACT_L 0x00 #define POLCTRL_EN_ACT_H 0x00 #define PHACTRL_PHASE_MANUAL 0x01 #define DEFAULT_PHAD_QVGA (9) #define DEFAULT_COMADJ (125) struct corgi_lcd { struct spi_device *spi_dev; struct lcd_device *lcd_dev; struct backlight_device *bl_dev; int limit_mask; int intensity; int power; int mode; char buf[2]; int gpio_backlight_on; int gpio_backlight_cont; int gpio_backlight_cont_inverted; void (*kick_battery)(void); }; static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int reg, uint8_t val); static struct corgi_lcd *the_corgi_lcd; static unsigned long corgibl_flags; #define CORGIBL_SUSPENDED 0x01 #define CORGIBL_BATTLOW 0x02 /* * This is only a pseudo I2C interface. We can't use the standard kernel * routines as the interface is write only. We just assume the data is acked... 
*/ static void lcdtg_ssp_i2c_send(struct corgi_lcd *lcd, uint8_t data) { corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, data); udelay(10); } static void lcdtg_i2c_send_bit(struct corgi_lcd *lcd, uint8_t data) { lcdtg_ssp_i2c_send(lcd, data); lcdtg_ssp_i2c_send(lcd, data | POWER0_COM_DCLK); lcdtg_ssp_i2c_send(lcd, data); } static void lcdtg_i2c_send_start(struct corgi_lcd *lcd, uint8_t base) { lcdtg_ssp_i2c_send(lcd, base | POWER0_COM_DCLK | POWER0_COM_DOUT); lcdtg_ssp_i2c_send(lcd, base | POWER0_COM_DCLK); lcdtg_ssp_i2c_send(lcd, base); } static void lcdtg_i2c_send_stop(struct corgi_lcd *lcd, uint8_t base) { lcdtg_ssp_i2c_send(lcd, base); lcdtg_ssp_i2c_send(lcd, base | POWER0_COM_DCLK); lcdtg_ssp_i2c_send(lcd, base | POWER0_COM_DCLK | POWER0_COM_DOUT); } static void lcdtg_i2c_send_byte(struct corgi_lcd *lcd, uint8_t base, uint8_t data) { int i; for (i = 0; i < 8; i++) { if (data & 0x80) lcdtg_i2c_send_bit(lcd, base | POWER0_COM_DOUT); else lcdtg_i2c_send_bit(lcd, base); data <<= 1; } } static void lcdtg_i2c_wait_ack(struct corgi_lcd *lcd, uint8_t base) { lcdtg_i2c_send_bit(lcd, base); } static void lcdtg_set_common_voltage(struct corgi_lcd *lcd, uint8_t base_data, uint8_t data) { /* Set Common Voltage to M62332FP via I2C */ lcdtg_i2c_send_start(lcd, base_data); lcdtg_i2c_send_byte(lcd, base_data, 0x9c); lcdtg_i2c_wait_ack(lcd, base_data); lcdtg_i2c_send_byte(lcd, base_data, 0x00); lcdtg_i2c_wait_ack(lcd, base_data); lcdtg_i2c_send_byte(lcd, base_data, data); lcdtg_i2c_wait_ack(lcd, base_data); lcdtg_i2c_send_stop(lcd, base_data); } static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data) { struct spi_message msg; struct spi_transfer xfer = { .len = 1, .cs_change = 1, .tx_buf = lcd->buf, }; lcd->buf[0] = ((adrs & 0x07) << 5) | (data & 0x1f); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); return spi_sync(lcd->spi_dev, &msg); } /* Set Phase Adjust */ static void lcdtg_set_phadadj(struct corgi_lcd *lcd, int mode) { int adj; switch(mode) { 
case CORGI_LCD_MODE_VGA: /* Setting for VGA */ adj = sharpsl_param.phadadj; adj = (adj < 0) ? PHACTRL_PHASE_MANUAL : PHACTRL_PHASE_MANUAL | ((adj & 0xf) << 1); break; case CORGI_LCD_MODE_QVGA: default: /* Setting for QVGA */ adj = (DEFAULT_PHAD_QVGA << 1) | PHACTRL_PHASE_MANUAL; break; } corgi_ssp_lcdtg_send(lcd, PHACTRL_ADRS, adj); } static void corgi_lcd_power_on(struct corgi_lcd *lcd) { int comadj; /* Initialize Internal Logic & Port */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, PICTRL_POWER_DOWN | PICTRL_INIOFF | PICTRL_INIT_STATE | PICTRL_COM_SIGNAL_OFF | PICTRL_DAC_SIGNAL_OFF); corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_OFF | POWER0_COM_OFF | POWER0_VCC5_OFF); corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF); /* VDD(+8V), SVSS(-4V) ON */ corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON); mdelay(3); /* DAC ON */ corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF); /* INIB = H, INI = L */ /* PICTL[0] = H , PICTL[1] = PICTL[2] = PICTL[4] = L */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, PICTRL_INIT_STATE | PICTRL_COM_SIGNAL_OFF); /* Set Common Voltage */ comadj = sharpsl_param.comadj; if (comadj < 0) comadj = DEFAULT_COMADJ; lcdtg_set_common_voltage(lcd, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF, comadj); /* VCC5 ON, DAC ON */ corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON); /* GVSS(-8V) ON, VDD ON */ corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON); mdelay(2); /* COM SIGNAL ON (PICTL[3] = L) */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, PICTRL_INIT_STATE); /* COM ON, DAC ON, VCC5_ON */ corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON | POWER0_COM_ON | POWER0_VCC5_ON); /* VW ON, GVSS ON, VDD ON */ 
corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_ON | POWER1_GVSS_ON | POWER1_VDD_ON); /* Signals output enable */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, 0); /* Set Phase Adjust */ lcdtg_set_phadadj(lcd, lcd->mode); /* Initialize for Input Signals from ATI */ corgi_ssp_lcdtg_send(lcd, POLCTRL_ADRS, POLCTRL_SYNC_POL_RISE | POLCTRL_EN_POL_RISE | POLCTRL_DATA_POL_RISE | POLCTRL_SYNC_ACT_L | POLCTRL_EN_ACT_H); udelay(1000); switch (lcd->mode) { case CORGI_LCD_MODE_VGA: corgi_ssp_lcdtg_send(lcd, RESCTL_ADRS, RESCTL_VGA); break; case CORGI_LCD_MODE_QVGA: default: corgi_ssp_lcdtg_send(lcd, RESCTL_ADRS, RESCTL_QVGA); break; } } static void corgi_lcd_power_off(struct corgi_lcd *lcd) { /* 60Hz x 2 frame = 16.7msec x 2 = 33.4 msec */ msleep(34); /* (1)VW OFF */ corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON); /* (2)COM OFF */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, PICTRL_COM_SIGNAL_OFF); corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON); /* (3)Set Common Voltage Bias 0V */ lcdtg_set_common_voltage(lcd, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON, 0); /* (4)GVSS OFF */ corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON); /* (5)VCC5 OFF */ corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF); /* (6)Set PDWN, INIOFF, DACOFF */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, PICTRL_INIOFF | PICTRL_DAC_SIGNAL_OFF | PICTRL_POWER_DOWN | PICTRL_COM_SIGNAL_OFF); /* (7)DAC OFF */ corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_DAC_OFF | POWER0_COM_OFF | POWER0_VCC5_OFF); /* (8)VDD OFF */ corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF); } static int corgi_lcd_set_mode(struct lcd_device *ld, struct fb_videomode *m) { struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev); int mode = CORGI_LCD_MODE_QVGA; if (m->xres == 640 || m->xres == 480) mode = CORGI_LCD_MODE_VGA; if (lcd->mode == mode) 
return 0; lcdtg_set_phadadj(lcd, mode); switch (mode) { case CORGI_LCD_MODE_VGA: corgi_ssp_lcdtg_send(lcd, RESCTL_ADRS, RESCTL_VGA); break; case CORGI_LCD_MODE_QVGA: default: corgi_ssp_lcdtg_send(lcd, RESCTL_ADRS, RESCTL_QVGA); break; } lcd->mode = mode; return 0; } static int corgi_lcd_set_power(struct lcd_device *ld, int power) { struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev); if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power)) corgi_lcd_power_on(lcd); if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power)) corgi_lcd_power_off(lcd); lcd->power = power; return 0; } static int corgi_lcd_get_power(struct lcd_device *ld) { struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev); return lcd->power; } static struct lcd_ops corgi_lcd_ops = { .get_power = corgi_lcd_get_power, .set_power = corgi_lcd_set_power, .set_mode = corgi_lcd_set_mode, }; static int corgi_bl_get_intensity(struct backlight_device *bd) { struct corgi_lcd *lcd = dev_get_drvdata(&bd->dev); return lcd->intensity; } static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity) { int cont; if (intensity > 0x10) intensity += 0x10; corgi_ssp_lcdtg_send(lcd, DUTYCTRL_ADRS, intensity); /* Bit 5 via GPIO_BACKLIGHT_CONT */ cont = !!(intensity & 0x20) ^ lcd->gpio_backlight_cont_inverted; if (gpio_is_valid(lcd->gpio_backlight_cont)) gpio_set_value(lcd->gpio_backlight_cont, cont); if (gpio_is_valid(lcd->gpio_backlight_on)) gpio_set_value(lcd->gpio_backlight_on, intensity); if (lcd->kick_battery) lcd->kick_battery(); lcd->intensity = intensity; return 0; } static int corgi_bl_update_status(struct backlight_device *bd) { struct corgi_lcd *lcd = dev_get_drvdata(&bd->dev); int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; if (corgibl_flags & CORGIBL_SUSPENDED) intensity = 0; if ((corgibl_flags & CORGIBL_BATTLOW) && intensity > lcd->limit_mask) intensity = lcd->limit_mask; return corgi_bl_set_intensity(lcd, 
intensity); } void corgi_lcd_limit_intensity(int limit) { if (limit) corgibl_flags |= CORGIBL_BATTLOW; else corgibl_flags &= ~CORGIBL_BATTLOW; backlight_update_status(the_corgi_lcd->bl_dev); } EXPORT_SYMBOL(corgi_lcd_limit_intensity); static const struct backlight_ops corgi_bl_ops = { .get_brightness = corgi_bl_get_intensity, .update_status = corgi_bl_update_status, }; #ifdef CONFIG_PM static int corgi_lcd_suspend(struct spi_device *spi, pm_message_t state) { struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev); corgibl_flags |= CORGIBL_SUSPENDED; corgi_bl_set_intensity(lcd, 0); corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN); return 0; } static int corgi_lcd_resume(struct spi_device *spi) { struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev); corgibl_flags &= ~CORGIBL_SUSPENDED; corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_UNBLANK); backlight_update_status(lcd->bl_dev); return 0; } #else #define corgi_lcd_suspend NULL #define corgi_lcd_resume NULL #endif static int setup_gpio_backlight(struct corgi_lcd *lcd, struct corgi_lcd_platform_data *pdata) { struct spi_device *spi = lcd->spi_dev; int err; lcd->gpio_backlight_on = -1; lcd->gpio_backlight_cont = -1; if (gpio_is_valid(pdata->gpio_backlight_on)) { err = gpio_request(pdata->gpio_backlight_on, "BL_ON"); if (err) { dev_err(&spi->dev, "failed to request GPIO%d for " "backlight_on\n", pdata->gpio_backlight_on); return err; } lcd->gpio_backlight_on = pdata->gpio_backlight_on; gpio_direction_output(lcd->gpio_backlight_on, 0); } if (gpio_is_valid(pdata->gpio_backlight_cont)) { err = gpio_request(pdata->gpio_backlight_cont, "BL_CONT"); if (err) { dev_err(&spi->dev, "failed to request GPIO%d for " "backlight_cont\n", pdata->gpio_backlight_cont); goto err_free_backlight_on; } lcd->gpio_backlight_cont = pdata->gpio_backlight_cont; /* spitz and akita use both GPIOs for backlight, and * have inverted polarity of GPIO_BACKLIGHT_CONT */ if (gpio_is_valid(lcd->gpio_backlight_on)) { lcd->gpio_backlight_cont_inverted = 1; 
gpio_direction_output(lcd->gpio_backlight_cont, 1); } else { lcd->gpio_backlight_cont_inverted = 0; gpio_direction_output(lcd->gpio_backlight_cont, 0); } } return 0; err_free_backlight_on: if (gpio_is_valid(lcd->gpio_backlight_on)) gpio_free(lcd->gpio_backlight_on); return err; } static int __devinit corgi_lcd_probe(struct spi_device *spi) { struct backlight_properties props; struct corgi_lcd_platform_data *pdata = spi->dev.platform_data; struct corgi_lcd *lcd; int ret = 0; if (pdata == NULL) { dev_err(&spi->dev, "platform data not available\n"); return -EINVAL; } lcd = kzalloc(sizeof(struct corgi_lcd), GFP_KERNEL); if (!lcd) { dev_err(&spi->dev, "failed to allocate memory\n"); return -ENOMEM; } lcd->spi_dev = spi; lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev, lcd, &corgi_lcd_ops); if (IS_ERR(lcd->lcd_dev)) { ret = PTR_ERR(lcd->lcd_dev); goto err_free_lcd; } lcd->power = FB_BLANK_POWERDOWN; lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = pdata->max_intensity; lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd, &corgi_bl_ops, &props); if (IS_ERR(lcd->bl_dev)) { ret = PTR_ERR(lcd->bl_dev); goto err_unregister_lcd; } lcd->bl_dev->props.brightness = pdata->default_intensity; lcd->bl_dev->props.power = FB_BLANK_UNBLANK; ret = setup_gpio_backlight(lcd, pdata); if (ret) goto err_unregister_bl; lcd->kick_battery = pdata->kick_battery; dev_set_drvdata(&spi->dev, lcd); corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_UNBLANK); backlight_update_status(lcd->bl_dev); lcd->limit_mask = pdata->limit_mask; the_corgi_lcd = lcd; return 0; err_unregister_bl: backlight_device_unregister(lcd->bl_dev); err_unregister_lcd: lcd_device_unregister(lcd->lcd_dev); err_free_lcd: kfree(lcd); return ret; } static int __devexit corgi_lcd_remove(struct spi_device *spi) { struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev); lcd->bl_dev->props.power = 
FB_BLANK_UNBLANK; lcd->bl_dev->props.brightness = 0; backlight_update_status(lcd->bl_dev); backlight_device_unregister(lcd->bl_dev); if (gpio_is_valid(lcd->gpio_backlight_on)) gpio_free(lcd->gpio_backlight_on); if (gpio_is_valid(lcd->gpio_backlight_cont)) gpio_free(lcd->gpio_backlight_cont); corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN); lcd_device_unregister(lcd->lcd_dev); kfree(lcd); return 0; } static struct spi_driver corgi_lcd_driver = { .driver = { .name = "corgi-lcd", .owner = THIS_MODULE, }, .probe = corgi_lcd_probe, .remove = __devexit_p(corgi_lcd_remove), .suspend = corgi_lcd_suspend, .resume = corgi_lcd_resume, }; static int __init corgi_lcd_init(void) { return spi_register_driver(&corgi_lcd_driver); } module_init(corgi_lcd_init); static void __exit corgi_lcd_exit(void) { spi_unregister_driver(&corgi_lcd_driver); } module_exit(corgi_lcd_exit); MODULE_DESCRIPTION("LCD and backlight driver for SHARP C7x0/Cxx00"); MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:corgi-lcd");
gpl-2.0
kamarush/caf_kernel_mm
arch/x86/kvm/i8259.c
4722
14694
/* * 8259 interrupt controller emulation * * Copyright (c) 2003-2004 Fabrice Bellard * Copyright (c) 2007 Intel Corporation * Copyright 2009 Red Hat, Inc. and/or its affiliates. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * Authors: * Yaozu (Eddie) Dong <Eddie.dong@intel.com> * Port from Qemu. */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/bitops.h> #include "irq.h" #include <linux/kvm_host.h> #include "trace.h" #define pr_pic_unimpl(fmt, ...) 
\ pr_err_ratelimited("kvm: pic: " fmt, ## __VA_ARGS__) static void pic_irq_request(struct kvm *kvm, int level); static void pic_lock(struct kvm_pic *s) __acquires(&s->lock) { spin_lock(&s->lock); } static void pic_unlock(struct kvm_pic *s) __releases(&s->lock) { bool wakeup = s->wakeup_needed; struct kvm_vcpu *vcpu, *found = NULL; int i; s->wakeup_needed = false; spin_unlock(&s->lock); if (wakeup) { kvm_for_each_vcpu(i, vcpu, s->kvm) { if (kvm_apic_accept_pic_intr(vcpu)) { found = vcpu; break; } } if (!found) return; kvm_make_request(KVM_REQ_EVENT, found); kvm_vcpu_kick(found); } } static void pic_clear_isr(struct kvm_kpic_state *s, int irq) { s->isr &= ~(1 << irq); if (s != &s->pics_state->pics[0]) irq += 8; /* * We are dropping lock while calling ack notifiers since ack * notifier callbacks for assigned devices call into PIC recursively. * Other interrupt may be delivered to PIC while lock is dropped but * it should be safe since PIC state is already updated at this stage. */ pic_unlock(s->pics_state); kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq); pic_lock(s->pics_state); } /* * set irq level. If an edge is detected, then the IRR is set to 1 */ static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) { int mask, ret = 1; mask = 1 << irq; if (s->elcr & mask) /* level triggered */ if (level) { ret = !(s->irr & mask); s->irr |= mask; s->last_irr |= mask; } else { s->irr &= ~mask; s->last_irr &= ~mask; } else /* edge triggered */ if (level) { if ((s->last_irr & mask) == 0) { ret = !(s->irr & mask); s->irr |= mask; } s->last_irr |= mask; } else s->last_irr &= ~mask; return (s->imr & mask) ? -1 : ret; } /* * return the highest priority found in mask (highest = smallest * number). 
Return 8 if no irq */ static inline int get_priority(struct kvm_kpic_state *s, int mask) { int priority; if (mask == 0) return 8; priority = 0; while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0) priority++; return priority; } /* * return the pic wanted interrupt. return -1 if none */ static int pic_get_irq(struct kvm_kpic_state *s) { int mask, cur_priority, priority; mask = s->irr & ~s->imr; priority = get_priority(s, mask); if (priority == 8) return -1; /* * compute current priority. If special fully nested mode on the * master, the IRQ coming from the slave is not taken into account * for the priority computation. */ mask = s->isr; if (s->special_fully_nested_mode && s == &s->pics_state->pics[0]) mask &= ~(1 << 2); cur_priority = get_priority(s, mask); if (priority < cur_priority) /* * higher priority found: an irq should be generated */ return (priority + s->priority_add) & 7; else return -1; } /* * raise irq to CPU if necessary. must be called every time the active * irq may change */ static void pic_update_irq(struct kvm_pic *s) { int irq2, irq; irq2 = pic_get_irq(&s->pics[1]); if (irq2 >= 0) { /* * if irq request by slave pic, signal master PIC */ pic_set_irq1(&s->pics[0], 2, 1); pic_set_irq1(&s->pics[0], 2, 0); } irq = pic_get_irq(&s->pics[0]); pic_irq_request(s->kvm, irq >= 0); } void kvm_pic_update_irq(struct kvm_pic *s) { pic_lock(s); pic_update_irq(s); pic_unlock(s); } int kvm_pic_set_irq(void *opaque, int irq, int level) { struct kvm_pic *s = opaque; int ret = -1; pic_lock(s); if (irq >= 0 && irq < PIC_NUM_PINS) { ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); pic_update_irq(s); trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr, s->pics[irq >> 3].imr, ret == 0); } pic_unlock(s); return ret; } /* * acknowledge interrupt 'irq' */ static inline void pic_intack(struct kvm_kpic_state *s, int irq) { s->isr |= 1 << irq; /* * We don't clear a level sensitive interrupt here */ if (!(s->elcr & (1 << irq))) s->irr &= ~(1 << 
irq); if (s->auto_eoi) { if (s->rotate_on_auto_eoi) s->priority_add = (irq + 1) & 7; pic_clear_isr(s, irq); } } int kvm_pic_read_irq(struct kvm *kvm) { int irq, irq2, intno; struct kvm_pic *s = pic_irqchip(kvm); pic_lock(s); irq = pic_get_irq(&s->pics[0]); if (irq >= 0) { pic_intack(&s->pics[0], irq); if (irq == 2) { irq2 = pic_get_irq(&s->pics[1]); if (irq2 >= 0) pic_intack(&s->pics[1], irq2); else /* * spurious IRQ on slave controller */ irq2 = 7; intno = s->pics[1].irq_base + irq2; irq = irq2 + 8; } else intno = s->pics[0].irq_base + irq; } else { /* * spurious IRQ on host controller */ irq = 7; intno = s->pics[0].irq_base + irq; } pic_update_irq(s); pic_unlock(s); return intno; } void kvm_pic_reset(struct kvm_kpic_state *s) { int irq, i; struct kvm_vcpu *vcpu; u8 irr = s->irr, isr = s->imr; bool found = false; s->last_irr = 0; s->irr = 0; s->imr = 0; s->isr = 0; s->priority_add = 0; s->irq_base = 0; s->read_reg_select = 0; s->poll = 0; s->special_mask = 0; s->init_state = 0; s->auto_eoi = 0; s->rotate_on_auto_eoi = 0; s->special_fully_nested_mode = 0; s->init4 = 0; kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm) if (kvm_apic_accept_pic_intr(vcpu)) { found = true; break; } if (!found) return; for (irq = 0; irq < PIC_NUM_PINS/2; irq++) if (irr & (1 << irq) || isr & (1 << irq)) pic_clear_isr(s, irq); } static void pic_ioport_write(void *opaque, u32 addr, u32 val) { struct kvm_kpic_state *s = opaque; int priority, cmd, irq; addr &= 1; if (addr == 0) { if (val & 0x10) { s->init4 = val & 1; s->last_irr = 0; s->irr &= s->elcr; s->imr = 0; s->priority_add = 0; s->special_mask = 0; s->read_reg_select = 0; if (!s->init4) { s->special_fully_nested_mode = 0; s->auto_eoi = 0; } s->init_state = 1; if (val & 0x02) pr_pic_unimpl("single mode not supported"); if (val & 0x08) pr_pic_unimpl( "level sensitive irq not supported"); } else if (val & 0x08) { if (val & 0x04) s->poll = 1; if (val & 0x02) s->read_reg_select = val & 1; if (val & 0x40) s->special_mask = (val >> 5) & 1; } 
else { cmd = val >> 5; switch (cmd) { case 0: case 4: s->rotate_on_auto_eoi = cmd >> 2; break; case 1: /* end of interrupt */ case 5: priority = get_priority(s, s->isr); if (priority != 8) { irq = (priority + s->priority_add) & 7; if (cmd == 5) s->priority_add = (irq + 1) & 7; pic_clear_isr(s, irq); pic_update_irq(s->pics_state); } break; case 3: irq = val & 7; pic_clear_isr(s, irq); pic_update_irq(s->pics_state); break; case 6: s->priority_add = (val + 1) & 7; pic_update_irq(s->pics_state); break; case 7: irq = val & 7; s->priority_add = (irq + 1) & 7; pic_clear_isr(s, irq); pic_update_irq(s->pics_state); break; default: break; /* no operation */ } } } else switch (s->init_state) { case 0: { /* normal mode */ u8 imr_diff = s->imr ^ val, off = (s == &s->pics_state->pics[0]) ? 0 : 8; s->imr = val; for (irq = 0; irq < PIC_NUM_PINS/2; irq++) if (imr_diff & (1 << irq)) kvm_fire_mask_notifiers( s->pics_state->kvm, SELECT_PIC(irq + off), irq + off, !!(s->imr & (1 << irq))); pic_update_irq(s->pics_state); break; } case 1: s->irq_base = val & 0xf8; s->init_state = 2; break; case 2: if (s->init4) s->init_state = 3; else s->init_state = 0; break; case 3: s->special_fully_nested_mode = (val >> 4) & 1; s->auto_eoi = (val >> 1) & 1; s->init_state = 0; break; } } static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1) { int ret; ret = pic_get_irq(s); if (ret >= 0) { if (addr1 >> 7) { s->pics_state->pics[0].isr &= ~(1 << 2); s->pics_state->pics[0].irr &= ~(1 << 2); } s->irr &= ~(1 << ret); pic_clear_isr(s, ret); if (addr1 >> 7 || ret != 2) pic_update_irq(s->pics_state); } else { ret = 0x07; pic_update_irq(s->pics_state); } return ret; } static u32 pic_ioport_read(void *opaque, u32 addr1) { struct kvm_kpic_state *s = opaque; unsigned int addr; int ret; addr = addr1; addr &= 1; if (s->poll) { ret = pic_poll_read(s, addr1); s->poll = 0; } else if (addr == 0) if (s->read_reg_select) ret = s->isr; else ret = s->irr; else ret = s->imr; return ret; } static void 
elcr_ioport_write(void *opaque, u32 addr, u32 val) { struct kvm_kpic_state *s = opaque; s->elcr = val & s->elcr_mask; } static u32 elcr_ioport_read(void *opaque, u32 addr1) { struct kvm_kpic_state *s = opaque; return s->elcr; } static int picdev_in_range(gpa_t addr) { switch (addr) { case 0x20: case 0x21: case 0xa0: case 0xa1: case 0x4d0: case 0x4d1: return 1; default: return 0; } } static int picdev_write(struct kvm_pic *s, gpa_t addr, int len, const void *val) { unsigned char data = *(unsigned char *)val; if (!picdev_in_range(addr)) return -EOPNOTSUPP; if (len != 1) { pr_pic_unimpl("non byte write\n"); return 0; } pic_lock(s); switch (addr) { case 0x20: case 0x21: case 0xa0: case 0xa1: pic_ioport_write(&s->pics[addr >> 7], addr, data); break; case 0x4d0: case 0x4d1: elcr_ioport_write(&s->pics[addr & 1], addr, data); break; } pic_unlock(s); return 0; } static int picdev_read(struct kvm_pic *s, gpa_t addr, int len, void *val) { unsigned char data = 0; if (!picdev_in_range(addr)) return -EOPNOTSUPP; if (len != 1) { pr_pic_unimpl("non byte read\n"); return 0; } pic_lock(s); switch (addr) { case 0x20: case 0x21: case 0xa0: case 0xa1: data = pic_ioport_read(&s->pics[addr >> 7], addr); break; case 0x4d0: case 0x4d1: data = elcr_ioport_read(&s->pics[addr & 1], addr); break; } *(unsigned char *)val = data; pic_unlock(s); return 0; } static int picdev_master_write(struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { return picdev_write(container_of(dev, struct kvm_pic, dev_master), addr, len, val); } static int picdev_master_read(struct kvm_io_device *dev, gpa_t addr, int len, void *val) { return picdev_read(container_of(dev, struct kvm_pic, dev_master), addr, len, val); } static int picdev_slave_write(struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { return picdev_write(container_of(dev, struct kvm_pic, dev_slave), addr, len, val); } static int picdev_slave_read(struct kvm_io_device *dev, gpa_t addr, int len, void *val) { return 
picdev_read(container_of(dev, struct kvm_pic, dev_slave), addr, len, val); } static int picdev_eclr_write(struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { return picdev_write(container_of(dev, struct kvm_pic, dev_eclr), addr, len, val); } static int picdev_eclr_read(struct kvm_io_device *dev, gpa_t addr, int len, void *val) { return picdev_read(container_of(dev, struct kvm_pic, dev_eclr), addr, len, val); } /* * callback when PIC0 irq status changed */ static void pic_irq_request(struct kvm *kvm, int level) { struct kvm_pic *s = pic_irqchip(kvm); if (!s->output) s->wakeup_needed = true; s->output = level; } static const struct kvm_io_device_ops picdev_master_ops = { .read = picdev_master_read, .write = picdev_master_write, }; static const struct kvm_io_device_ops picdev_slave_ops = { .read = picdev_slave_read, .write = picdev_slave_write, }; static const struct kvm_io_device_ops picdev_eclr_ops = { .read = picdev_eclr_read, .write = picdev_eclr_write, }; struct kvm_pic *kvm_create_pic(struct kvm *kvm) { struct kvm_pic *s; int ret; s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL); if (!s) return NULL; spin_lock_init(&s->lock); s->kvm = kvm; s->pics[0].elcr_mask = 0xf8; s->pics[1].elcr_mask = 0xde; s->pics[0].pics_state = s; s->pics[1].pics_state = s; /* * Initialize PIO device */ kvm_iodevice_init(&s->dev_master, &picdev_master_ops); kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops); kvm_iodevice_init(&s->dev_eclr, &picdev_eclr_ops); mutex_lock(&kvm->slots_lock); ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2, &s->dev_master); if (ret < 0) goto fail_unlock; ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0xa0, 2, &s->dev_slave); if (ret < 0) goto fail_unreg_2; ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_eclr); if (ret < 0) goto fail_unreg_1; mutex_unlock(&kvm->slots_lock); return s; fail_unreg_1: kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_slave); fail_unreg_2: kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, 
&s->dev_master); fail_unlock: mutex_unlock(&kvm->slots_lock); kfree(s); return NULL; } void kvm_destroy_pic(struct kvm *kvm) { struct kvm_pic *vpic = kvm->arch.vpic; if (vpic) { kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr); kvm->arch.vpic = NULL; kfree(vpic); } }
gpl-2.0
KylinUI/android_kernel_samsung_d2
drivers/misc/ibmasm/module.c
4978
6627
/* * IBM ASM Service Processor Device Driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2004 * * Author: Max Asböck <amax@us.ibm.com> * * This driver is based on code originally written by Pete Reynolds * and others. * */ /* * The ASM device driver does the following things: * * 1) When loaded it sends a message to the service processor, * indicating that an OS is * running. This causes the service processor * to send periodic heartbeats to the OS. * * 2) Answers the periodic heartbeats sent by the service processor. * Failure to do so would result in system reboot. * * 3) Acts as a pass through for dot commands sent from user applications. * The interface for this is the ibmasmfs file system. * * 4) Allows user applications to register for event notification. Events * are sent to the driver through interrupts. They can be read from user * space through the ibmasmfs file system. * * 5) Allows user space applications to send heartbeats to the service * processor (aka reverse heartbeats). Again this happens through ibmasmfs. * * 6) Handles remote mouse and keyboard event interrupts and makes them * available to user applications through ibmasmfs. 
* */ #include <linux/pci.h> #include <linux/init.h> #include <linux/slab.h> #include "ibmasm.h" #include "lowlevel.h" #include "remote.h" int ibmasm_debug = 0; module_param(ibmasm_debug, int , S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ibmasm_debug, " Set debug mode on or off"); static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { int result; struct service_processor *sp; if ((result = pci_enable_device(pdev))) { dev_err(&pdev->dev, "Failed to enable PCI device\n"); return result; } if ((result = pci_request_regions(pdev, DRIVER_NAME))) { dev_err(&pdev->dev, "Failed to allocate PCI resources\n"); goto error_resources; } /* vnc client won't work without bus-mastering */ pci_set_master(pdev); sp = kzalloc(sizeof(struct service_processor), GFP_KERNEL); if (sp == NULL) { dev_err(&pdev->dev, "Failed to allocate memory\n"); result = -ENOMEM; goto error_kmalloc; } spin_lock_init(&sp->lock); INIT_LIST_HEAD(&sp->command_queue); pci_set_drvdata(pdev, (void *)sp); sp->dev = &pdev->dev; sp->number = pdev->bus->number; snprintf(sp->dirname, IBMASM_NAME_SIZE, "%d", sp->number); snprintf(sp->devname, IBMASM_NAME_SIZE, "%s%d", DRIVER_NAME, sp->number); if (ibmasm_event_buffer_init(sp)) { dev_err(sp->dev, "Failed to allocate event buffer\n"); goto error_eventbuffer; } if (ibmasm_heartbeat_init(sp)) { dev_err(sp->dev, "Failed to allocate heartbeat command\n"); goto error_heartbeat; } sp->irq = pdev->irq; sp->base_address = pci_ioremap_bar(pdev, 0); if (!sp->base_address) { dev_err(sp->dev, "Failed to ioremap pci memory\n"); result = -ENODEV; goto error_ioremap; } result = request_irq(sp->irq, ibmasm_interrupt_handler, IRQF_SHARED, sp->devname, (void*)sp); if (result) { dev_err(sp->dev, "Failed to register interrupt handler\n"); goto error_request_irq; } enable_sp_interrupts(sp->base_address); result = ibmasm_init_remote_input_dev(sp); if (result) { dev_err(sp->dev, "Failed to initialize remote queue\n"); goto error_send_message; } result = 
ibmasm_send_driver_vpd(sp); if (result) { dev_err(sp->dev, "Failed to send driver VPD to service processor\n"); goto error_send_message; } result = ibmasm_send_os_state(sp, SYSTEM_STATE_OS_UP); if (result) { dev_err(sp->dev, "Failed to send OS state to service processor\n"); goto error_send_message; } ibmasmfs_add_sp(sp); ibmasm_register_uart(sp); return 0; error_send_message: disable_sp_interrupts(sp->base_address); ibmasm_free_remote_input_dev(sp); free_irq(sp->irq, (void *)sp); error_request_irq: iounmap(sp->base_address); error_ioremap: ibmasm_heartbeat_exit(sp); error_heartbeat: ibmasm_event_buffer_exit(sp); error_eventbuffer: pci_set_drvdata(pdev, NULL); kfree(sp); error_kmalloc: pci_release_regions(pdev); error_resources: pci_disable_device(pdev); return result; } static void __devexit ibmasm_remove_one(struct pci_dev *pdev) { struct service_processor *sp = (struct service_processor *)pci_get_drvdata(pdev); dbg("Unregistering UART\n"); ibmasm_unregister_uart(sp); dbg("Sending OS down message\n"); if (ibmasm_send_os_state(sp, SYSTEM_STATE_OS_DOWN)) err("failed to get repsonse to 'Send OS State' command\n"); dbg("Disabling heartbeats\n"); ibmasm_heartbeat_exit(sp); dbg("Disabling interrupts\n"); disable_sp_interrupts(sp->base_address); dbg("Freeing SP irq\n"); free_irq(sp->irq, (void *)sp); dbg("Cleaning up\n"); ibmasm_free_remote_input_dev(sp); iounmap(sp->base_address); ibmasm_event_buffer_exit(sp); pci_set_drvdata(pdev, NULL); kfree(sp); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_device_id ibmasm_pci_table[] = { { PCI_DEVICE(VENDORID_IBM, DEVICEID_RSA) }, {}, }; static struct pci_driver ibmasm_driver = { .name = DRIVER_NAME, .id_table = ibmasm_pci_table, .probe = ibmasm_init_one, .remove = __devexit_p(ibmasm_remove_one), }; static void __exit ibmasm_exit (void) { ibmasm_unregister_panic_notifier(); ibmasmfs_unregister(); pci_unregister_driver(&ibmasm_driver); info(DRIVER_DESC " version " DRIVER_VERSION " unloaded"); } static 
int __init ibmasm_init(void) { int result = pci_register_driver(&ibmasm_driver); if (result) return result; result = ibmasmfs_register(); if (result) { pci_unregister_driver(&ibmasm_driver); err("Failed to register ibmasmfs file system"); return result; } ibmasm_register_panic_notifier(); info(DRIVER_DESC " version " DRIVER_VERSION " loaded"); return 0; } module_init(ibmasm_init); module_exit(ibmasm_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ibmasm_pci_table);
gpl-2.0
thiz11/kernel_mediatek_wiko
drivers/staging/cxt1e1/pmcc4_drv.c
5234
55239
/*----------------------------------------------------------------------------- * pmcc4_drv.c - * * Copyright (C) 2007 One Stop Systems, Inc. * Copyright (C) 2002-2006 SBE, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * For further information, contact via email: support@onestopsystems.com * One Stop Systems, Inc. Escondido, California U.S.A. *----------------------------------------------------------------------------- */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include "pmcc4_sysdep.h" #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> /* include for timer */ #include <linux/timer.h> /* include for timer */ #include <linux/hdlc.h> #include <asm/io.h> #include "sbecom_inline_linux.h" #include "libsbew.h" #include "pmcc4_private.h" #include "pmcc4.h" #include "pmcc4_ioctls.h" #include "musycc.h" #include "comet.h" #include "sbe_bid.h" #ifdef SBE_INCLUDE_SYMBOLS #define STATIC #else #define STATIC static #endif #define KERN_WARN KERN_WARNING /* forward references */ status_t c4_wk_chan_init (mpi_t *, mch_t *); void c4_wq_port_cleanup (mpi_t *); status_t c4_wq_port_init (mpi_t *); int c4_loop_port (ci_t *, int, u_int8_t); status_t c4_set_port (ci_t *, int); status_t musycc_chan_down (ci_t *, int); u_int32_t musycc_chan_proto (int); status_t musycc_dump_ring (ci_t *, unsigned int); status_t __init musycc_init (ci_t *); void musycc_init_mdt (mpi_t *); void musycc_serv_req (mpi_t *, u_int32_t); void musycc_update_timeslots (mpi_t *); extern void musycc_update_tx_thp 
(mch_t *); extern int cxt1e1_log_level; extern int cxt1e1_max_mru; extern int cxt1e1_max_mtu; extern int max_rxdesc_used, max_rxdesc_default; extern int max_txdesc_used, max_txdesc_default; #if defined (__powerpc__) extern void *memset (void *s, int c, size_t n); #endif int drvr_state = SBE_DRVR_INIT; ci_t *c4_list = 0; ci_t *CI; /* dummy pointer to board ZEROE's data - * DEBUG USAGE */ void sbecom_set_loglevel (int d) { /* * The code within the following -if- clause is a backdoor debug facility * which can be used to display the state of a board's channel. */ if (d > LOG_DEBUG) { unsigned int channum = d - (LOG_DEBUG + 1); /* convert to ZERO * relativity */ (void) musycc_dump_ring ((ci_t *) CI, channum); /* CI implies support * for card 0 only */ } else { if (cxt1e1_log_level != d) { pr_info("log level changed from %d to %d\n", cxt1e1_log_level, d); cxt1e1_log_level = d; /* set new */ } else pr_info("log level is %d\n", cxt1e1_log_level); } } mch_t * c4_find_chan (int channum) { ci_t *ci; mch_t *ch; int portnum, gchan; for (ci = c4_list; ci; ci = ci->next) for (portnum = 0; portnum < ci->max_port; portnum++) for (gchan = 0; gchan < MUSYCC_NCHANS; gchan++) { if ((ch = ci->port[portnum].chan[gchan])) { if ((ch->state != UNASSIGNED) && (ch->channum == channum)) return (ch); } } return 0; } ci_t *__init c4_new (void *hi) { ci_t *ci; #ifdef SBE_MAP_DEBUG pr_warning("c4_new() entered, ci needs %u.\n", (unsigned int) sizeof (ci_t)); #endif ci = (ci_t *) OS_kmalloc (sizeof (ci_t)); if (ci) { ci->hdw_info = hi; ci->state = C_INIT; /* mark as hardware not available */ ci->next = c4_list; c4_list = ci; ci->brdno = ci->next ? ci->next->brdno + 1 : 0; } else pr_warning("failed CI malloc, size %u.\n", (unsigned int) sizeof (ci_t)); if (CI == 0) CI = ci; /* DEBUG, only board 0 usage */ return ci; } /*** * Check port state and set LED states using watchdog or ioctl... 
* also check for in-band SF loopback commands (& cause results if they are there) * * Alarm function depends on comet bits indicating change in * link status (linkMask) to keep the link status indication straight. * * Indications are only LED and system log -- except when ioctl is invoked. * * "alarmed" record (a.k.a. copyVal, in some cases below) decodes as: * * RMAI (E1 only) 0x100 * alarm LED on 0x80 * link LED on 0x40 * link returned 0x20 (link was down, now it's back and 'port get' hasn't run) * change in LED 0x10 (update LED register because value has changed) * link is down 0x08 * YelAlm(RAI) 0x04 * RedAlm 0x02 * AIS(blue)Alm 0x01 * * note "link has returned" indication is reset on read * (e.g. by use of the c4_control port get command) */ #define sbeLinkMask 0x41 /* change in signal status (lost/recovered) + * state */ #define sbeLinkChange 0x40 #define sbeLinkDown 0x01 #define sbeAlarmsMask 0x07 /* red / yellow / blue alarm conditions */ #define sbeE1AlarmsMask 0x107 /* alarm conditions */ #define COMET_LBCMD_READ 0x80 /* read only (do not set, return read value) */ void checkPorts (ci_t * ci) { #ifndef CONFIG_SBE_PMCC4_NCOMM /* * PORT POINT - NCOMM needs to avoid this code since the polling of * alarms conflicts with NCOMM's interrupt servicing implementation. 
*/ comet_t *comet; volatile u_int32_t value; u_int32_t copyVal, LEDval; u_int8_t portnum; LEDval = 0; for (portnum = 0; portnum < ci->max_port; portnum++) { copyVal = 0x12f & (ci->alarmed[portnum]); /* port's alarm record */ comet = ci->port[portnum].cometbase; value = pci_read_32 ((u_int32_t *) &comet->cdrc_ists) & sbeLinkMask; /* link loss reg */ if (value & sbeLinkChange) /* is there a change in the link stuff */ { /* if there's been a change (above) and yet it's the same (below) */ if (!(((copyVal >> 3) & sbeLinkDown) ^ (value & sbeLinkDown))) { if (value & sbeLinkDown) pr_warning("%s: Port %d momentarily recovered.\n", ci->devname, portnum); else pr_warning("%s: Warning: Port %d link was briefly down.\n", ci->devname, portnum); } else if (value & sbeLinkDown) pr_warning("%s: Warning: Port %d link is down.\n", ci->devname, portnum); else { pr_warning("%s: Port %d link has recovered.\n", ci->devname, portnum); copyVal |= 0x20; /* record link transition to up */ } copyVal |= 0x10; /* change (link) --> update LEDs */ } copyVal &= 0x137; /* clear LED & link old history bits & * save others */ if (value & sbeLinkDown) copyVal |= 0x08; /* record link status (now) */ else { /* if link is up, do this */ copyVal |= 0x40; /* LED indicate link is up */ /* Alarm things & the like ... first if E1, then if T1 */ if (IS_FRAME_ANY_E1 (ci->port[portnum].p.port_mode)) { /* * first check Codeword (SaX) changes & CRC and * sub-multi-frame errors */ /* * note these errors are printed every time they are detected * vs. 
alarms */ value = pci_read_32 ((u_int32_t *) &comet->e1_frmr_nat_ists); /* codeword */ if (value & 0x1f) { /* if errors (crc or smf only) */ if (value & 0x10) pr_warning("%s: E1 Port %d Codeword Sa4 change detected.\n", ci->devname, portnum); if (value & 0x08) pr_warning("%s: E1 Port %d Codeword Sa5 change detected.\n", ci->devname, portnum); if (value & 0x04) pr_warning("%s: E1 Port %d Codeword Sa6 change detected.\n", ci->devname, portnum); if (value & 0x02) pr_warning("%s: E1 Port %d Codeword Sa7 change detected.\n", ci->devname, portnum); if (value & 0x01) pr_warning("%s: E1 Port %d Codeword Sa8 change detected.\n", ci->devname, portnum); } value = pci_read_32 ((u_int32_t *) &comet->e1_frmr_mists); /* crc & smf */ if (value & 0x3) { /* if errors (crc or smf only) */ if (value & sbeE1CRC) pr_warning("%s: E1 Port %d CRC-4 error(s) detected.\n", ci->devname, portnum); if (value & sbeE1errSMF) /* error in sub-multiframe */ pr_warning("%s: E1 Port %d received errored SMF.\n", ci->devname, portnum); } value = pci_read_32 ((u_int32_t *) &comet->e1_frmr_masts) & 0xcc; /* alarms */ /* * pack alarms together (bitmiser), and construct similar to * T1 */ /* RAI,RMAI,.,.,LOF,AIS,.,. 
==> RMAI,.,.,.,.,.,RAI,LOF,AIS */ /* see 0x97 */ value = (value >> 2); if (value & 0x30) { if (value & 0x20) value |= 0x40; /* RAI */ if (value & 0x10) value |= 0x100; /* RMAI */ value &= ~0x30; } /* finished packing alarm in handy order */ if (value != (copyVal & sbeE1AlarmsMask)) { /* if alarms changed */ copyVal |= 0x10;/* change LED status */ if ((copyVal & sbeRedAlm) && !(value & sbeRedAlm)) { copyVal &= ~sbeRedAlm; pr_warning("%s: E1 Port %d LOF alarm ended.\n", ci->devname, portnum); } else if (!(copyVal & sbeRedAlm) && (value & sbeRedAlm)) { copyVal |= sbeRedAlm; pr_warning("%s: E1 Warning: Port %d LOF alarm.\n", ci->devname, portnum); } else if ((copyVal & sbeYelAlm) && !(value & sbeYelAlm)) { copyVal &= ~sbeYelAlm; pr_warning("%s: E1 Port %d RAI alarm ended.\n", ci->devname, portnum); } else if (!(copyVal & sbeYelAlm) && (value & sbeYelAlm)) { copyVal |= sbeYelAlm; pr_warning("%s: E1 Warning: Port %d RAI alarm.\n", ci->devname, portnum); } else if ((copyVal & sbeE1RMAI) && !(value & sbeE1RMAI)) { copyVal &= ~sbeE1RMAI; pr_warning("%s: E1 Port %d RMAI alarm ended.\n", ci->devname, portnum); } else if (!(copyVal & sbeE1RMAI) && (value & sbeE1RMAI)) { copyVal |= sbeE1RMAI; pr_warning("%s: E1 Warning: Port %d RMAI alarm.\n", ci->devname, portnum); } else if ((copyVal & sbeAISAlm) && !(value & sbeAISAlm)) { copyVal &= ~sbeAISAlm; pr_warning("%s: E1 Port %d AIS alarm ended.\n", ci->devname, portnum); } else if (!(copyVal & sbeAISAlm) && (value & sbeAISAlm)) { copyVal |= sbeAISAlm; pr_warning("%s: E1 Warning: Port %d AIS alarm.\n", ci->devname, portnum); } } /* end of E1 alarm code */ } else { /* if a T1 mode */ value = pci_read_32 ((u_int32_t *) &comet->t1_almi_ists); /* alarms */ value &= sbeAlarmsMask; if (value != (copyVal & sbeAlarmsMask)) { /* if alarms changed */ copyVal |= 0x10;/* change LED status */ if ((copyVal & sbeRedAlm) && !(value & sbeRedAlm)) { copyVal &= ~sbeRedAlm; pr_warning("%s: Port %d red alarm ended.\n", ci->devname, portnum); } else if 
(!(copyVal & sbeRedAlm) && (value & sbeRedAlm)) { copyVal |= sbeRedAlm; pr_warning("%s: Warning: Port %d red alarm.\n", ci->devname, portnum); } else if ((copyVal & sbeYelAlm) && !(value & sbeYelAlm)) { copyVal &= ~sbeYelAlm; pr_warning("%s: Port %d yellow (RAI) alarm ended.\n", ci->devname, portnum); } else if (!(copyVal & sbeYelAlm) && (value & sbeYelAlm)) { copyVal |= sbeYelAlm; pr_warning("%s: Warning: Port %d yellow (RAI) alarm.\n", ci->devname, portnum); } else if ((copyVal & sbeAISAlm) && !(value & sbeAISAlm)) { copyVal &= ~sbeAISAlm; pr_warning("%s: Port %d blue (AIS) alarm ended.\n", ci->devname, portnum); } else if (!(copyVal & sbeAISAlm) && (value & sbeAISAlm)) { copyVal |= sbeAISAlm; pr_warning("%s: Warning: Port %d blue (AIS) alarm.\n", ci->devname, portnum); } } } /* end T1 mode alarm checks */ } if (copyVal & sbeAlarmsMask) copyVal |= 0x80; /* if alarm turn yel LED on */ if (copyVal & 0x10) LEDval |= 0x100; /* tag if LED values have changed */ LEDval |= ((copyVal & 0xc0) >> (6 - (portnum * 2))); ci->alarmed[portnum] &= 0xfffff000; /* out with the old (it's fff * ... foo) */ ci->alarmed[portnum] |= (copyVal); /* in with the new */ /* * enough with the alarms and LED's, now let's check for loopback * requests */ if (IS_FRAME_ANY_T1 (ci->port[portnum].p.port_mode)) { /* if a T1 mode */ /* * begin in-band (SF) loopback code detection -- start by reading * command */ value = pci_read_32 ((u_int32_t *) &comet->ibcd_ies); /* detect reg. 
*/ value &= 0x3; /* trim to handy bits */ if (value & 0x2) { /* activate loopback (sets for deactivate * code length) */ copyVal = c4_loop_port (ci, portnum, COMET_LBCMD_READ); /* read line loopback * mode */ if (copyVal != COMET_MDIAG_LINELB) /* don't do it again if * already in that mode */ c4_loop_port (ci, portnum, COMET_MDIAG_LINELB); /* put port in line * loopback mode */ } if (value & 0x1) { /* deactivate loopback (sets for activate * code length) */ copyVal = c4_loop_port (ci, portnum, COMET_LBCMD_READ); /* read line loopback * mode */ if (copyVal != COMET_MDIAG_LBOFF) /* don't do it again if * already in that mode */ c4_loop_port (ci, portnum, COMET_MDIAG_LBOFF); /* take port out of any * loopback mode */ } } if (IS_FRAME_ANY_T1ESF (ci->port[portnum].p.port_mode)) { /* if a T1 ESF mode */ /* begin ESF loopback code */ value = pci_read_32 ((u_int32_t *) &comet->t1_rboc_sts) & 0x3f; /* read command */ if (value == 0x07) c4_loop_port (ci, portnum, COMET_MDIAG_LINELB); /* put port in line * loopback mode */ if (value == 0x0a) c4_loop_port (ci, portnum, COMET_MDIAG_PAYLB); /* put port in payload * loopbk mode */ if ((value == 0x1c) || (value == 0x19) || (value == 0x12)) c4_loop_port (ci, portnum, COMET_MDIAG_LBOFF); /* take port out of any * loopbk mode */ if (cxt1e1_log_level >= LOG_DEBUG) if (value != 0x3f) pr_warning("%s: BOC value = %x on Port %d\n", ci->devname, value, portnum); /* end ESF loopback code */ } } /* if something is new, update LED's */ if (LEDval & 0x100) pci_write_32 ((u_int32_t *) &ci->cpldbase->leds, LEDval & 0xff); #endif /*** CONFIG_SBE_PMCC4_NCOMM ***/ } STATIC void c4_watchdog (ci_t * ci) { if (drvr_state != SBE_DRVR_AVAILABLE) { if (cxt1e1_log_level >= LOG_MONITOR) pr_info("drvr not available (%x)\n", drvr_state); return; } ci->wdcount++; checkPorts (ci); ci->wd_notify = 0; } void c4_cleanup (void) { ci_t *ci, *next; mpi_t *pi; int portnum, j; ci = c4_list; while (ci) { next = ci->next; /* protect <next> from upcoming <free> */ 
pci_write_32 ((u_int32_t *) &ci->cpldbase->leds, PMCC4_CPLD_LED_OFF); for (portnum = 0; portnum < ci->max_port; portnum++) { pi = &ci->port[portnum]; c4_wq_port_cleanup (pi); for (j = 0; j < MUSYCC_NCHANS; j++) { if (pi->chan[j]) OS_kfree (pi->chan[j]); /* free mch_t struct */ } OS_kfree (pi->regram_saved); } OS_kfree (ci->iqd_p_saved); OS_kfree (ci); ci = next; /* cleanup next board, if any */ } } /* * This function issues a write to all comet chips and expects the same data * to be returned from the subsequent read. This determines the board build * to be a 1-port, 2-port, or 4-port build. The value returned represents a * bit-mask of the found ports. Only certain configurations are considered * VALID or LEGAL builds. */ int c4_get_portcfg (ci_t * ci) { comet_t *comet; int portnum, mask; u_int32_t wdata, rdata; wdata = COMET_MDIAG_LBOFF; /* take port out of any loopback mode */ mask = 0; for (portnum = 0; portnum < MUSYCC_NPORTS; portnum++) { comet = ci->port[portnum].cometbase; pci_write_32 ((u_int32_t *) &comet->mdiag, wdata); rdata = pci_read_32 ((u_int32_t *) &comet->mdiag) & COMET_MDIAG_LBMASK; if (wdata == rdata) mask |= 1 << portnum; } return mask; } /* nothing herein should generate interrupts */ status_t __init c4_init (ci_t * ci, u_char *func0, u_char *func1) { mpi_t *pi; mch_t *ch; static u_int32_t count = 0; int portnum, j; ci->state = C_INIT; ci->brdno = count++; ci->intlog.this_status_new = 0; atomic_set (&ci->bh_pending, 0); ci->reg = (struct musycc_globalr *) func0; ci->eeprombase = (u_int32_t *) (func1 + EEPROM_OFFSET); ci->cpldbase = (c4cpld_t *) ((u_int32_t *) (func1 + ISPLD_OFFSET)); /*** PORT POINT - the following is the first access of any type to the hardware ***/ #ifdef CONFIG_SBE_PMCC4_NCOMM /* NCOMM driver uses INTB interrupt to monitor CPLD register */ pci_write_32 ((u_int32_t *) &ci->reg->glcd, GCD_MAGIC); #else /* standard driver POLLS for INTB via CPLD register */ pci_write_32 ((u_int32_t *) &ci->reg->glcd, GCD_MAGIC | 
MUSYCC_GCD_INTB_DISABLE); #endif { int pmsk; /* need comet addresses available for determination of hardware build */ for (portnum = 0; portnum < MUSYCC_NPORTS; portnum++) { pi = &ci->port[portnum]; pi->cometbase = (comet_t *) ((u_int32_t *) (func1 + COMET_OFFSET (portnum))); pi->reg = (struct musycc_globalr *) ((u_char *) ci->reg + (portnum * 0x800)); pi->portnum = portnum; pi->p.portnum = portnum; pi->openchans = 0; #ifdef SBE_MAP_DEBUG pr_info("Comet-%d: addr = %p\n", portnum, pi->cometbase); #endif } pmsk = c4_get_portcfg (ci); switch (pmsk) { case 0x1: ci->max_port = 1; break; case 0x3: ci->max_port = 2; break; #if 0 case 0x7: /* not built, but could be... */ ci->max_port = 3; break; #endif case 0xf: ci->max_port = 4; break; default: ci->max_port = 0; pr_warning("%s: illegal port configuration (%x)\n", ci->devname, pmsk); return SBE_DRVR_FAIL; } #ifdef SBE_MAP_DEBUG pr_info(">> %s: c4_get_build - pmsk %x max_port %x\n", ci->devname, pmsk, ci->max_port); #endif } for (portnum = 0; portnum < ci->max_port; portnum++) { pi = &ci->port[portnum]; pi->up = ci; pi->sr_last = 0xffffffff; pi->p.port_mode = CFG_FRAME_SF; /* T1 B8ZS, the default */ pi->p.portP = (CFG_CLK_PORT_EXTERNAL | CFG_LBO_LH0); /* T1 defaults */ OS_sem_init (&pi->sr_sem_busy, SEM_AVAILABLE); OS_sem_init (&pi->sr_sem_wait, SEM_TAKEN); for (j = 0; j < 32; j++) { pi->fifomap[j] = -1; pi->tsm[j] = 0; /* no assignments, all available */ } /* allocate channel structures for this port */ for (j = 0; j < MUSYCC_NCHANS; j++) { ch = OS_kmalloc (sizeof (mch_t)); if (ch) { pi->chan[j] = ch; ch->state = UNASSIGNED; ch->up = pi; ch->gchan = (-1); /* channel assignment not yet known */ ch->channum = (-1); /* channel assignment not yet known */ ch->p.card = ci->brdno; ch->p.port = portnum; ch->p.channum = (-1); /* channel assignment not yet known */ ch->p.mode_56k = 0; /* default is 64kbps mode */ } else { pr_warning("failed mch_t malloc, port %d channel %d size %u.\n", portnum, j, (unsigned int) sizeof (mch_t)); 
break; } } } { /* * Set LEDs through their paces to supply visual proof that LEDs are * functional and not burnt out nor broken. * * YELLOW + GREEN -> OFF. */ pci_write_32 ((u_int32_t *) &ci->cpldbase->leds, PMCC4_CPLD_LED_GREEN | PMCC4_CPLD_LED_YELLOW); OS_uwait (750000, "leds"); pci_write_32 ((u_int32_t *) &ci->cpldbase->leds, PMCC4_CPLD_LED_OFF); } OS_init_watchdog (&ci->wd, (void (*) (void *)) c4_watchdog, ci, WATCHDOG_TIMEOUT); return SBE_DRVR_SUCCESS; } /* better be fully setup to handle interrupts when you call this */ status_t __init c4_init2 (ci_t * ci) { status_t ret; /* PORT POINT: this routine generates first interrupt */ if ((ret = musycc_init (ci)) != SBE_DRVR_SUCCESS) return ret; #if 0 ci->p.framing_type = FRAMING_CBP; ci->p.h110enable = 1; #if 0 ci->p.hypersize = 0; #else hyperdummy = 0; #endif ci->p.clock = 0; /* Use internal clocking until set to * external */ c4_card_set_params (ci, &ci->p); #endif OS_start_watchdog (&ci->wd); return SBE_DRVR_SUCCESS; } /* This function sets the loopback mode (or clears it, as the case may be). 
*/ int c4_loop_port (ci_t * ci, int portnum, u_int8_t cmd) { comet_t *comet; volatile u_int32_t loopValue; comet = ci->port[portnum].cometbase; loopValue = pci_read_32 ((u_int32_t *) &comet->mdiag) & COMET_MDIAG_LBMASK; if (cmd & COMET_LBCMD_READ) return loopValue; /* return the read value */ if (loopValue != cmd) { switch (cmd) { case COMET_MDIAG_LINELB: /* set(SF)loopback down (turn off) code length to 6 bits */ pci_write_32 ((u_int32_t *) &comet->ibcd_cfg, 0x05); break; case COMET_MDIAG_LBOFF: /* set (SF) loopback up (turn on) code length to 5 bits */ pci_write_32 ((u_int32_t *) &comet->ibcd_cfg, 0x00); break; } pci_write_32 ((u_int32_t *) &comet->mdiag, cmd); if (cxt1e1_log_level >= LOG_WARN) pr_info("%s: loopback mode changed to %2x from %2x on Port %d\n", ci->devname, cmd, loopValue, portnum); loopValue = pci_read_32 ((u_int32_t *) &comet->mdiag) & COMET_MDIAG_LBMASK; if (loopValue != cmd) { if (cxt1e1_log_level >= LOG_ERROR) pr_info("%s: write to loop register failed, unknown state for Port %d\n", ci->devname, portnum); } } else { if (cxt1e1_log_level >= LOG_WARN) pr_info("%s: loopback already in that mode (%2x)\n", ci->devname, loopValue); } return 0; } /* c4_frame_rw: read or write the comet register specified * (modifies use of port_param to non-standard use of struct) * Specifically: * pp.portnum (one guess) * pp.port_mode offset of register * pp.portP write (or not, i.e. read) * pp.portStatus write value * BTW: * pp.portStatus also used to return read value * pp.portP also used during write, to return old reg value */ status_t c4_frame_rw (ci_t * ci, struct sbecom_port_param * pp) { comet_t *comet; volatile u_int32_t data; if (pp->portnum >= ci->max_port)/* sanity check */ return ENXIO; comet = ci->port[pp->portnum].cometbase; data = pci_read_32 ((u_int32_t *) comet + pp->port_mode) & 0xff; if (pp->portP) { /* control says this is a register * _write_ */ if (pp->portStatus == data) pr_info("%s: Port %d already that value! 
Writing again anyhow.\n", ci->devname, pp->portnum); pp->portP = (u_int8_t) data; pci_write_32 ((u_int32_t *) comet + pp->port_mode, pp->portStatus); data = pci_read_32 ((u_int32_t *) comet + pp->port_mode) & 0xff; } pp->portStatus = (u_int8_t) data; return 0; } /* c4_pld_rw: read or write the pld register specified * (modifies use of port_param to non-standard use of struct) * Specifically: * pp.port_mode offset of register * pp.portP write (or not, i.e. read) * pp.portStatus write value * BTW: * pp.portStatus also used to return read value * pp.portP also used during write, to return old reg value */ status_t c4_pld_rw (ci_t * ci, struct sbecom_port_param * pp) { volatile u_int32_t *regaddr; volatile u_int32_t data; int regnum = pp->port_mode; regaddr = (u_int32_t *) ci->cpldbase + regnum; data = pci_read_32 ((u_int32_t *) regaddr) & 0xff; if (pp->portP) { /* control says this is a register * _write_ */ pp->portP = (u_int8_t) data; pci_write_32 ((u_int32_t *) regaddr, pp->portStatus); data = pci_read_32 ((u_int32_t *) regaddr) & 0xff; } pp->portStatus = (u_int8_t) data; return 0; } /* c4_musycc_rw: read or write the musycc register specified * (modifies use of port_param to non-standard use of struct) * Specifically: * mcp.RWportnum port number and write indication bit (0x80) * mcp.offset offset of register * mcp.value write value going in and read value returning */ /* PORT POINT: TX Subchannel Map registers are write-only * areas within the MUSYCC and always return FF */ /* PORT POINT: regram and reg structures are minorly different and <offset> ioctl * settings are aligned with the <reg> struct musycc_globalr{} usage. * Also, regram is separately allocated shared memory, allocated for each port. * PORT POINT: access offsets of 0x6000 for Msg Cfg Desc Tbl are for 4-port MUSYCC * only. (An 8-port MUSYCC has 0x16000 offsets for accessing its upper 4 tables.) 
*/ status_t c4_musycc_rw (ci_t * ci, struct c4_musycc_param * mcp) { mpi_t *pi; volatile u_int32_t *dph; /* hardware implemented register */ u_int32_t *dpr = 0; /* RAM image of registers for group command * usage */ int offset = mcp->offset % 0x800; /* group relative address * offset, mcp->portnum is * not used */ int portnum, ramread = 0; volatile u_int32_t data; /* * Sanity check hardware accessibility. The 0x6000 portion handles port * numbers associated with Msg Descr Tbl decoding. */ portnum = (mcp->offset % 0x6000) / 0x800; if (portnum >= ci->max_port) return ENXIO; pi = &ci->port[portnum]; if (mcp->offset >= 0x6000) offset += 0x6000; /* put back in MsgCfgDesc address offset */ dph = (u_int32_t *) ((u_long) pi->reg + offset); /* read of TX are from RAM image, since hardware returns FF */ dpr = (u_int32_t *) ((u_long) pi->regram + offset); if (mcp->offset < 0x6000) /* non MsgDesc Tbl accesses might require * RAM access */ { if (offset >= 0x200 && offset < 0x380) ramread = 1; if (offset >= 0x10 && offset < 0x200) ramread = 1; } /* read register from RAM or hardware, depending... */ if (ramread) { data = *dpr; //pr_info("c4_musycc_rw: RAM addr %p read data %x (portno %x offset %x RAM ramread %x)\n", dpr, data, portnum, offset, ramread); /* RLD DEBUG */ } else { data = pci_read_32 ((u_int32_t *) dph); //pr_info("c4_musycc_rw: REG addr %p read data %x (portno %x offset %x RAM ramread %x)\n", dph, data, portnum, offset, ramread); /* RLD DEBUG */ } if (mcp->RWportnum & 0x80) { /* control says this is a register * _write_ */ if (mcp->value == data) pr_info("%s: musycc grp%d already that value! 
writing again anyhow.\n", ci->devname, (mcp->RWportnum & 0x7)); /* write register RAM */ if (ramread) *dpr = mcp->value; /* write hardware register */ pci_write_32 ((u_int32_t *) dph, mcp->value); } mcp->value = data; /* return the read value (or the 'old * value', if is write) */ return 0; } status_t c4_get_port (ci_t * ci, int portnum) { if (portnum >= ci->max_port) /* sanity check */ return ENXIO; SD_SEM_TAKE (&ci->sem_wdbusy, "_wd_"); /* only 1 thru here, per * board */ checkPorts (ci); ci->port[portnum].p.portStatus = (u_int8_t) ci->alarmed[portnum]; ci->alarmed[portnum] &= 0xdf; SD_SEM_GIVE (&ci->sem_wdbusy); /* release per-board hold */ return 0; } status_t c4_set_port (ci_t * ci, int portnum) { mpi_t *pi; struct sbecom_port_param *pp; int e1mode; u_int8_t clck; int i; if (portnum >= ci->max_port) /* sanity check */ return ENXIO; pi = &ci->port[portnum]; pp = &ci->port[portnum].p; e1mode = IS_FRAME_ANY_E1 (pp->port_mode); if (cxt1e1_log_level >= LOG_MONITOR2) { pr_info("%s: c4_set_port[%d]: entered, e1mode = %x, openchans %d.\n", ci->devname, portnum, e1mode, pi->openchans); } if (pi->openchans) return EBUSY; /* group needs initialization only for * first channel of a group */ { status_t ret; if ((ret = c4_wq_port_init (pi))) /* create/init * workqueue_struct */ return (ret); } init_comet (ci, pi->cometbase, pp->port_mode, 1 /* clockmaster == true */ , pp->portP); clck = pci_read_32 ((u_int32_t *) &ci->cpldbase->mclk) & PMCC4_CPLD_MCLK_MASK; if (e1mode) clck |= 1 << portnum; else clck &= 0xf ^ (1 << portnum); pci_write_32 ((u_int32_t *) &ci->cpldbase->mclk, clck); pci_write_32 ((u_int32_t *) &ci->cpldbase->mcsr, PMCC4_CPLD_MCSR_IND); pci_write_32 ((u_int32_t *) &pi->reg->gbp, OS_vtophys (pi->regram)); /*********************************************************************/ /* ERRATA: If transparent mode is used, do not set OOFMP_DISABLE bit */ /*********************************************************************/ pi->regram->grcd = __constant_cpu_to_le32 
(MUSYCC_GRCD_RX_ENABLE | MUSYCC_GRCD_TX_ENABLE | MUSYCC_GRCD_OOFMP_DISABLE | MUSYCC_GRCD_SF_ALIGN | /* per MUSYCC ERRATA, * for T1 * fix */ MUSYCC_GRCD_COFAIRQ_DISABLE | MUSYCC_GRCD_MC_ENABLE | (MUSYCC_GRCD_POLLTH_32 << MUSYCC_GRCD_POLLTH_SHIFT)); pi->regram->pcd = __constant_cpu_to_le32 ((e1mode ? 1 : 0) | MUSYCC_PCD_TXSYNC_RISING | MUSYCC_PCD_RXSYNC_RISING | MUSYCC_PCD_RXDATA_RISING); /* Message length descriptor */ pi->regram->mld = __constant_cpu_to_le32 (cxt1e1_max_mru | (cxt1e1_max_mru << 16)); /* tsm algorithm */ for (i = 0; i < 32; i++) { /*** ASSIGNMENT NOTES: ***/ /*** Group's channel ZERO unavailable if E1. ***/ /*** Group's channel 16 unavailable if E1 CAS. ***/ /*** Group's channels 24-31 unavailable if T1. ***/ if (((i == 0) && e1mode) || ((i == 16) && ((pp->port_mode == CFG_FRAME_E1CRC_CAS) || (pp->port_mode == CFG_FRAME_E1CRC_CAS_AMI))) || ((i > 23) && (!e1mode))) { pi->tsm[i] = 0xff; /* make tslot unavailable for this mode */ } else { pi->tsm[i] = 0x00; /* make tslot available for assignment */ } } for (i = 0; i < MUSYCC_NCHANS; i++) { pi->regram->ttsm[i] = 0; pi->regram->rtsm[i] = 0; } FLUSH_MEM_WRITE (); musycc_serv_req (pi, SR_GROUP_INIT | SR_RX_DIRECTION); musycc_serv_req (pi, SR_GROUP_INIT | SR_TX_DIRECTION); musycc_init_mdt (pi); pi->group_is_set = 1; pi->p = *pp; return 0; } unsigned int max_int = 0; status_t c4_new_chan (ci_t * ci, int portnum, int channum, void *user) { mpi_t *pi; mch_t *ch; int gchan; if (c4_find_chan (channum)) /* a new channel shouldn't already exist */ return EEXIST; if (portnum >= ci->max_port) /* sanity check */ return ENXIO; pi = &(ci->port[portnum]); /* find any available channel within this port */ for (gchan = 0; gchan < MUSYCC_NCHANS; gchan++) { ch = pi->chan[gchan]; if (ch && ch->state == UNASSIGNED) /* no assignment is good! 
*/ break; } if (gchan == MUSYCC_NCHANS) /* exhausted table, all were assigned */ return ENFILE; ch->up = pi; /* NOTE: mch_t already cleared during OS_kmalloc() */ ch->state = DOWN; ch->user = user; ch->gchan = gchan; ch->channum = channum; /* mark our channel assignment */ ch->p.channum = channum; #if 1 ch->p.card = ci->brdno; ch->p.port = portnum; #endif ch->p.chan_mode = CFG_CH_PROTO_HDLC_FCS16; ch->p.idlecode = CFG_CH_FLAG_7E; ch->p.pad_fill_count = 2; spin_lock_init (&ch->ch_rxlock); spin_lock_init (&ch->ch_txlock); { status_t ret; if ((ret = c4_wk_chan_init (pi, ch))) return ret; } /* save off interface assignments which bound a board */ if (ci->first_if == 0) /* first channel registered is assumed to * be the lowest channel */ { ci->first_if = ci->last_if = user; ci->first_channum = ci->last_channum = channum; } else { ci->last_if = user; if (ci->last_channum < channum) /* higher number channel found */ ci->last_channum = channum; } return 0; } status_t c4_del_chan (int channum) { mch_t *ch; if (!(ch = c4_find_chan (channum))) return ENOENT; if (ch->state == UP) musycc_chan_down ((ci_t *) 0, channum); ch->state = UNASSIGNED; ch->gchan = (-1); ch->channum = (-1); ch->p.channum = (-1); return 0; } status_t c4_del_chan_stats (int channum) { mch_t *ch; if (!(ch = c4_find_chan (channum))) return ENOENT; memset (&ch->s, 0, sizeof (struct sbecom_chan_stats)); return 0; } status_t c4_set_chan (int channum, struct sbecom_chan_param * p) { mch_t *ch; int i, x = 0; if (!(ch = c4_find_chan (channum))) return ENOENT; #if 1 if (ch->p.card != p->card || ch->p.port != p->port || ch->p.channum != p->channum) return EINVAL; #endif if (!(ch->up->group_is_set)) { return EIO; /* out of order, SET_PORT command * required prior to first group's * SET_CHAN command */ } /* * Check for change of parameter settings in order to invoke closing of * channel prior to hardware poking. 
*/ if (ch->p.status != p->status || ch->p.chan_mode != p->chan_mode || ch->p.data_inv != p->data_inv || ch->p.intr_mask != p->intr_mask || ch->txd_free < ch->txd_num) /* to clear out queued messages */ x = 1; /* we have a change requested */ for (i = 0; i < 32; i++) /* check for timeslot mapping changes */ if (ch->p.bitmask[i] != p->bitmask[i]) x = 1; /* we have a change requested */ ch->p = *p; if (x && (ch->state == UP)) /* if change request and channel is * open... */ { status_t ret; if ((ret = musycc_chan_down ((ci_t *) 0, channum))) return ret; if ((ret = c4_chan_up (ch->up->up, channum))) return ret; sd_enable_xmit (ch->user); /* re-enable to catch flow controlled * channel */ } return 0; } status_t c4_get_chan (int channum, struct sbecom_chan_param * p) { mch_t *ch; if (!(ch = c4_find_chan (channum))) return ENOENT; *p = ch->p; return 0; } status_t c4_get_chan_stats (int channum, struct sbecom_chan_stats * p) { mch_t *ch; if (!(ch = c4_find_chan (channum))) return ENOENT; *p = ch->s; p->tx_pending = atomic_read (&ch->tx_pending); return 0; } STATIC int c4_fifo_alloc (mpi_t * pi, int chan, int *len) { int i, l = 0, start = 0, max = 0, maxstart = 0; for (i = 0; i < 32; i++) { if (pi->fifomap[i] != -1) { l = 0; start = i + 1; continue; } ++l; if (l > max) { max = l; maxstart = start; } if (max == *len) break; } if (max != *len) { if (cxt1e1_log_level >= LOG_WARN) pr_info("%s: wanted to allocate %d fifo space, but got only %d\n", pi->up->devname, *len, max); *len = max; } if (cxt1e1_log_level >= LOG_DEBUG) pr_info("%s: allocated %d fifo at %d for channel %d/%d\n", pi->up->devname, max, start, chan, pi->p.portnum); for (i = maxstart; i < (maxstart + max); i++) pi->fifomap[i] = chan; return start; } void c4_fifo_free (mpi_t * pi, int chan) { int i; if (cxt1e1_log_level >= LOG_DEBUG) pr_info("%s: deallocated fifo for channel %d/%d\n", pi->up->devname, chan, pi->p.portnum); for (i = 0; i < 32; i++) if (pi->fifomap[i] == chan) pi->fifomap[i] = -1; } status_t 
c4_chan_up (ci_t * ci, int channum) { mpi_t *pi; mch_t *ch; struct mbuf *m; struct mdesc *md; int nts, nbuf, txnum, rxnum; int addr, i, j, gchan; u_int32_t tmp; /* for optimizing conversion across BE * platform */ if (!(ch = c4_find_chan (channum))) return ENOENT; if (ch->state == UP) { if (cxt1e1_log_level >= LOG_MONITOR) pr_info("%s: channel already UP, graceful early exit\n", ci->devname); return 0; } pi = ch->up; gchan = ch->gchan; /* find nts ('number of timeslots') */ nts = 0; for (i = 0; i < 32; i++) { if (ch->p.bitmask[i] & pi->tsm[i]) { if (1 || cxt1e1_log_level >= LOG_WARN) { pr_info("%s: c4_chan_up[%d] EINVAL (attempt to cfg in-use or unavailable TimeSlot[%d])\n", ci->devname, channum, i); pr_info("+ ask4 %x, currently %x\n", ch->p.bitmask[i], pi->tsm[i]); } return EINVAL; } for (j = 0; j < 8; j++) if (ch->p.bitmask[i] & (1 << j)) nts++; } nbuf = nts / 8 ? nts / 8 : 1; if (!nbuf) { /* if( cxt1e1_log_level >= LOG_WARN) */ pr_info("%s: c4_chan_up[%d] ENOBUFS (no TimeSlots assigned)\n", ci->devname, channum); return ENOBUFS; /* this should not happen */ } addr = c4_fifo_alloc (pi, gchan, &nbuf); ch->state = UP; /* Setup the Time Slot Map */ musycc_update_timeslots (pi); /* ch->tx_limit = nts; */ ch->s.tx_pending = 0; /* Set Channel Configuration Descriptors */ { u_int32_t ccd; ccd = musycc_chan_proto (ch->p.chan_mode) << MUSYCC_CCD_PROTO_SHIFT; if ((ch->p.chan_mode == CFG_CH_PROTO_ISLP_MODE) || (ch->p.chan_mode == CFG_CH_PROTO_TRANS)) { ccd |= MUSYCC_CCD_FCS_XFER; /* Non FSC Mode */ } ccd |= 2 << MUSYCC_CCD_MAX_LENGTH; /* Select second MTU */ ccd |= ch->p.intr_mask; ccd |= addr << MUSYCC_CCD_BUFFER_LOC; if (ch->p.chan_mode == CFG_CH_PROTO_TRANS) ccd |= (nbuf) << MUSYCC_CCD_BUFFER_LENGTH; else ccd |= (nbuf - 1) << MUSYCC_CCD_BUFFER_LENGTH; if (ch->p.data_inv & CFG_CH_DINV_TX) ccd |= MUSYCC_CCD_INVERT_DATA; /* Invert data */ pi->regram->tcct[gchan] = cpu_to_le32 (ccd); if (ch->p.data_inv & CFG_CH_DINV_RX) ccd |= MUSYCC_CCD_INVERT_DATA; /* Invert data */ else 
ccd &= ~MUSYCC_CCD_INVERT_DATA; /* take away data inversion */ pi->regram->rcct[gchan] = cpu_to_le32 (ccd); FLUSH_MEM_WRITE (); } /* Reread the Channel Configuration Descriptor for this channel */ musycc_serv_req (pi, SR_CHANNEL_CONFIG | SR_RX_DIRECTION | gchan); musycc_serv_req (pi, SR_CHANNEL_CONFIG | SR_TX_DIRECTION | gchan); /* * Figure out how many buffers we want. If the customer has changed from * the defaults, then use the changed values. Otherwise, use Transparent * mode's specific minimum default settings. */ if (ch->p.chan_mode == CFG_CH_PROTO_TRANS) { if (max_rxdesc_used == max_rxdesc_default) /* use default setting */ max_rxdesc_used = MUSYCC_RXDESC_TRANS; if (max_txdesc_used == max_txdesc_default) /* use default setting */ max_txdesc_used = MUSYCC_TXDESC_TRANS; } /* * Increase counts when hyperchanneling, since this implies an increase * in throughput per channel */ rxnum = max_rxdesc_used + (nts / 4); txnum = max_txdesc_used + (nts / 4); #if 0 /* DEBUG INFO */ if (cxt1e1_log_level >= LOG_MONITOR) pr_info("%s: mode %x rxnum %d (rxused %d def %d) txnum %d (txused %d def %d)\n", ci->devname, ch->p.chan_mode, rxnum, max_rxdesc_used, max_rxdesc_default, txnum, max_txdesc_used, max_txdesc_default); #endif ch->rxd_num = rxnum; ch->txd_num = txnum; ch->rxix_irq_srv = 0; ch->mdr = OS_kmalloc (sizeof (struct mdesc) * rxnum); ch->mdt = OS_kmalloc (sizeof (struct mdesc) * txnum); if (ch->p.chan_mode == CFG_CH_PROTO_TRANS) tmp = __constant_cpu_to_le32 (cxt1e1_max_mru | EOBIRQ_ENABLE); else tmp = __constant_cpu_to_le32 (cxt1e1_max_mru); for (i = 0, md = ch->mdr; i < rxnum; i++, md++) { if (i == (rxnum - 1)) { md->snext = &ch->mdr[0];/* wrapness */ } else { md->snext = &ch->mdr[i + 1]; } md->next = cpu_to_le32 (OS_vtophys (md->snext)); if (!(m = OS_mem_token_alloc (cxt1e1_max_mru))) { if (cxt1e1_log_level >= LOG_MONITOR) pr_info("%s: c4_chan_up[%d] - token alloc failure, size = %d.\n", ci->devname, channum, cxt1e1_max_mru); goto errfree; } md->mem_token = m; 
md->data = cpu_to_le32 (OS_vtophys (OS_mem_token_data (m))); md->status = tmp | MUSYCC_RX_OWNED; /* MUSYCC owns RX descriptor ** * CODING NOTE: * MUSYCC_RX_OWNED = 0 so no * need to byteSwap */ } for (i = 0, md = ch->mdt; i < txnum; i++, md++) { md->status = HOST_TX_OWNED; /* Host owns TX descriptor ** CODING * NOTE: HOST_TX_OWNED = 0 so no need to * byteSwap */ md->mem_token = 0; md->data = 0; if (i == (txnum - 1)) { md->snext = &ch->mdt[0];/* wrapness */ } else { md->snext = &ch->mdt[i + 1]; } md->next = cpu_to_le32 (OS_vtophys (md->snext)); } ch->txd_irq_srv = ch->txd_usr_add = &ch->mdt[0]; ch->txd_free = txnum; ch->tx_full = 0; ch->txd_required = 0; /* Configure it into the chip */ tmp = cpu_to_le32 (OS_vtophys (&ch->mdt[0])); pi->regram->thp[gchan] = tmp; pi->regram->tmp[gchan] = tmp; tmp = cpu_to_le32 (OS_vtophys (&ch->mdr[0])); pi->regram->rhp[gchan] = tmp; pi->regram->rmp[gchan] = tmp; /* Activate the Channel */ FLUSH_MEM_WRITE (); if (ch->p.status & RX_ENABLED) { #ifdef RLD_TRANS_DEBUG pr_info("++ c4_chan_up() CHAN RX ACTIVATE: chan %d\n", ch->channum); #endif ch->ch_start_rx = 0; /* we are restarting RX... */ musycc_serv_req (pi, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | gchan); } if (ch->p.status & TX_ENABLED) { #ifdef RLD_TRANS_DEBUG pr_info("++ c4_chan_up() CHAN TX ACTIVATE: chan %d <delayed>\n", ch->channum); #endif ch->ch_start_tx = CH_START_TX_1ST; /* we are delaying start * until receipt from user of * first packet to transmit. 
*/ } ch->status = ch->p.status; pi->openchans++; return 0; errfree: while (i > 0) { /* Don't leak all the previously allocated mbufs in this loop */ i--; OS_mem_token_free (ch->mdr[i].mem_token); } OS_kfree (ch->mdt); ch->mdt = 0; ch->txd_num = 0; OS_kfree (ch->mdr); ch->mdr = 0; ch->rxd_num = 0; ch->state = DOWN; return ENOBUFS; } /* stop the hardware from servicing & interrupting */ void c4_stopwd (ci_t * ci) { OS_stop_watchdog (&ci->wd); SD_SEM_TAKE (&ci->sem_wdbusy, "_stop_"); /* ensure WD not running */ SD_SEM_GIVE (&ci->sem_wdbusy); } void sbecom_get_brdinfo (ci_t * ci, struct sbe_brd_info * bip, u_int8_t *bsn) { char *np; u_int32_t sn = 0; int i; bip->brdno = ci->brdno; /* our board number */ bip->brd_id = ci->brd_id; bip->brd_hdw_id = ci->hdw_bid; bip->brd_chan_cnt = MUSYCC_NCHANS * ci->max_port; /* number of channels * being used */ bip->brd_port_cnt = ci->max_port; /* number of ports being used */ bip->brd_pci_speed = BINFO_PCI_SPEED_unk; /* PCI speed not yet * determinable */ if (ci->first_if) { { struct net_device *dev; dev = (struct net_device *) ci->first_if; np = (char *) dev->name; } strncpy (bip->first_iname, np, CHNM_STRLEN - 1); } else strcpy (bip->first_iname, "<NULL>"); if (ci->last_if) { { struct net_device *dev; dev = (struct net_device *) ci->last_if; np = (char *) dev->name; } strncpy (bip->last_iname, np, CHNM_STRLEN - 1); } else strcpy (bip->last_iname, "<NULL>"); if (bsn) { for (i = 0; i < 3; i++) { bip->brd_mac_addr[i] = *bsn++; } for (; i < 6; i++) { bip->brd_mac_addr[i] = *bsn; sn = (sn << 8) | *bsn++; } } else { for (i = 0; i < 6; i++) bip->brd_mac_addr[i] = 0; } bip->brd_sn = sn; } status_t c4_get_iidinfo (ci_t * ci, struct sbe_iid_info * iip) { struct net_device *dev; char *np; if (!(dev = getuserbychan (iip->channum))) return ENOENT; np = dev->name; strncpy (iip->iname, np, CHNM_STRLEN - 1); return 0; } #ifdef CONFIG_SBE_PMCC4_NCOMM void (*nciInterrupt[MAX_BOARDS][4]) (void); extern void wanpmcC4T1E1_hookInterrupt (int cardID, int 
deviceID, void *handler); void wanpmcC4T1E1_hookInterrupt (int cardID, int deviceID, void *handler) { if (cardID < MAX_BOARDS) /* sanity check */ nciInterrupt[cardID][deviceID] = handler; } irqreturn_t c4_ebus_intr_th_handler (void *devp) { ci_t *ci = (ci_t *) devp; volatile u_int32_t ists; int handled = 0; int brdno; /* which COMET caused the interrupt */ brdno = ci->brdno; ists = pci_read_32 ((u_int32_t *) &ci->cpldbase->intr); if (ists & PMCC4_CPLD_INTR_CMT_1) { handled = 0x1; if (nciInterrupt[brdno][0] != NULL) (*nciInterrupt[brdno][0]) (); } if (ists & PMCC4_CPLD_INTR_CMT_2) { handled |= 0x2; if (nciInterrupt[brdno][1] != NULL) (*nciInterrupt[brdno][1]) (); } if (ists & PMCC4_CPLD_INTR_CMT_3) { handled |= 0x4; if (nciInterrupt[brdno][2] != NULL) (*nciInterrupt[brdno][2]) (); } if (ists & PMCC4_CPLD_INTR_CMT_4) { handled |= 0x8; if (nciInterrupt[brdno][3] != NULL) (*nciInterrupt[brdno][3]) (); } #if 0 /*** Test code just de-implements the asserted interrupt. Alternate vendor will supply COMET interrupt handling code herein or such. ***/ pci_write_32 ((u_int32_t *) &ci->reg->glcd, GCD_MAGIC | MUSYCC_GCD_INTB_DISABLE); #endif return IRQ_RETVAL (handled); } unsigned long wanpmcC4T1E1_getBaseAddress (int cardID, int deviceID) { ci_t *ci; unsigned long base = 0; ci = c4_list; while (ci) { if (ci->brdno == cardID) /* found valid device */ { if (deviceID < ci->max_port) /* comet is supported */ base = ((unsigned long) ci->port[deviceID].cometbase); break; } ci = ci->next; /* next board, if any */ } return (base); } #endif /*** CONFIG_SBE_PMCC4_NCOMM ***/ /*** End-of-File ***/
gpl-2.0
vredniiy/sprout
arch/openrisc/kernel/or32_ksyms.c
9586
1349
/* * OpenRISC or32_ksyms.c * * Linux architectural port borrowing liberally from similar works of * others. All original copyrights apply as per the original source * declaration. * * Modifications for the OpenRISC architecture: * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/elfcore.h> #include <linux/sched.h> #include <linux/in6.h> #include <linux/interrupt.h> #include <linux/vmalloc.h> #include <linux/semaphore.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/checksum.h> #include <asm/io.h> #include <asm/hardirq.h> #include <asm/delay.h> #include <asm/pgalloc.h> #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) /* compiler generated symbols */ DECLARE_EXPORT(__udivsi3); DECLARE_EXPORT(__divsi3); DECLARE_EXPORT(__umodsi3); DECLARE_EXPORT(__modsi3); DECLARE_EXPORT(__muldi3); DECLARE_EXPORT(__ashrdi3); DECLARE_EXPORT(__ashldi3); DECLARE_EXPORT(__lshrdi3); EXPORT_SYMBOL(__copy_tofrom_user);
gpl-2.0
RitaLee79/android_kernel_xiaomi_armani-kk
drivers/mca/mca-legacy.c
9842
8588
/* -*- mode: c; c-basic-offset: 8 -*- */ /* * MCA bus support functions for legacy (2.4) API. * * Legacy API means the API that operates in terms of MCA slot number * * (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com> * **----------------------------------------------------------------------------- ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ** ** You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
** **----------------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/device.h> #include <linux/mca-legacy.h> #include <asm/io.h> /* NOTE: This structure is stack allocated */ struct mca_find_adapter_info { int id; int slot; struct mca_device *mca_dev; }; /* The purpose of this iterator is to loop over all the devices and * find the one with the smallest slot number that's just greater than * or equal to the required slot with a matching id */ static int mca_find_adapter_callback(struct device *dev, void *data) { struct mca_find_adapter_info *info = data; struct mca_device *mca_dev = to_mca_device(dev); if(mca_dev->pos_id != info->id) return 0; if(mca_dev->slot < info->slot) return 0; if(!info->mca_dev || info->mca_dev->slot >= mca_dev->slot) info->mca_dev = mca_dev; return 0; } /** * mca_find_adapter - scan for adapters * @id: MCA identification to search for * @start: starting slot * * Search the MCA configuration for adapters matching the 16bit * ID given. The first time it should be called with start as zero * and then further calls made passing the return value of the * previous call until %MCA_NOTFOUND is returned. * * Disabled adapters are not reported. */ int mca_find_adapter(int id, int start) { struct mca_find_adapter_info info; if(id == 0xffff) return MCA_NOTFOUND; info.slot = start; info.id = id; info.mca_dev = NULL; for(;;) { bus_for_each_dev(&mca_bus_type, NULL, &info, mca_find_adapter_callback); if(info.mca_dev == NULL) return MCA_NOTFOUND; if(info.mca_dev->status != MCA_ADAPTER_DISABLED) break; /* OK, found adapter but it was disabled. 
Go around * again, excluding the slot we just found */ info.slot = info.mca_dev->slot + 1; info.mca_dev = NULL; } return info.mca_dev->slot; } EXPORT_SYMBOL(mca_find_adapter); /*--------------------------------------------------------------------*/ /** * mca_find_unused_adapter - scan for unused adapters * @id: MCA identification to search for * @start: starting slot * * Search the MCA configuration for adapters matching the 16bit * ID given. The first time it should be called with start as zero * and then further calls made passing the return value of the * previous call until %MCA_NOTFOUND is returned. * * Adapters that have been claimed by drivers and those that * are disabled are not reported. This function thus allows a driver * to scan for further cards when some may already be driven. */ int mca_find_unused_adapter(int id, int start) { struct mca_find_adapter_info info = { 0 }; if (!MCA_bus || id == 0xffff) return MCA_NOTFOUND; info.slot = start; info.id = id; info.mca_dev = NULL; for(;;) { bus_for_each_dev(&mca_bus_type, NULL, &info, mca_find_adapter_callback); if(info.mca_dev == NULL) return MCA_NOTFOUND; if(info.mca_dev->status != MCA_ADAPTER_DISABLED && !info.mca_dev->driver_loaded) break; /* OK, found adapter but it was disabled or already in * use. 
Go around again, excluding the slot we just * found */ info.slot = info.mca_dev->slot + 1; info.mca_dev = NULL; } return info.mca_dev->slot; } EXPORT_SYMBOL(mca_find_unused_adapter); /* NOTE: stack allocated structure */ struct mca_find_device_by_slot_info { int slot; struct mca_device *mca_dev; }; static int mca_find_device_by_slot_callback(struct device *dev, void *data) { struct mca_find_device_by_slot_info *info = data; struct mca_device *mca_dev = to_mca_device(dev); if(mca_dev->slot == info->slot) info->mca_dev = mca_dev; return 0; } struct mca_device *mca_find_device_by_slot(int slot) { struct mca_find_device_by_slot_info info; info.slot = slot; info.mca_dev = NULL; bus_for_each_dev(&mca_bus_type, NULL, &info, mca_find_device_by_slot_callback); return info.mca_dev; } /** * mca_read_stored_pos - read POS register from boot data * @slot: slot number to read from * @reg: register to read from * * Fetch a POS value that was stored at boot time by the kernel * when it scanned the MCA space. The register value is returned. * Missing or invalid registers report 0. */ unsigned char mca_read_stored_pos(int slot, int reg) { struct mca_device *mca_dev = mca_find_device_by_slot(slot); if(!mca_dev) return 0; return mca_device_read_stored_pos(mca_dev, reg); } EXPORT_SYMBOL(mca_read_stored_pos); /** * mca_read_pos - read POS register from card * @slot: slot number to read from * @reg: register to read from * * Fetch a POS value directly from the hardware to obtain the * current value. This is much slower than mca_read_stored_pos and * may not be invoked from interrupt context. It handles the * deep magic required for onboard devices transparently. 
*/ unsigned char mca_read_pos(int slot, int reg) { struct mca_device *mca_dev = mca_find_device_by_slot(slot); if(!mca_dev) return 0; return mca_device_read_pos(mca_dev, reg); } EXPORT_SYMBOL(mca_read_pos); /** * mca_write_pos - read POS register from card * @slot: slot number to read from * @reg: register to read from * @byte: byte to write to the POS registers * * Store a POS value directly from the hardware. You should not * normally need to use this function and should have a very good * knowledge of MCA bus before you do so. Doing this wrongly can * damage the hardware. * * This function may not be used from interrupt context. * * Note that this a technically a Bad Thing, as IBM tech stuff says * you should only set POS values through their utilities. * However, some devices such as the 3c523 recommend that you write * back some data to make sure the configuration is consistent. * I'd say that IBM is right, but I like my drivers to work. * * This function can't do checks to see if multiple devices end up * with the same resources, so you might see magic smoke if someone * screws up. */ void mca_write_pos(int slot, int reg, unsigned char byte) { struct mca_device *mca_dev = mca_find_device_by_slot(slot); if(!mca_dev) return; mca_device_write_pos(mca_dev, reg, byte); } EXPORT_SYMBOL(mca_write_pos); /** * mca_set_adapter_name - Set the description of the card * @slot: slot to name * @name: text string for the namen * * This function sets the name reported via /proc for this * adapter slot. This is for user information only. Setting a * name deletes any previous name. */ void mca_set_adapter_name(int slot, char* name) { struct mca_device *mca_dev = mca_find_device_by_slot(slot); if(!mca_dev) return; mca_device_set_name(mca_dev, name); } EXPORT_SYMBOL(mca_set_adapter_name); /** * mca_mark_as_used - claim an MCA device * @slot: slot to claim * FIXME: should we make this threadsafe * * Claim an MCA slot for a device driver. 
If the * slot is already taken the function returns 1, * if it is not taken it is claimed and 0 is * returned. */ int mca_mark_as_used(int slot) { struct mca_device *mca_dev = mca_find_device_by_slot(slot); if(!mca_dev) /* FIXME: this is actually a severe error */ return 1; if(mca_device_claimed(mca_dev)) return 1; mca_device_set_claim(mca_dev, 1); return 0; } EXPORT_SYMBOL(mca_mark_as_used); /** * mca_mark_as_unused - release an MCA device * @slot: slot to claim * * Release the slot for other drives to use. */ void mca_mark_as_unused(int slot) { struct mca_device *mca_dev = mca_find_device_by_slot(slot); if(!mca_dev) return; mca_device_set_claim(mca_dev, 0); } EXPORT_SYMBOL(mca_mark_as_unused);
gpl-2.0
V1sk/android_kernel_sony_msm8960
fs/ntfs/collate.c
14962
3675
/*
 * collate.c - NTFS kernel collation handling.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2004 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "collate.h"
#include "debug.h"
#include "ntfs.h"

/*
 * Collate two items as raw byte strings (COLLATION_BINARY): compare the
 * common prefix with memcmp(); if the prefixes are equal, the shorter
 * item collates first.  Returns -1, 0, or 1 (see ntfs_collate() below).
 */
static int ntfs_collate_binary(ntfs_volume *vol,
		const void *data1, const int data1_len,
		const void *data2, const int data2_len)
{
	int rc;

	ntfs_debug("Entering.");
	rc = memcmp(data1, data2, min(data1_len, data2_len));
	if (!rc && (data1_len != data2_len)) {
		/* Equal prefix: the shorter item collates first. */
		if (data1_len < data2_len)
			rc = -1;
		else
			rc = 1;
	}
	ntfs_debug("Done, returning %i", rc);
	return rc;
}

/*
 * Collate two little-endian 32-bit values (COLLATION_NTOFS_ULONG).
 * Both items must be exactly four bytes.  Returns -1, 0, or 1.
 */
static int ntfs_collate_ntofs_ulong(ntfs_volume *vol,
		const void *data1, const int data1_len,
		const void *data2, const int data2_len)
{
	int rc;
	u32 d1, d2;

	ntfs_debug("Entering.");
	// FIXME:  We don't really want to bug here.
	BUG_ON(data1_len != data2_len);
	BUG_ON(data1_len != 4);
	d1 = le32_to_cpup(data1);
	d2 = le32_to_cpup(data2);
	if (d1 < d2)
		rc = -1;
	else {
		if (d1 == d2)
			rc = 0;
		else
			rc = 1;
	}
	ntfs_debug("Done, returning %i", rc);
	return rc;
}

typedef int (*ntfs_collate_func_t)(ntfs_volume *, const void *, const int,
		const void *, const int);

/* Dispatch table for collation rules 0x00-0x02 (indexed directly). */
static ntfs_collate_func_t ntfs_do_collate0x0[3] = {
	ntfs_collate_binary,
	NULL/*ntfs_collate_file_name*/,
	NULL/*ntfs_collate_unicode_string*/,
};

/* Dispatch table for collation rules 0x10-0x13 (indexed as rule - 0x10). */
static ntfs_collate_func_t ntfs_do_collate0x1[4] = {
	ntfs_collate_ntofs_ulong,
	NULL/*ntfs_collate_ntofs_sid*/,
	NULL/*ntfs_collate_ntofs_security_hash*/,
	NULL/*ntfs_collate_ntofs_ulongs*/,
};

/**
 * ntfs_collate - collate two data items using a specified collation rule
 * @vol:	ntfs volume to which the data items belong
 * @cr:		collation rule to use when comparing the items
 * @data1:	first data item to collate
 * @data1_len:	length in bytes of @data1
 * @data2:	second data item to collate
 * @data2_len:	length in bytes of @data2
 *
 * Collate the two data items @data1 and @data2 using the collation rule @cr
 * and return -1, 0, or 1 if @data1 is found, respectively, to collate before,
 * to match, or to collate after @data2.
 *
 * For speed we use the collation rule @cr as an index into two tables of
 * function pointers to call the appropriate collation function.
 */
int ntfs_collate(ntfs_volume *vol, COLLATION_RULE cr,
		const void *data1, const int data1_len,
		const void *data2, const int data2_len)
{
	int i;

	ntfs_debug("Entering.");
	/*
	 * FIXME:  At the moment we only support COLLATION_BINARY and
	 * COLLATION_NTOFS_ULONG, so we BUG() for everything else for now.
	 */
	BUG_ON(cr != COLLATION_BINARY && cr != COLLATION_NTOFS_ULONG);
	i = le32_to_cpu(cr);
	BUG_ON(i < 0);
	/* Rules 0x00-0x02 index the first table directly. */
	if (i <= 0x02)
		return ntfs_do_collate0x0[i](vol, data1, data1_len,
				data2, data2_len);
	/* Rules 0x10-0x13 index the second table, offset by 0x10. */
	BUG_ON(i < 0x10);
	i -= 0x10;
	if (likely(i <= 3))
		return ntfs_do_collate0x1[i](vol, data1, data1_len,
				data2, data2_len);
	BUG();
	return 0;
}
gpl-2.0
MoKee/android_kernel_samsung_piranha
fs/ntfs/collate.c
14962
3675
/*
 * collate.c - NTFS kernel collation handling.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2004 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "collate.h"
#include "debug.h"
#include "ntfs.h"

/*
 * Collate two items as raw byte strings (COLLATION_BINARY): compare the
 * common prefix with memcmp(); if the prefixes are equal, the shorter
 * item collates first.  Returns -1, 0, or 1 (see ntfs_collate() below).
 */
static int ntfs_collate_binary(ntfs_volume *vol,
		const void *data1, const int data1_len,
		const void *data2, const int data2_len)
{
	int rc;

	ntfs_debug("Entering.");
	rc = memcmp(data1, data2, min(data1_len, data2_len));
	if (!rc && (data1_len != data2_len)) {
		/* Equal prefix: the shorter item collates first. */
		if (data1_len < data2_len)
			rc = -1;
		else
			rc = 1;
	}
	ntfs_debug("Done, returning %i", rc);
	return rc;
}

/*
 * Collate two little-endian 32-bit values (COLLATION_NTOFS_ULONG).
 * Both items must be exactly four bytes.  Returns -1, 0, or 1.
 */
static int ntfs_collate_ntofs_ulong(ntfs_volume *vol,
		const void *data1, const int data1_len,
		const void *data2, const int data2_len)
{
	int rc;
	u32 d1, d2;

	ntfs_debug("Entering.");
	// FIXME:  We don't really want to bug here.
	BUG_ON(data1_len != data2_len);
	BUG_ON(data1_len != 4);
	d1 = le32_to_cpup(data1);
	d2 = le32_to_cpup(data2);
	if (d1 < d2)
		rc = -1;
	else {
		if (d1 == d2)
			rc = 0;
		else
			rc = 1;
	}
	ntfs_debug("Done, returning %i", rc);
	return rc;
}

typedef int (*ntfs_collate_func_t)(ntfs_volume *, const void *, const int,
		const void *, const int);

/* Dispatch table for collation rules 0x00-0x02 (indexed directly). */
static ntfs_collate_func_t ntfs_do_collate0x0[3] = {
	ntfs_collate_binary,
	NULL/*ntfs_collate_file_name*/,
	NULL/*ntfs_collate_unicode_string*/,
};

/* Dispatch table for collation rules 0x10-0x13 (indexed as rule - 0x10). */
static ntfs_collate_func_t ntfs_do_collate0x1[4] = {
	ntfs_collate_ntofs_ulong,
	NULL/*ntfs_collate_ntofs_sid*/,
	NULL/*ntfs_collate_ntofs_security_hash*/,
	NULL/*ntfs_collate_ntofs_ulongs*/,
};

/**
 * ntfs_collate - collate two data items using a specified collation rule
 * @vol:	ntfs volume to which the data items belong
 * @cr:		collation rule to use when comparing the items
 * @data1:	first data item to collate
 * @data1_len:	length in bytes of @data1
 * @data2:	second data item to collate
 * @data2_len:	length in bytes of @data2
 *
 * Collate the two data items @data1 and @data2 using the collation rule @cr
 * and return -1, 0, or 1 if @data1 is found, respectively, to collate before,
 * to match, or to collate after @data2.
 *
 * For speed we use the collation rule @cr as an index into two tables of
 * function pointers to call the appropriate collation function.
 */
int ntfs_collate(ntfs_volume *vol, COLLATION_RULE cr,
		const void *data1, const int data1_len,
		const void *data2, const int data2_len)
{
	int i;

	ntfs_debug("Entering.");
	/*
	 * FIXME:  At the moment we only support COLLATION_BINARY and
	 * COLLATION_NTOFS_ULONG, so we BUG() for everything else for now.
	 */
	BUG_ON(cr != COLLATION_BINARY && cr != COLLATION_NTOFS_ULONG);
	i = le32_to_cpu(cr);
	BUG_ON(i < 0);
	/* Rules 0x00-0x02 index the first table directly. */
	if (i <= 0x02)
		return ntfs_do_collate0x0[i](vol, data1, data1_len,
				data2, data2_len);
	/* Rules 0x10-0x13 index the second table, offset by 0x10. */
	BUG_ON(i < 0x10);
	i -= 0x10;
	if (likely(i <= 3))
		return ntfs_do_collate0x1[i](vol, data1, data1_len,
				data2, data2_len);
	BUG();
	return 0;
}
gpl-2.0
alpscale/linux
drivers/infiniband/hw/qib/qib_iba7220.c
371
146632
/* * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* * This file contains all of the code that is specific to the * QLogic_IB 7220 chip (except that specific to the SerDes) */ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/io.h> #include <rdma/ib_verbs.h> #include "qib.h" #include "qib_7220.h" static void qib_setup_7220_setextled(struct qib_pportdata *, u32); static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t); static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op); static u32 qib_7220_iblink_state(u64); static u8 qib_7220_phys_portstate(u64); static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16); static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16); /* * This file contains almost all the chip-specific register information and * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the * exception of SerDes support, which in in qib_sd7220.c. */ /* Below uses machine-generated qib_chipnum_regs.h file */ #define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64)) /* Use defines to tie machine-generated names to lower-case names */ #define kr_control KREG_IDX(Control) #define kr_counterregbase KREG_IDX(CntrRegBase) #define kr_errclear KREG_IDX(ErrClear) #define kr_errmask KREG_IDX(ErrMask) #define kr_errstatus KREG_IDX(ErrStatus) #define kr_extctrl KREG_IDX(EXTCtrl) #define kr_extstatus KREG_IDX(EXTStatus) #define kr_gpio_clear KREG_IDX(GPIOClear) #define kr_gpio_mask KREG_IDX(GPIOMask) #define kr_gpio_out KREG_IDX(GPIOOut) #define kr_gpio_status KREG_IDX(GPIOStatus) #define kr_hrtbt_guid KREG_IDX(HRTBT_GUID) #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl) #define kr_hwerrclear KREG_IDX(HwErrClear) #define kr_hwerrmask KREG_IDX(HwErrMask) #define kr_hwerrstatus KREG_IDX(HwErrStatus) #define kr_ibcctrl KREG_IDX(IBCCtrl) #define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl) #define kr_ibcddrstatus KREG_IDX(IBCDDRStatus) #define kr_ibcstatus KREG_IDX(IBCStatus) #define 
kr_ibserdesctrl KREG_IDX(IBSerDesCtrl) #define kr_intclear KREG_IDX(IntClear) #define kr_intmask KREG_IDX(IntMask) #define kr_intstatus KREG_IDX(IntStatus) #define kr_ncmodectrl KREG_IDX(IBNCModeCtrl) #define kr_palign KREG_IDX(PageAlign) #define kr_partitionkey KREG_IDX(RcvPartitionKey) #define kr_portcnt KREG_IDX(PortCnt) #define kr_rcvbthqp KREG_IDX(RcvBTHQP) #define kr_rcvctrl KREG_IDX(RcvCtrl) #define kr_rcvegrbase KREG_IDX(RcvEgrBase) #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt) #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt) #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize) #define kr_rcvhdrsize KREG_IDX(RcvHdrSize) #define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt) #define kr_rcvtidbase KREG_IDX(RcvTIDBase) #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt) #define kr_revision KREG_IDX(Revision) #define kr_scratch KREG_IDX(Scratch) #define kr_sendbuffererror KREG_IDX(SendBufErr0) #define kr_sendctrl KREG_IDX(SendCtrl) #define kr_senddmabase KREG_IDX(SendDmaBase) #define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0) #define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1) #define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2) #define kr_senddmahead KREG_IDX(SendDmaHead) #define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr) #define kr_senddmalengen KREG_IDX(SendDmaLenGen) #define kr_senddmastatus KREG_IDX(SendDmaStatus) #define kr_senddmatail KREG_IDX(SendDmaTail) #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr) #define kr_sendpiobufbase KREG_IDX(SendBufBase) #define kr_sendpiobufcnt KREG_IDX(SendBufCnt) #define kr_sendpiosize KREG_IDX(SendBufSize) #define kr_sendregbase KREG_IDX(SendRegBase) #define kr_userregbase KREG_IDX(UserRegBase) #define kr_xgxs_cfg KREG_IDX(XGXSCfg) /* These must only be written via qib_write_kreg_ctxt() */ #define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0) #define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0) #define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \ QIB_7220_LBIntCnt_OFFS) / sizeof(u64)) #define cr_badformat CREG_IDX(RxVersionErrCnt) #define 
cr_erricrc CREG_IDX(RxICRCErrCnt) #define cr_errlink CREG_IDX(RxLinkMalformCnt) #define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt) #define cr_errpkey CREG_IDX(RxPKeyMismatchCnt) #define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt) #define cr_err_rlen CREG_IDX(RxLenErrCnt) #define cr_errslen CREG_IDX(TxLenErrCnt) #define cr_errtidfull CREG_IDX(RxTIDFullErrCnt) #define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt) #define cr_errvcrc CREG_IDX(RxVCRCErrCnt) #define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt) #define cr_lbint CREG_IDX(LBIntCnt) #define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt) #define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt) #define cr_lbflowstall CREG_IDX(LBFlowStallCnt) #define cr_pktrcv CREG_IDX(RxDataPktCnt) #define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt) #define cr_pktsend CREG_IDX(TxDataPktCnt) #define cr_pktsendflow CREG_IDX(TxFlowPktCnt) #define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt) #define cr_rcvebp CREG_IDX(RxEBPCnt) #define cr_rcvovfl CREG_IDX(RxBufOvflCnt) #define cr_senddropped CREG_IDX(TxDroppedPktCnt) #define cr_sendstall CREG_IDX(TxFlowStallCnt) #define cr_sendunderrun CREG_IDX(TxUnderrunCnt) #define cr_wordrcv CREG_IDX(RxDwordCnt) #define cr_wordsend CREG_IDX(TxDwordCnt) #define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt) #define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt) #define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt) #define cr_iblinkdown CREG_IDX(IBLinkDownedCnt) #define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt) #define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt) #define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt) #define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt) #define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt) #define cr_rxvlerr CREG_IDX(RxVlErrCnt) #define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt) #define cr_psstat CREG_IDX(PSStat) #define cr_psstart CREG_IDX(PSStart) #define cr_psinterval CREG_IDX(PSInterval) #define cr_psrcvdatacount CREG_IDX(PSRcvDataCount) #define cr_psrcvpktscount 
CREG_IDX(PSRcvPktsCount) #define cr_psxmitdatacount CREG_IDX(PSXmitDataCount) #define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount) #define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount) #define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt) #define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt) #define SYM_RMASK(regname, fldname) ((u64) \ QIB_7220_##regname##_##fldname##_RMASK) #define SYM_MASK(regname, fldname) ((u64) \ QIB_7220_##regname##_##fldname##_RMASK << \ QIB_7220_##regname##_##fldname##_LSB) #define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB) #define SYM_FIELD(value, regname, fldname) ((u64) \ (((value) >> SYM_LSB(regname, fldname)) & \ SYM_RMASK(regname, fldname))) #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask) #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask) /* ibcctrl bits */ #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 /* cycle through TS1/TS2 till OK */ #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2 /* wait for TS1, then go on */ #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ #define BLOB_7220_IBCHG 0x81 /* * We could have a single register get/put routine, that takes a group type, * but this is somewhat clearer and cleaner. It also gives us some error * checking. 64 bit register reads should always work, but are inefficient * on opteron (the northbridge always generates 2 separate HT 32 bit reads), * so we use kreg32 wherever possible. User register and counter register * reads are always 32 bit reads, so only one form of those routines. */ /** * qib_read_ureg32 - read 32-bit virtualized per-context register * @dd: device * @regno: register number * @ctxt: context number * * Return the contents of a register that is virtualized to be per context. 
* Returns -1 on errors (not distinguishable from valid contents at * runtime; we may add a separate error variable at some point). */ static inline u32 qib_read_ureg32(const struct qib_devdata *dd, enum qib_ureg regno, int ctxt) { if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) return 0; if (dd->userbase) return readl(regno + (u64 __iomem *) ((char __iomem *)dd->userbase + dd->ureg_align * ctxt)); else return readl(regno + (u64 __iomem *) (dd->uregbase + (char __iomem *)dd->kregbase + dd->ureg_align * ctxt)); } /** * qib_write_ureg - write 32-bit virtualized per-context register * @dd: device * @regno: register number * @value: value * @ctxt: context * * Write the contents of a register that is virtualized to be per context. */ static inline void qib_write_ureg(const struct qib_devdata *dd, enum qib_ureg regno, u64 value, int ctxt) { u64 __iomem *ubase; if (dd->userbase) ubase = (u64 __iomem *) ((char __iomem *) dd->userbase + dd->ureg_align * ctxt); else ubase = (u64 __iomem *) (dd->uregbase + (char __iomem *) dd->kregbase + dd->ureg_align * ctxt); if (dd->kregbase && (dd->flags & QIB_PRESENT)) writeq(value, &ubase[regno]); } /** * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register * @dd: the qlogic_ib device * @regno: the register number to write * @ctxt: the context containing the register * @value: the value to write */ static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd, const u16 regno, unsigned ctxt, u64 value) { qib_write_kreg(dd, regno + ctxt, value); } static inline void write_7220_creg(const struct qib_devdata *dd, u16 regno, u64 value) { if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT)) writeq(value, &dd->cspec->cregbase[regno]); } static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno) { if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) return 0; return readq(&dd->cspec->cregbase[regno]); } static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno) { if 
(!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) return 0; return readl(&dd->cspec->cregbase[regno]); } /* kr_revision bits */ #define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1) #define QLOGIC_IB_R_EMULATORREV_SHIFT 40 /* kr_control bits */ #define QLOGIC_IB_C_RESET (1U << 7) /* kr_intstatus, kr_intclear, kr_intmask bits */ #define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1) #define QLOGIC_IB_I_RCVURG_SHIFT 32 #define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1) #define QLOGIC_IB_I_RCVAVAIL_SHIFT 0 #define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27) #define QLOGIC_IB_C_FREEZEMODE 0x00000002 #define QLOGIC_IB_C_LINKENABLE 0x00000004 #define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL #define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL #define QLOGIC_IB_I_ERROR 0x0000000080000000ULL #define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL #define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL #define QLOGIC_IB_I_GPIO 0x0000000010000000ULL /* variables for sanity checking interrupt and errors */ #define QLOGIC_IB_I_BITSEXTANT \ (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \ (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \ (QLOGIC_IB_I_RCVAVAIL_MASK << \ QLOGIC_IB_I_RCVAVAIL_SHIFT) | \ QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \ QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \ QLOGIC_IB_I_SERDESTRIMDONE) #define IB_HWE_BITSEXTANT \ (HWE_MASK(RXEMemParityErr) | \ HWE_MASK(TXEMemParityErr) | \ (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \ QLOGIC_IB_HWE_PCIE1PLLFAILED | \ QLOGIC_IB_HWE_PCIE0PLLFAILED | \ QLOGIC_IB_HWE_PCIEPOISONEDTLP | \ QLOGIC_IB_HWE_PCIECPLTIMEOUT | \ QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \ QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \ QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \ HWE_MASK(PowerOnBISTFailed) | \ QLOGIC_IB_HWE_COREPLL_FBSLIP | \ QLOGIC_IB_HWE_COREPLL_RFSLIP | \ QLOGIC_IB_HWE_SERDESPLLFAILED | \ HWE_MASK(IBCBusToSPCParityErr) | \ HWE_MASK(IBCBusFromSPCParityErr) | \ QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \ 
QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \ QLOGIC_IB_HWE_SDMAMEMREADERR | \ QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \ QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \ QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \ QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \ QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \ QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \ QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \ QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \ QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR) #define IB_E_BITSEXTANT \ (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \ ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \ ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \ ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \ ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \ ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \ ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \ ERR_MASK(SendSpecialTriggerErr) | \ ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \ ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \ ERR_MASK(SendDroppedDataPktErr) | \ ERR_MASK(SendPioArmLaunchErr) | \ ERR_MASK(SendUnexpectedPktNumErr) | \ ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \ ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \ ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \ ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \ ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \ ERR_MASK(SDmaUnexpDataErr) | \ ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \ ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \ ERR_MASK(SDmaDescAddrMisalignErr) | \ ERR_MASK(InvalidEEPCmd)) /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ #define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL #define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0 #define 
QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL #define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL #define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL #define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL #define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL #define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL #define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL #define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL #define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL #define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL /* specific to this chip */ #define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL #define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL #define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL #define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL #define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL #define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL #define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL #define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL #define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL #define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL #define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL #define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL #define IBA7220_IBCC_LINKCMD_SHIFT 19 /* kr_ibcddrctrl bits */ #define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL #define IBA7220_IBC_DLIDLMC_SHIFT 32 #define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \ SYM_RMASK(IBCDDRCtrl, HRTBT_ENB)) #define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB) #define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8) #define IBA7220_IBC_LREV_MASK 1 #define IBA7220_IBC_LREV_SHIFT 8 #define IBA7220_IBC_RXPOL_MASK 1 #define IBA7220_IBC_RXPOL_SHIFT 7 #define IBA7220_IBC_WIDTH_SHIFT 5 #define IBA7220_IBC_WIDTH_MASK 0x3 #define 
IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT) #define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT) #define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT) #define IBA7220_IBC_SPEED_AUTONEG (1 << 1) #define IBA7220_IBC_SPEED_SDR (1 << 2) #define IBA7220_IBC_SPEED_DDR (1 << 3) #define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1) #define IBA7220_IBC_IBTA_1_2_MASK (1) /* kr_ibcddrstatus */ /* link latency shift is 0, don't bother defining */ #define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff /* kr_extstatus bits */ #define QLOGIC_IB_EXTS_FREQSEL 0x2 #define QLOGIC_IB_EXTS_SERDESSEL 0x4 #define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000 #define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000 /* kr_xgxsconfig bits */ #define QLOGIC_IB_XGXS_RESET 0x5ULL #define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63) /* kr_rcvpktledcnt */ #define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */ #define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */ #define _QIB_GPIO_SDA_NUM 1 #define _QIB_GPIO_SCL_NUM 0 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */ #define QIB_TWSI_TEMP_DEV 0x98 /* HW counter clock is at 4nsec */ #define QIB_7220_PSXMITWAIT_CHECK_RATE 4000 #define IBA7220_R_INTRAVAIL_SHIFT 17 #define IBA7220_R_PKEY_DIS_SHIFT 34 #define IBA7220_R_TAILUPD_SHIFT 35 #define IBA7220_R_CTXTCFG_SHIFT 36 #define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */ /* * the size bits give us 2^N, in KB units. 0 marks as invalid, * and 7 is reserved. 
We currently use only 2KB and 4KB */ #define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */ #define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */ #define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */ #define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */ #define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */ #define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */ #define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */ /* packet rate matching delay multiplier */ static u8 rate_to_delay[2][2] = { /* 1x, 4x */ { 8, 2 }, /* SDR */ { 4, 1 } /* DDR */ }; static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = { [IB_RATE_2_5_GBPS] = 8, [IB_RATE_5_GBPS] = 4, [IB_RATE_10_GBPS] = 2, [IB_RATE_20_GBPS] = 1 }; #define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive) #define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive) /* link training states, from IBC */ #define IB_7220_LT_STATE_DISABLED 0x00 #define IB_7220_LT_STATE_LINKUP 0x01 #define IB_7220_LT_STATE_POLLACTIVE 0x02 #define IB_7220_LT_STATE_POLLQUIET 0x03 #define IB_7220_LT_STATE_SLEEPDELAY 0x04 #define IB_7220_LT_STATE_SLEEPQUIET 0x05 #define IB_7220_LT_STATE_CFGDEBOUNCE 0x08 #define IB_7220_LT_STATE_CFGRCVFCFG 0x09 #define IB_7220_LT_STATE_CFGWAITRMT 0x0a #define IB_7220_LT_STATE_CFGIDLE 0x0b #define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c #define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e #define IB_7220_LT_STATE_RECOVERIDLE 0x0f /* link state machine states from IBC */ #define IB_7220_L_STATE_DOWN 0x0 #define IB_7220_L_STATE_INIT 0x1 #define IB_7220_L_STATE_ARM 0x2 #define IB_7220_L_STATE_ACTIVE 0x3 #define IB_7220_L_STATE_ACT_DEFER 0x4 static const u8 qib_7220_physportstate[0x20] = { [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, 
[IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, [IB_7220_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN, [IB_7220_LT_STATE_CFGRCVFCFG] = IB_PHYSPORTSTATE_CFG_TRAIN, [IB_7220_LT_STATE_CFGWAITRMT] = IB_PHYSPORTSTATE_CFG_TRAIN, [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN, [IB_7220_LT_STATE_RECOVERRETRAIN] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER, [IB_7220_LT_STATE_RECOVERWAITRMT] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER, [IB_7220_LT_STATE_RECOVERIDLE] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER, [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN }; int qib_special_trigger; module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO); MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch"); #define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr) #define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr) #define SYM_MASK_BIT(regname, fldname, bit) ((u64) \ (1ULL << (SYM_LSB(regname, fldname) + (bit)))) #define TXEMEMPARITYERR_PIOBUF \ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0) #define TXEMEMPARITYERR_PIOPBC \ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1) #define TXEMEMPARITYERR_PIOLAUNCHFIFO \ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2) #define RXEMEMPARITYERR_RCVBUF \ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0) #define RXEMEMPARITYERR_LOOKUPQ \ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1) #define RXEMEMPARITYERR_EXPTID \ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2) #define RXEMEMPARITYERR_EAGERTID \ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3) #define RXEMEMPARITYERR_FLAGBUF \ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4) #define RXEMEMPARITYERR_DATAINFO \ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5) #define 
RXEMEMPARITYERR_HDRINFO \ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6) /* 7220 specific hardware errors... */ static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = { /* generic hardware errors */ QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"), QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"), QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF, "TXE PIOBUF Memory Parity"), QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC, "TXE PIOPBC Memory Parity"), QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO, "TXE PIOLAUNCHFIFO Memory Parity"), QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF, "RXE RCVBUF Memory Parity"), QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ, "RXE LOOKUPQ Memory Parity"), QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID, "RXE EAGERTID Memory Parity"), QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID, "RXE EXPTID Memory Parity"), QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF, "RXE FLAGBUF Memory Parity"), QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO, "RXE DATAINFO Memory Parity"), QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO, "RXE HDRINFO Memory Parity"), /* chip-specific hardware errors */ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP, "PCIe Poisoned TLP"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT, "PCIe completion timeout"), /* * In practice, it's unlikely wthat we'll see PCIe PLL, or bus * parity or memory parity error failures, because most likely we * won't be able to talk to the core of the chip. Nonetheless, we * might see them, if they are in parts of the PCIe core that aren't * essential. 
*/ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED, "PCIePLL1"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED, "PCIePLL0"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH, "PCIe XTLH core parity"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM, "PCIe ADM TX core parity"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM, "PCIe ADM RX core parity"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED, "SerDes PLL"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR, "PCIe cpl header queue"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR, "PCIe cpl data queue"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR, "Send DMA memory read"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT, "PCIe serdes Q0 no clock"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT, "PCIe serdes Q1 no clock"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT, "PCIe serdes Q2 no clock"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT, "PCIe serdes Q3 no clock"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR, "DDS RXEQ memory parity"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR, "IB uC memory parity"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR, "PCIe uC oct0 memory parity"), QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR, "PCIe uC oct1 memory parity"), }; #define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID) #define QLOGIC_IB_E_PKTERRS (\ ERR_MASK(SendPktLenErr) | \ ERR_MASK(SendDroppedDataPktErr) | \ ERR_MASK(RcvVCRCErr) | \ ERR_MASK(RcvICRCErr) | \ ERR_MASK(RcvShortPktLenErr) | \ ERR_MASK(RcvEBPErr)) /* Convenience for decoding Send DMA errors */ #define QLOGIC_IB_E_SDMAERRS ( \ ERR_MASK(SDmaGenMismatchErr) | \ ERR_MASK(SDmaOutOfBoundErr) | \ ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \ ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \ ERR_MASK(SDmaDwEnErr) | 
ERR_MASK(SDmaMissingDwErr) | \ ERR_MASK(SDmaUnexpDataErr) | \ ERR_MASK(SDmaDescAddrMisalignErr) | \ ERR_MASK(SDmaDisabledErr) | \ ERR_MASK(SendBufMisuseErr)) /* These are all rcv-related errors which we want to count for stats */ #define E_SUM_PKTERRS \ (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \ ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \ ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \ ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \ ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr)) /* These are all send-related errors which we want to count for stats */ #define E_SUM_ERRS \ (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \ ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \ ERR_MASK(InvalidAddrErr)) /* * this is similar to E_SUM_ERRS, but can't ignore armlaunch, don't ignore * errors not related to freeze and cancelling buffers. Can't ignore * armlaunch because could get more while still cleaning up, and need * to cancel those as they happen. */ #define E_SPKT_ERRS_IGNORE \ (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \ ERR_MASK(SendPktLenErr)) /* * these are errors that can occur when the link changes state while * a packet is being sent or received. This doesn't cover things * like EBP or VCRC that can be the result of a sending having the * link change state, so we receive a "known bad" packet. 
*/ #define E_SUM_LINK_PKTERRS \ (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \ ERR_MASK(RcvUnexpectedCharErr)) static void autoneg_7220_work(struct work_struct *); static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *); /* * Called when we might have an error that is specific to a particular * PIO buffer, and may need to cancel that buffer, so it can be re-used. * because we don't need to force the update of pioavail. */ static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd) { unsigned long sbuf[3]; struct qib_devdata *dd = ppd->dd; /* * It's possible that sendbuffererror could have bits set; might * have already done this as a result of hardware error handling. */ /* read these before writing errorclear */ sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror); sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1); sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2); if (sbuf[0] || sbuf[1] || sbuf[2]) qib_disarm_piobufs_set(dd, sbuf, dd->piobcnt2k + dd->piobcnt4k); } static void qib_7220_txe_recover(struct qib_devdata *dd) { qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n"); qib_disarm_7220_senderrbufs(dd->pport); } /* * This is called with interrupts disabled and sdma_lock held. 
*/ static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) { struct qib_devdata *dd = ppd->dd; u64 set_sendctrl = 0; u64 clr_sendctrl = 0; if (op & QIB_SDMA_SENDCTRL_OP_ENABLE) set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable); else clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable); if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE) set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable); else clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable); if (op & QIB_SDMA_SENDCTRL_OP_HALT) set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt); else clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt); spin_lock(&dd->sendctrl_lock); dd->sendctrl |= set_sendctrl; dd->sendctrl &= ~clr_sendctrl; qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); qib_write_kreg(dd, kr_scratch, 0); spin_unlock(&dd->sendctrl_lock); } static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd, u64 err, char *buf, size_t blen) { static const struct { u64 err; const char *msg; } errs[] = { { ERR_MASK(SDmaGenMismatchErr), "SDmaGenMismatch" }, { ERR_MASK(SDmaOutOfBoundErr), "SDmaOutOfBound" }, { ERR_MASK(SDmaTailOutOfBoundErr), "SDmaTailOutOfBound" }, { ERR_MASK(SDmaBaseErr), "SDmaBase" }, { ERR_MASK(SDma1stDescErr), "SDma1stDesc" }, { ERR_MASK(SDmaRpyTagErr), "SDmaRpyTag" }, { ERR_MASK(SDmaDwEnErr), "SDmaDwEn" }, { ERR_MASK(SDmaMissingDwErr), "SDmaMissingDw" }, { ERR_MASK(SDmaUnexpDataErr), "SDmaUnexpData" }, { ERR_MASK(SDmaDescAddrMisalignErr), "SDmaDescAddrMisalign" }, { ERR_MASK(SendBufMisuseErr), "SendBufMisuse" }, { ERR_MASK(SDmaDisabledErr), "SDmaDisabled" }, }; int i; size_t bidx = 0; for (i = 0; i < ARRAY_SIZE(errs); i++) { if (err & errs[i].err) bidx += scnprintf(buf + bidx, blen - bidx, "%s ", errs[i].msg); } } /* * This is called as part of link down clean up so disarm and flush * all send buffers so that SMP packets can be sent. 
 */
/* Abort/flush the SDMA engine; an Abort interrupt follows. */
static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
	/* This will trigger the Abort interrupt */
	sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
	    QIB_SENDCTRL_AVAIL_BLIP);
	ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
}

/* Program the SDMA descriptor-queue length/generation register. */
static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
{
	/*
	 * Set SendDmaLenGen and clear and set the MSB of the generation
	 * count to enable generation checking and load the internal
	 * generation counter.
	 */
	qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
	qib_write_kreg(ppd->dd, kr_senddmalengen,
		       ppd->sdma_descq_cnt |
		       (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
}

/* (Re)initialize SDMA hardware state: lengen, tail register, head shadow. */
static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
{
	qib_sdma_7220_setlengen(ppd);
	qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
	ppd->sdma_head_dma[0] = 0;
}

/* Error bits that halt/disable the SDMA engine while it is running. */
#define DISABLES_SDMA ( \
	ERR_MASK(SDmaDisabledErr) | \
	ERR_MASK(SDmaBaseErr) | \
	ERR_MASK(SDmaTailOutOfBoundErr) | \
	ERR_MASK(SDmaOutOfBoundErr) | \
	ERR_MASK(SDma1stDescErr) | \
	ERR_MASK(SDmaRpyTagErr) | \
	ERR_MASK(SDmaGenMismatchErr) | \
	ERR_MASK(SDmaDescAddrMisalignErr) | \
	ERR_MASK(SDmaMissingDwErr) | \
	ERR_MASK(SDmaDwEnErr))

/*
 * Handle SDMA error bits: log them, then feed the SDMA state machine
 * under sdma_lock according to the current state.
 */
static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
{
	unsigned long flags;
	struct qib_devdata *dd = ppd->dd;
	char *msg;

	errs &= QLOGIC_IB_E_SDMAERRS;
	msg = dd->cspec->sdmamsgbuf;
	qib_decode_7220_sdma_errs(ppd, errs, msg,
		sizeof dd->cspec->sdmamsgbuf);
	spin_lock_irqsave(&ppd->sdma_lock, flags);

	if (errs & ERR_MASK(SendBufMisuseErr)) {
		unsigned long sbuf[3];

		/* three consecutive kregs hold the send-buffer-error bitmap */
		sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
		sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
		sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);

		qib_dev_err(ppd->dd,
			    "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
			    ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
			    sbuf[0]);
	}

	if (errs & ERR_MASK(SDmaUnexpDataErr))
		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
			    ppd->port);

	switch (ppd->sdma_state.current_state) {
	case qib_sdma_state_s00_hw_down:
		/* not expecting any interrupts */
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		/* handled in intr path */
		break;

	case qib_sdma_state_s20_idle:
		/* not expecting any interrupts */
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		/* not expecting any interrupts */
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		if (errs & ERR_MASK(SDmaDisabledErr))
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e50_hw_cleaned);
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		/* handled in intr path */
		break;

	case qib_sdma_state_s99_running:
		if (errs & DISABLES_SDMA)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e7220_err_halted);
		break;
	}

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */
static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
			       u64 err)
{
	int iserr = 1;

	*buf = '\0';
	if (err & QLOGIC_IB_E_PKTERRS) {
		/* only packet errors -> not a "real" error */
		if (!(err & ~QLOGIC_IB_E_PKTERRS))
			iserr = 0;
		if ((err & ERR_MASK(RcvICRCErr)) &&
		    !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & ERR_MASK(RcvHdrLenErr))
		strlcat(buf, "rhdrlen ", blen);
	if (err & ERR_MASK(RcvBadTidErr))
		strlcat(buf, "rbadtid ", blen);
	if (err & ERR_MASK(RcvBadVersionErr))
		strlcat(buf, "rbadversion ", blen);
	if (err & ERR_MASK(RcvHdrErr))
		strlcat(buf, "rhdr ", blen);
	if (err & ERR_MASK(SendSpecialTriggerErr))
		strlcat(buf, "sendspecialtrigger ", blen);
	if (err & ERR_MASK(RcvLongPktLenErr))
		strlcat(buf, "rlongpktlen ", blen);
	if (err & ERR_MASK(RcvMaxPktLenErr))
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & ERR_MASK(RcvMinPktLenErr))
		strlcat(buf, "rminpktlen ", blen);
	if (err & ERR_MASK(SendMinPktLenErr))
		strlcat(buf, "sminpktlen ", blen);
	if (err & ERR_MASK(RcvFormatErr))
		strlcat(buf, "rformaterr ", blen);
	if (err & ERR_MASK(RcvUnsupportedVLErr))
		strlcat(buf, "runsupvl ", blen);
	if (err & ERR_MASK(RcvUnexpectedCharErr))
		strlcat(buf, "runexpchar ", blen);
	if (err & ERR_MASK(RcvIBFlowErr))
		strlcat(buf, "ribflow ", blen);
	if (err & ERR_MASK(SendUnderRunErr))
		strlcat(buf, "sunderrun ", blen);
	if (err & ERR_MASK(SendPioArmLaunchErr))
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & ERR_MASK(SendUnexpectedPktNumErr))
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & ERR_MASK(SendDroppedSmpPktErr))
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & ERR_MASK(SendMaxPktLenErr))
		strlcat(buf, "smaxpktlen ", blen);
	if (err & ERR_MASK(SendUnsupportedVLErr))
		strlcat(buf, "sunsupVL ", blen);
	if (err & ERR_MASK(InvalidAddrErr))
		strlcat(buf, "invalidaddr ", blen);
	if (err & ERR_MASK(RcvEgrFullErr))
		strlcat(buf, "rcvegrfull ", blen);
	if (err & ERR_MASK(RcvHdrFullErr))
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & ERR_MASK(IBStatusChanged))
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & ERR_MASK(RcvIBLostLinkErr))
		strlcat(buf, "riblostlink ", blen);
	if (err & ERR_MASK(HardwareErr))
		strlcat(buf, "hardware ", blen);
	if (err & ERR_MASK(ResetNegated))
		strlcat(buf, "reset ", blen);
	if (err & QLOGIC_IB_E_SDMAERRS)
		qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
	if (err & ERR_MASK(InvalidEEPCmd))
		strlcat(buf, "invalideepromcmd ", blen);
done:
	return iserr;
}

/* Timer callback: re-enable polling after a link-training "chase" backoff. */
static void reenable_7220_chase(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	ppd->cpspec->chase_timer.expires = 0;
	qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}

/* Watch for the link-training state "chase" and break out of it. */
static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
{
	u8 ibclt;
	unsigned long tnow;

	ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);

	/*
	 * Detect and handle the state chase issue, where we can
	 * get stuck if we are unlucky on timing on both sides of
	 * the link.  If we are, we disable, set a timer, and
	 * then re-enable.
	 */
	switch (ibclt) {
	case IB_7220_LT_STATE_CFGRCVFCFG:
	case IB_7220_LT_STATE_CFGWAITRMT:
	case IB_7220_LT_STATE_TXREVLANES:
	case IB_7220_LT_STATE_CFGENH:
		tnow = jiffies;
		if (ppd->cpspec->chase_end &&
		    time_after(tnow, ppd->cpspec->chase_end)) {
			/* chased too long: disable, and re-poll later */
			ppd->cpspec->chase_end = 0;
			qib_set_ib_7220_lstate(ppd,
				QLOGIC_IB_IBCC_LINKCMD_DOWN,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
			ppd->cpspec->chase_timer.expires = jiffies +
				QIB_CHASE_DIS_TIME;
			add_timer(&ppd->cpspec->chase_timer);
		} else if (!ppd->cpspec->chase_end)
			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
		break;

	default:
		ppd->cpspec->chase_end = 0;
		break;
	}
}

/*
 * Main chip error-interrupt handler: dispatch hw errors, SDMA errors,
 * link-state changes, and wake any waiters.  Runs in interrupt context
 * (called from unlikely_7220_intr()).
 */
static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
{
	char *msg;
	u64 ignore_this_time = 0;
	u64 iserr = 0;
	int log_idx;
	struct qib_pportdata *ppd = dd->pport;
	u64 mask;

	/* don't report errors that are masked */
	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;

	/* do these first, they are most important */
	if (errs & ERR_MASK(HardwareErr))
		qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
	else
		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
				qib_inc_eeprom_err(dd, log_idx, 1);

	if (errs & QLOGIC_IB_E_SDMAERRS)
		sdma_7220_errors(ppd, errs);

	if (errs & ~IB_E_BITSEXTANT)
		qib_dev_err(dd,
			"error interrupt with unknown errors %llx set\n",
			(unsigned long long) (errs & ~IB_E_BITSEXTANT));

	if (errs & E_SUM_ERRS) {
		qib_disarm_7220_senderrbufs(ppd);
		if ((errs & E_SUM_LINK_PKTERRS) &&
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			/*
			 * This can happen when trying to bring the link
			 * up, but the IB link changes state at the "wrong"
			 * time.  The IB logic then complains that the packet
			 * isn't valid.  We don't want to confuse people, so
			 * we just don't print them, except at debug
			 */
			ignore_this_time = errs & E_SUM_LINK_PKTERRS;
		}
	} else if ((errs & E_SUM_LINK_PKTERRS) &&
		   !(ppd->lflags & QIBL_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug
		 */
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	qib_write_kreg(dd, kr_errclear, errs);

	errs &= ~ignore_this_time;
	if (!errs)
		goto done;

	/*
	 * The ones we mask off are handled specially below
	 * or above.  Also mask SDMADISABLED by default as it
	 * is too chatty.
	 */
	mask = ERR_MASK(IBStatusChanged) |
		ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
		ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);

	qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);

	if (errs & E_SUM_PKTERRS)
		qib_stats.sps_rcverrs++;
	if (errs & E_SUM_ERRS)
		qib_stats.sps_txerrs++;
	iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
			 ERR_MASK(SDmaDisabledErr));

	if (errs & ERR_MASK(IBStatusChanged)) {
		u64 ibcs;

		ibcs = qib_read_kreg64(dd, kr_ibcstatus);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			handle_7220_chase(ppd, ibcs);

		/* Update our picture of width and speed from chip */
		ppd->link_width_active =
			((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
			    IB_WIDTH_4X : IB_WIDTH_1X;
		ppd->link_speed_active =
			((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
			    QIB_IB_DDR : QIB_IB_SDR;

		/*
		 * Since going into a recovery state causes the link state
		 * to go down and since recovery is transitory, it is better
		 * if we "miss" ever seeing the link training state go into
		 * recovery (i.e., ignore this transition for link state
		 * special handling purposes) without updating lastibcstat.
		 */
		if (qib_7220_phys_portstate(ibcs) !=
					    IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
			qib_handle_e_ibstatuschanged(ppd, ibcs);
	}

	if (errs & ERR_MASK(ResetNegated)) {
		qib_dev_err(dd,
			"Got reset, requires re-init (unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED; /* needs re-init */
		/* mark as having had error */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
	}

	if (*msg && iserr)
		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

	if (ppd->state_wanted & ppd->lflags)
		wake_up_interruptible(&ppd->state_wait);

	/*
	 * If there were hdrq or egrfull errors, wake up any processes
	 * waiting in poll.  We used to try to check which contexts had
	 * the overflow, but given the cost of that and the chip reads
	 * to support it, it's better to just wake everybody up if we
	 * get an overflow; waiters can poll again if it's not them.
	 */
	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
		qib_handle_urcv(dd, ~0U);
		if (errs & ERR_MASK(RcvEgrFullErr))
			qib_stats.sps_buffull++;
		else
			qib_stats.sps_hdrfull++;
	}
done:
	return;
}

/* enable/disable chip from delivering interrupts */
static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		/* leave interrupts off permanently if flagged bad */
		if (dd->flags & QIB_BADINTR)
			return;
		qib_write_kreg(dd, kr_intmask, ~0ULL);
		/* force re-interrupt of any pending interrupts. */
		qib_write_kreg(dd, kr_intclear, 0ULL);
	} else
		qib_write_kreg(dd, kr_intmask, 0ULL);
}

/*
 * Try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
* Make sure that we don't lose any important interrupts by using the chip * feature that says that writing 0 to a bit in *clear that is set in * *status will cause an interrupt to be generated again (if allowed by * the *mask value). * This is in chip-specific code because of all of the register accesses, * even though the details are similar on most chips. */ static void qib_7220_clear_freeze(struct qib_devdata *dd) { /* disable error interrupts, to avoid confusion */ qib_write_kreg(dd, kr_errmask, 0ULL); /* also disable interrupts; errormask is sometimes overwriten */ qib_7220_set_intr_state(dd, 0); qib_cancel_sends(dd->pport); /* clear the freeze, and be sure chip saw it */ qib_write_kreg(dd, kr_control, dd->control); qib_read_kreg32(dd, kr_scratch); /* force in-memory update now we are out of freeze */ qib_force_pio_avail_update(dd); /* * force new interrupt if any hwerr, error or interrupt bits are * still set, and clear "safe" send packet errors related to freeze * and cancelling sends. Re-enable error interrupts before possible * force of re-interrupt on pending interrupts. */ qib_write_kreg(dd, kr_hwerrclear, 0ULL); qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE); qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); qib_7220_set_intr_state(dd, 1); } /** * qib_7220_handle_hwerrors - display hardware errors. * @dd: the qlogic_ib device * @msg: the output buffer * @msgl: the size of the output buffer * * Use same msg buffer as regular errors to avoid excessive stack * use. Most hardware errors are catastrophic, but for right now, * we'll print them and continue. We reuse the same message buffer as * handle_7220_errors() to avoid excessive stack usage. 
 */
static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 bits, ctrl;
	int isfatal = 0;
	char *bitsmsg;
	int log_idx;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		goto bail;
	if (hwerrs == ~0ULL) {
		/* all-ones read: register access itself failed */
		qib_dev_err(dd,
			"Read of hardware error status failed (all bits set); ignoring\n");
		goto bail;
	}
	qib_stats.sps_hwerrs++;

	/*
	 * Always clear the error status register, except MEMBISTFAIL,
	 * regardless of whether we continue or stop using the chip.
	 * We want that set so we know it failed, even across driver reload.
	 * We'll still ignore it in the hwerrmask.  We do this partly for
	 * diagnostics, but also for support.
	 */
	qib_write_kreg(dd, kr_hwerrclear,
		       hwerrs & ~HWE_MASK(PowerOnBISTFailed));

	hwerrs &= dd->cspec->hwerrmask;

	/* We log some errors to EEPROM, check if we have any of those. */
	for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
		if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
			qib_inc_eeprom_err(dd, log_idx, 1);
	if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
		       RXE_PARITY))
		qib_devinfo(dd->pcidev,
			"Hardware error: hwerr=0x%llx (cleared)\n",
			(unsigned long long) hwerrs);

	if (hwerrs & ~IB_HWE_BITSEXTANT)
		qib_dev_err(dd,
			"hwerror interrupt with unknown errors %llx set\n",
			(unsigned long long) (hwerrs & ~IB_HWE_BITSEXTANT));

	if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
		qib_sd7220_clr_ibpar(dd);

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
		/*
		 * Parity errors in send memory are recoverable by h/w
		 * just do housekeeping, exit freeze mode and continue.
		 */
		if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
			      TXEMEMPARITYERR_PIOPBC)) {
			qib_7220_txe_recover(dd);
			hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
				    TXEMEMPARITYERR_PIOPBC);
		}
		if (hwerrs)
			isfatal = 1;
		else
			qib_7220_clear_freeze(dd);
	}

	*msg = '\0';

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		strlcat(msg,
			"[Memory BIST test failed, InfiniPath hardware unusable]",
			msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}
	qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
			    ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);

	bitsmsg = dd->cspec->bitsmsgbuf;
	if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
		      QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
		bits = (u32) ((hwerrs >>
			       QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
			      QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
			 "[PCIe Mem Parity Errs %x] ", bits);
		strlcat(msg, bitsmsg, msgl);
	}

#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
		       QLOGIC_IB_HWE_COREPLL_RFSLIP)

	if (hwerrs & _QIB_PLL_FAIL) {
		isfatal = 1;
		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
			 "[PLL failed (%llx), InfiniPath hardware unusable]",
			 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
		strlcat(msg, bitsmsg, msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
		/*
		 * If it occurs, it is left masked since the external
		 * ("eternal" in the original comment) interface is unused.
		 */
		dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	qib_dev_err(dd, "%s hardware error\n", msg);

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd,
			"Fatal Hardware Error, no longer usable, SN %.16s\n",
			dd->serial);
		/*
		 * For /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
bail:;
}

/**
 * qib_7220_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those errors bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask
 */
static void qib_7220_init_hwerrors(struct qib_devdata *dd)
{
	u64 val;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);

	if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
			 QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
		qib_dev_err(dd, "MemBIST did not complete!\n");
	if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
		qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");

	val = ~0ULL; /* default to all hwerrors become interrupts, */

	val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
	dd->cspec->hwerrmask = val;

	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. */
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	/* clear any interrupts up to this point (ints still not enabled) */
	qib_write_kreg(dd, kr_intclear, ~0ULL);
}

/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based.  There is no
 * reference counting, but that's also fine, given the intended use.
* Only chip-specific because it's all register accesses */ static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable) { if (enable) { qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr)); dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr); } else dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr); qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); } /* * Formerly took parameter <which> in pre-shifted, * pre-merged form with LinkCmd and LinkInitCmd * together, and assuming the zero was NOP. */ static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd, u16 linitcmd) { u64 mod_wd; struct qib_devdata *dd = ppd->dd; unsigned long flags; if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) { /* * If we are told to disable, note that so link-recovery * code does not attempt to bring us back up. */ spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags |= QIBL_IB_LINK_DISABLED; spin_unlock_irqrestore(&ppd->lflags_lock, flags); } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) { /* * Any other linkinitcmd will lead to LINKDOWN and then * to INIT (if all is well), so clear flag to let * link-recovery code attempt to bring us back up. */ spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags &= ~QIBL_IB_LINK_DISABLED; spin_unlock_irqrestore(&ppd->lflags_lock, flags); } mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) | (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd); /* write to chip to prevent back-to-back writes of ibc reg */ qib_write_kreg(dd, kr_scratch, 0); } /* * All detailed interaction with the SerDes has been moved to qib_sd7220.c * * The portion of IBA7220-specific bringup_serdes() that actually deals with * registers and memory within the SerDes itself is qib_sd7220_init(). 
 */

/**
 * qib_7220_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
 *
 * Returns 0 on success (link enabled), or the error from
 * qib_sd7220_init() on failure (IBC left disabled).
 */
static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, prev_val, guid, ibc;
	int ret = 0;

	/* Put IBC in reset, sends disabled */
	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
	qib_write_kreg(dd, kr_control, 0ULL);

	if (qib_compat_ddr_negotiate) {
		/* snapshot error counters for later delta adjustment */
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap =
			read_7220_creg32(dd, cr_iblinkerrrecov);
	}

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
	/*
	 * How often flowctrl sent.  More or less in usecs; balance against
	 * watermark value, so that in theory senders always get a flow
	 * control update in time to not let the IB link go idle.
	 */
	ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
	/* max error tolerance */
	ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
	/* use "real" buffer space for */
	ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
	ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */

	/* initially come up waiting for TS1, without sending anything. */
	val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	qib_write_kreg(dd, kr_ibcctrl, val);

	if (!ppd->cpspec->ibcddrctrl) {
		/* not on re-init after reset */
		ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);

		if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
			ppd->cpspec->ibcddrctrl |=
				IBA7220_IBC_SPEED_AUTONEG_MASK |
				IBA7220_IBC_IBTA_1_2_MASK;
		else
			ppd->cpspec->ibcddrctrl |=
				ppd->link_speed_enabled == QIB_IB_DDR ?
				IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
		    (IB_WIDTH_1X | IB_WIDTH_4X))
			ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
		else
			ppd->cpspec->ibcddrctrl |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
				IBA7220_IBC_WIDTH_4X_ONLY :
				IBA7220_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
		ppd->cpspec->ibcddrctrl |=
			IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
		ppd->cpspec->ibcddrctrl |=
			IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;

		/* enable automatic lane reversal detection for receive */
		ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
	} else
		/* write to chip to prevent back-to-back writes of ibc reg */
		qib_write_kreg(dd, kr_scratch, 0);

	qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	qib_write_kreg(dd, kr_ncmodectrl, 0Ull);
	qib_write_kreg(dd, kr_scratch, 0);

	ret = qib_sd7220_init(dd);

	val = qib_read_kreg64(dd, kr_xgxs_cfg);
	prev_val = val;
	val |= QLOGIC_IB_XGXS_FC_SAFE;
	if (val != prev_val) {
		qib_write_kreg(dd, kr_xgxs_cfg, val);
		qib_read_kreg32(dd, kr_scratch);
	}
	if (val & QLOGIC_IB_XGXS_RESET)
		val &= ~QLOGIC_IB_XGXS_RESET;
	if (val != prev_val)
		qib_write_kreg(dd, kr_xgxs_cfg, val);

	/* first time through, set port guid */
	if (!ppd->guid)
		ppd->guid = dd->base_guid;
	guid = be64_to_cpu(ppd->guid);

	qib_write_kreg(dd, kr_hrtbt_guid, guid);
	if (!ret) {
		dd->control |= QLOGIC_IB_C_LINKENABLE;
		qib_write_kreg(dd, kr_control, dd->control);
	} else
		/* write to chip to prevent back-to-back writes of ibc reg */
		qib_write_kreg(dd, kr_scratch, 0);
	return ret;
}

/**
 * qib_7220_quiet_serdes - set serdes to txidle
 * @ppd: physical port of the qlogic_ib device
 * Called when driver is being unloaded
 */
static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
{
	u64 val;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	/* disable IBC */
	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
	qib_write_kreg(dd, kr_control,
		       dd->control | QLOGIC_IB_C_FREEZEMODE);

	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.data) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);

	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog) {
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7220_creg32(dd, cr_ibsymbolerr);
			/*
			 * NOTE(review): "val -= val - snap" leaves val ==
			 * snap, i.e. it rewinds the counter to the snapshot
			 * taken in bringup_serdes before subtracting the
			 * accumulated delta.  Odd-looking but deliberate.
			 */
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->ibsymsnap;
			val -= ppd->cpspec->ibsymdelta;
			write_7220_creg(dd, cr_ibsymbolerr, val);
		}
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7220_creg32(dd, cr_iblinkerrrecov);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
			write_7220_creg(dd, cr_iblinkerrrecov, val);
		}

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}
	qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);

	shutdown_7220_relock_poll(ppd->dd);
	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
	val |= QLOGIC_IB_XGXS_RESET;
	qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
}

/**
 * qib_setup_7220_setextled - set the state of the two external LEDs
 * @ppd: physical port on the qlogic_ib device
 * @on: whether the link is up or not
 *
 * The exact combo of LEDs if on is true is determined by looking
 * at the ibcstatus.
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state),
 *
 * Note:  We try to match the Mellanox HCA LED behavior as best
 * we can.
 * Green indicates physical link state is OK (something is
 * plugged in, and we can train).
 * Amber indicates the link is logically up (ACTIVE).
 * Mellanox further blinks the amber LED to indicate data packet
 * activity, but we have no hardware support for that, so it would
 * require waking up every 10-20 msecs and checking the counters
 * on the chip, and then turning the LED off if appropriate.  That's
 * visible overhead, so not something we will do.
 */
static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
{
	struct qib_devdata *dd = ppd->dd;
	u64 extctl, ledblink = 0, val, lst, ltst;
	unsigned long flags;

	/*
	 * The diags use the LED to indicate diag info, so we leave
	 * the external LED alone when the diags are running.
	 */
	if (dd->diag_client)
		return;

	if (ppd->led_override) {
		/*
		 * NOTE: the trailing comma after DISABLED is the comma
		 * operator, not a typo-for-semicolon bug: both assignments
		 * still execute.  Preserved as-is.
		 */
		ltst = (ppd->led_override & QIB_LED_PHYS) ?
			IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED,
		lst = (ppd->led_override & QIB_LED_LOG) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
	} else if (on) {
		val = qib_read_kreg64(dd, kr_ibcstatus);
		ltst = qib_7220_phys_portstate(val);
		lst = qib_7220_iblink_state(val);
	} else {
		ltst = 0;
		lst = 0;
	}

	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	extctl = dd->cspec->extctrl &
		~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
		  SYM_MASK(EXTCtrl, LEDPriPortYellowOn));

	if (ltst == IB_PHYSPORTSTATE_LINKUP) {
		extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
		/*
		 * counts are in chip clock (4ns) periods.
		 * This is 1/16 sec (66.6ms) on,
		 * 3/16 sec (187.5 ms) off, with packets rcvd
		 */
		ledblink = ((66600 * 1000UL / 4) <<
			    IBA7220_LEDBLINK_ON_SHIFT)
			| ((187500 * 1000UL / 4) <<
			   IBA7220_LEDBLINK_OFF_SHIFT);
	}
	if (lst == IB_PORT_ACTIVE)
		extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
	dd->cspec->extctrl = extctl;
	qib_write_kreg(dd, kr_extctrl, extctl);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);

	if (ledblink) /* blink the LED on packet receive */
		qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
}

/* Release the device IRQ (if registered) and tear down MSI. */
static void qib_7220_free_irq(struct qib_devdata *dd)
{
	if (dd->cspec->irq) {
		free_irq(dd->cspec->irq, dd);
		dd->cspec->irq = 0;
	}
	qib_nomsi(dd);
}

/*
 * qib_setup_7220_cleanup - clean up any per-chip chip-specific stuff
 * @dd: the qlogic_ib device
 *
 * This is called during driver unload.
 */
static void qib_setup_7220_cleanup(struct qib_devdata *dd)
{
	qib_7220_free_irq(dd);
	kfree(dd->cspec->cntrs);
	kfree(dd->cspec->portcntrs);
}

/*
 * This is only called for SDmaInt.
 * SDmaDisabled is handled on the error path.
*/ static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat) { unsigned long flags; spin_lock_irqsave(&ppd->sdma_lock, flags); switch (ppd->sdma_state.current_state) { case qib_sdma_state_s00_hw_down: break; case qib_sdma_state_s10_hw_start_up_wait: __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started); break; case qib_sdma_state_s20_idle: break; case qib_sdma_state_s30_sw_clean_up_wait: break; case qib_sdma_state_s40_hw_clean_up_wait: break; case qib_sdma_state_s50_hw_halt_wait: __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted); break; case qib_sdma_state_s99_running: /* too chatty to print here */ __qib_sdma_intr(ppd); break; } spin_unlock_irqrestore(&ppd->sdma_lock, flags); } static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint) { unsigned long flags; spin_lock_irqsave(&dd->sendctrl_lock, flags); if (needint) { if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) goto done; /* * blip the availupd off, next write will be on, so * we ensure an avail update, regardless of threshold or * buffers becoming free, whenever we want an interrupt */ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl & ~SYM_MASK(SendCtrl, SendBufAvailUpd)); qib_write_kreg(dd, kr_scratch, 0ULL); dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); } else dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); qib_write_kreg(dd, kr_scratch, 0ULL); done: spin_unlock_irqrestore(&dd->sendctrl_lock, flags); } /* * Handle errors and unusual events first, separate function * to improve cache hits for fast path interrupt handling. 
*/ static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat) { if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT)) qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n", istat & ~QLOGIC_IB_I_BITSEXTANT); if (istat & QLOGIC_IB_I_GPIO) { u32 gpiostatus; /* * Boards for this chip currently don't use GPIO interrupts, * so clear by writing GPIOstatus to GPIOclear, and complain * to alert developer. To avoid endless repeats, clear * the bits in the mask, since there is some kind of * programming error or chip problem. */ gpiostatus = qib_read_kreg32(dd, kr_gpio_status); /* * In theory, writing GPIOstatus to GPIOclear could * have a bad side-effect on some diagnostic that wanted * to poll for a status-change, but the various shadows * make that problematic at best. Diags will just suppress * all GPIO interrupts during such tests. */ qib_write_kreg(dd, kr_gpio_clear, gpiostatus); if (gpiostatus) { const u32 mask = qib_read_kreg32(dd, kr_gpio_mask); u32 gpio_irq = mask & gpiostatus; /* * A bit set in status and (chip) Mask register * would cause an interrupt. Since we are not * expecting any, report it. Also check that the * chip reflects our shadow, report issues, * and refresh from the shadow. 
*/ /* * Clear any troublemakers, and update chip * from shadow */ dd->cspec->gpio_mask &= ~gpio_irq; qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); } } if (istat & QLOGIC_IB_I_ERROR) { u64 estat; qib_stats.sps_errints++; estat = qib_read_kreg64(dd, kr_errstatus); if (!estat) qib_devinfo(dd->pcidev, "error interrupt (%Lx), but no error bits set!\n", istat); else handle_7220_errors(dd, estat); } } static irqreturn_t qib_7220intr(int irq, void *data) { struct qib_devdata *dd = data; irqreturn_t ret; u64 istat; u64 ctxtrbits; u64 rmask; unsigned i; if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) { /* * This return value is not great, but we do not want the * interrupt core code to remove our interrupt handler * because we don't appear to be handling an interrupt * during a chip reset. */ ret = IRQ_HANDLED; goto bail; } istat = qib_read_kreg64(dd, kr_intstatus); if (unlikely(!istat)) { ret = IRQ_NONE; /* not our interrupt, or already handled */ goto bail; } if (unlikely(istat == -1)) { qib_bad_intrstatus(dd); /* don't know if it was our interrupt or not */ ret = IRQ_NONE; goto bail; } this_cpu_inc(*dd->int_counter); if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT | QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR))) unlikely_7220_intr(dd, istat); /* * Clear the interrupt bits we found set, relatively early, so we * "know" know the chip will have seen this by the time we process * the queue, and will re-interrupt if necessary. The processor * itself won't take the interrupt again until we return. */ qib_write_kreg(dd, kr_intclear, istat); /* * Handle kernel receive queues before checking for pio buffers * available since receives can overflow; piobuf waiters can afford * a few extra cycles, since they were waiting anyway. 
*/ ctxtrbits = istat & ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) | (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT)); if (ctxtrbits) { rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) | (1ULL << QLOGIC_IB_I_RCVURG_SHIFT); for (i = 0; i < dd->first_user_ctxt; i++) { if (ctxtrbits & rmask) { ctxtrbits &= ~rmask; qib_kreceive(dd->rcd[i], NULL, NULL); } rmask <<= 1; } if (ctxtrbits) { ctxtrbits = (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) | (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT); qib_handle_urcv(dd, ctxtrbits); } } /* only call for SDmaInt */ if (istat & QLOGIC_IB_I_SDMAINT) sdma_7220_intr(dd->pport, istat); if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED)) qib_ib_piobufavail(dd); ret = IRQ_HANDLED; bail: return ret; } /* * Set up our chip-specific interrupt handler. * The interrupt type has already been setup, so * we just need to do the registration and error checking. * If we are using MSI interrupts, we may fall back to * INTx later, if the interrupt handler doesn't get called * within 1/2 second (see verify_interrupt()). */ static void qib_setup_7220_interrupt(struct qib_devdata *dd) { if (!dd->cspec->irq) qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't work\n"); else { int ret = request_irq(dd->cspec->irq, qib_7220intr, dd->msi_lo ? 0 : IRQF_SHARED, QIB_DRV_NAME, dd); if (ret) qib_dev_err(dd, "Couldn't setup %s interrupt (irq=%d): %d\n", dd->msi_lo ? 
"MSI" : "INTx", dd->cspec->irq, ret);
	}
}

/**
 * qib_7220_boardname - fill in the board name
 * @dd: the qlogic_ib device
 *
 * info is based on the board revision register.
 * Derives the board name from the BoardID field, allocates a copy into
 * dd->boardname, warns on unsupported silicon revisions, and formats the
 * full version string into dd->boardversion.
 */
static void qib_7220_boardname(struct qib_devdata *dd)
{
	char *n;
	u32 boardid, namelen;

	boardid = SYM_FIELD(dd->revision, Revision, BoardID);

	switch (boardid) {
	case 1:
		n = "InfiniPath_QLE7240";
		break;
	case 2:
		n = "InfiniPath_QLE7280";
		break;
	default:
		/* unrecognized BoardID: keep going with a placeholder name */
		qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
		n = "Unknown_InfiniPath_7220";
		break;
	}

	/* +1 for the terminating NUL; snprintf below always terminates */
	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
		snprintf(dd->boardname, namelen, "%s", n);

	/* only revision 5.1 and 5.2 silicon is supported by this driver */
	if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
		qib_dev_err(dd,
			"Unsupported InfiniPath hardware revision %u.%u!\n",
			dd->majrev, dd->minrev);

	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
		 dd->majrev, dd->minrev,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
}

/*
 * This routine sleeps, so it can only be called from user context, not
 * from interrupt context.
 */
static int qib_setup_7220_reset(struct qib_devdata *dd)
{
	u64 val;
	int i;
	int ret;
	u16 cmdval;
	u8 int_line, clinesz;
	unsigned long flags;

	/* save PCI command/INTx-line/cacheline state to restore after reset */
	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);

	/* Use dev_err so it shows up in logs, etc. */
	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);

	/* no interrupts till re-initted */
	qib_7220_set_intr_state(dd, 0);

	/* reset the IB symbol/link-error delta bookkeeping */
	dd->pport->cpspec->ibdeltainprog = 0;
	dd->pport->cpspec->ibsymdelta = 0;
	dd->pport->cpspec->iblnkerrdelta = 0;

	/*
	 * Keep chip from being accessed until we are ready. Use
	 * writeq() directly, to allow the write even though QIB_PRESENT
	 * isn't set.
*/ dd->flags &= ~(QIB_INITTED | QIB_PRESENT); /* so we check interrupts work again */ dd->z_int_counter = qib_int_counter(dd); val = dd->control | QLOGIC_IB_C_RESET; writeq(val, &dd->kregbase[kr_control]); mb(); /* prevent compiler reordering around actual reset */ for (i = 1; i <= 5; i++) { /* * Allow MBIST, etc. to complete; longer on each retry. * We sometimes get machine checks from bus timeout if no * response, so for now, make it *really* long. */ msleep(1000 + (1 + i) * 2000); qib_pcie_reenable(dd, cmdval, int_line, clinesz); /* * Use readq directly, so we don't need to mark it as PRESENT * until we get a successful indication that all is well. */ val = readq(&dd->kregbase[kr_revision]); if (val == dd->revision) { dd->flags |= QIB_PRESENT; /* it's back */ ret = qib_reinit_intr(dd); goto bail; } } ret = 0; /* failed */ bail: if (ret) { if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; continuing anyway\n"); /* hold IBC in reset, no sends, etc till later */ qib_write_kreg(dd, kr_control, 0ULL); /* clear the reset error, init error/hwerror mask */ qib_7220_init_hwerrors(dd); /* do setup similar to speed or link-width changes */ if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) dd->cspec->presets_needed = 1; spin_lock_irqsave(&dd->pport->lflags_lock, flags); dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY; dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED; spin_unlock_irqrestore(&dd->pport->lflags_lock, flags); } return ret; } /** * qib_7220_put_tid - write a TID to the chip * @dd: the qlogic_ib device * @tidptr: pointer to the expected TID (in chip) to update * @tidtype: 0 for eager, 1 for expected * @pa: physical address of in memory buffer; tidinvalid if freeing */ static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, u32 type, unsigned long pa) { if (pa != dd->tidinvalid) { u64 chippa = pa >> IBA7220_TID_PA_SHIFT; /* paranoia checks */ if (pa != (chippa << 
IBA7220_TID_PA_SHIFT)) {
			qib_dev_err(dd,
				"Physaddr %lx not 2KB aligned!\n", pa);
			return;
		}
		/* address must fit in the TID's physical-address field */
		if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
			qib_dev_err(dd,
				"Physical page address 0x%lx larger than supported\n",
				pa);
			return;
		}

		if (type == RCVHQ_RCV_TYPE_EAGER)
			chippa |= dd->tidtemplate;
		else /* for now, always full 4KB page */
			chippa |= IBA7220_TID_SZ_4K;
		pa = chippa;
	}
	writeq(pa, tidptr);
	mmiowb();	/* order the TID write before any later MMIO */
}

/**
 * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
 * @dd: the qlogic_ib device
 * @rcd: the context data whose TID entries are invalidated
 *
 * clear all TID entries for a ctxt, expected and eager.
 * Used from qib_close(). On this chip, TIDs are only 32 bits,
 * not 64, but they are still on 64 bit boundaries, so tidbase
 * is declared as u64 * for the pointer math, even though we write 32 bits
 */
static void qib_7220_clear_tids(struct qib_devdata *dd,
				struct qib_ctxtdata *rcd)
{
	u64 __iomem *tidbase;
	unsigned long tidinv;
	u32 ctxt;
	int i;

	if (!dd->kregbase || !rcd)
		return;

	ctxt = rcd->ctxt;

	tidinv = dd->tidinvalid;
	/* expected-TID array for this context */
	tidbase = (u64 __iomem *)
		((char __iomem *)(dd->kregbase) +
		 dd->rcvtidbase +
		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));

	for (i = 0; i < dd->rcvtidcnt; i++)
		qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
				 tidinv);

	/* eager-TID array for this context */
	tidbase = (u64 __iomem *)
		((char __iomem *)(dd->kregbase) +
		 dd->rcvegrbase +
		 rcd->rcvegr_tid_base * sizeof(*tidbase));

	for (i = 0; i < rcd->rcvegrcnt; i++)
		qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
				 tidinv);
}

/**
 * qib_7220_tidtemplate - setup constants for TID updates
 * @dd: the qlogic_ib device
 *
 * We setup stuff that we use a lot, to avoid calculating each time
 */
static void qib_7220_tidtemplate(struct qib_devdata *dd)
{
	/* only 2K and 4K eager buffer sizes get a size template here */
	if (dd->rcvegrbufsize == 2048)
		dd->tidtemplate = IBA7220_TID_SZ_2K;
	else if (dd->rcvegrbufsize == 4096)
		dd->tidtemplate = IBA7220_TID_SZ_4K;
	dd->tidinvalid = 0;
}

/**
 * qib_init_7220_get_base_info - set chip-specific flags for user code
 * @rcd: the qlogic_ib ctxt
 * @kbase: qib_base_info pointer
 *
 * We set the PCIE flag
because the lower bandwidth on PCIe vs * HyperTransport can affect some user packet algorithims. */ static int qib_7220_get_base_info(struct qib_ctxtdata *rcd, struct qib_base_info *kinfo) { kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA; if (rcd->dd->flags & QIB_USE_SPCL_TRIG) kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER; return 0; } static struct qib_message_header * qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr) { u32 offset = qib_hdrget_offset(rhf_addr); return (struct qib_message_header *) (rhf_addr - dd->rhf_offset + offset); } static void qib_7220_config_ctxts(struct qib_devdata *dd) { unsigned long flags; u32 nchipctxts; nchipctxts = qib_read_kreg32(dd, kr_portcnt); dd->cspec->numctxts = nchipctxts; if (qib_n_krcv_queues > 1) { dd->qpn_mask = 0x3e; dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports; if (dd->first_user_ctxt > nchipctxts) dd->first_user_ctxt = nchipctxts; } else dd->first_user_ctxt = dd->num_pports; dd->n_krcv_queues = dd->first_user_ctxt; if (!qib_cfgctxts) { int nctxts = dd->first_user_ctxt + num_online_cpus(); if (nctxts <= 5) dd->ctxtcnt = 5; else if (nctxts <= 9) dd->ctxtcnt = 9; else if (nctxts <= nchipctxts) dd->ctxtcnt = nchipctxts; } else if (qib_cfgctxts <= nchipctxts) dd->ctxtcnt = qib_cfgctxts; if (!dd->ctxtcnt) /* none of the above, set to max */ dd->ctxtcnt = nchipctxts; /* * Chip can be configured for 5, 9, or 17 ctxts, and choice * affects number of eager TIDs per ctxt (1K, 2K, 4K). * Lock to be paranoid about later motion, etc. 
*/ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); if (dd->ctxtcnt > 9) dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT; else if (dd->ctxtcnt > 5) dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT; /* else configure for default 5 receive ctxts */ if (dd->qpn_mask) dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB; qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); /* kr_rcvegrcnt changes based on the number of contexts enabled */ dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT); } static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which) { int lsb, ret = 0; u64 maskr; /* right-justified mask */ switch (which) { case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */ ret = ppd->link_width_enabled; goto done; case QIB_IB_CFG_LWID: /* Get currently active Link-width */ ret = ppd->link_width_active; goto done; case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ ret = ppd->link_speed_enabled; goto done; case QIB_IB_CFG_SPD: /* Get current Link spd */ ret = ppd->link_speed_active; goto done; case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ lsb = IBA7220_IBC_RXPOL_SHIFT; maskr = IBA7220_IBC_RXPOL_MASK; break; case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ lsb = IBA7220_IBC_LREV_SHIFT; maskr = IBA7220_IBC_LREV_MASK; break; case QIB_IB_CFG_LINKLATENCY: ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus) & IBA7220_DDRSTAT_LINKLAT_MASK; goto done; case QIB_IB_CFG_OP_VLS: ret = ppd->vls_operational; goto done; case QIB_IB_CFG_VL_HIGH_CAP: ret = 0; goto done; case QIB_IB_CFG_VL_LOW_CAP: ret = 0; goto done; case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, OverrunThreshold); goto done; case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, PhyerrThreshold); goto done; case QIB_IB_CFG_LINKDEFAULT: /* IB link 
default (sleep/poll) */ /* will only take effect when the link state changes */ ret = (ppd->cpspec->ibcctrl & SYM_MASK(IBCCtrl, LinkDownDefaultState)) ? IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL; goto done; case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ lsb = IBA7220_IBC_HRTBT_SHIFT; maskr = IBA7220_IBC_HRTBT_MASK; break; case QIB_IB_CFG_PMA_TICKS: /* * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs * Since the clock is always 250MHz, the value is 1 or 0. */ ret = (ppd->link_speed_active == QIB_IB_DDR); goto done; default: ret = -EINVAL; goto done; } ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr); done: return ret; } static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val) { struct qib_devdata *dd = ppd->dd; u64 maskr; /* right-justified mask */ int lsb, ret = 0, setforce = 0; u16 lcmd, licmd; unsigned long flags; u32 tmp = 0; switch (which) { case QIB_IB_CFG_LIDLMC: /* * Set LID and LMC. Combined to avoid possible hazard * caller puts LMC in 16MSbits, DLID in 16LSbits of val */ lsb = IBA7220_IBC_DLIDLMC_SHIFT; maskr = IBA7220_IBC_DLIDLMC_MASK; break; case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */ /* * As with speed, only write the actual register if * the link is currently down, otherwise takes effect * on next link change. */ ppd->link_width_enabled = val; if (!(ppd->lflags & QIBL_LINKDOWN)) goto bail; /* * We set the QIBL_IB_FORCE_NOTIFY bit so updown * will get called because we want update * link_width_active, and the change may not take * effect for some time (if we are in POLL), so this * flag will force the updown routine to be called * on the next ibstatuschange down interrupt, even * if it's not an down->up transition. */ val--; /* convert from IB to chip */ maskr = IBA7220_IBC_WIDTH_MASK; lsb = IBA7220_IBC_WIDTH_SHIFT; setforce = 1; break; case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */ /* * If we turn off IB1.2, need to preset SerDes defaults, * but not right now. 
Set a flag for the next time * we command the link down. As with width, only write the * actual register if the link is currently down, otherwise * takes effect on next link change. Since setting is being * explicitly requested (via MAD or sysfs), clear autoneg * failure status if speed autoneg is enabled. */ ppd->link_speed_enabled = val; if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) && !(val & (val - 1))) dd->cspec->presets_needed = 1; if (!(ppd->lflags & QIBL_LINKDOWN)) goto bail; /* * We set the QIBL_IB_FORCE_NOTIFY bit so updown * will get called because we want update * link_speed_active, and the change may not take * effect for some time (if we are in POLL), so this * flag will force the updown routine to be called * on the next ibstatuschange down interrupt, even * if it's not an down->up transition. */ if (val == (QIB_IB_SDR | QIB_IB_DDR)) { val = IBA7220_IBC_SPEED_AUTONEG_MASK | IBA7220_IBC_IBTA_1_2_MASK; spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; spin_unlock_irqrestore(&ppd->lflags_lock, flags); } else val = val == QIB_IB_DDR ? 
IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; maskr = IBA7220_IBC_SPEED_AUTONEG_MASK | IBA7220_IBC_IBTA_1_2_MASK; /* IBTA 1.2 mode + speed bits are contiguous */ lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE); setforce = 1; break; case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */ lsb = IBA7220_IBC_RXPOL_SHIFT; maskr = IBA7220_IBC_RXPOL_MASK; break; case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */ lsb = IBA7220_IBC_LREV_SHIFT; maskr = IBA7220_IBC_LREV_MASK; break; case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, OverrunThreshold); if (maskr != val) { ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, OverrunThreshold); ppd->cpspec->ibcctrl |= (u64) val << SYM_LSB(IBCCtrl, OverrunThreshold); qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); qib_write_kreg(dd, kr_scratch, 0); } goto bail; case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, PhyerrThreshold); if (maskr != val) { ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, PhyerrThreshold); ppd->cpspec->ibcctrl |= (u64) val << SYM_LSB(IBCCtrl, PhyerrThreshold); qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); qib_write_kreg(dd, kr_scratch, 0); } goto bail; case QIB_IB_CFG_PKEYS: /* update pkeys */ maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) | ((u64) ppd->pkeys[2] << 32) | ((u64) ppd->pkeys[3] << 48); qib_write_kreg(dd, kr_partitionkey, maskr); goto bail; case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ /* will only take effect when the link state changes */ if (val == IB_LINKINITCMD_POLL) ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, LinkDownDefaultState); else /* SLEEP */ ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, LinkDownDefaultState); qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); qib_write_kreg(dd, kr_scratch, 0); goto bail; case QIB_IB_CFG_MTU: /* update the MTU in IBC */ /* * Update our housekeeping variables, and set IBC max * size, same as init 
code; max IBC is max we allow in * buffer, less the qword pbc, plus 1 for ICRC, in dwords * Set even if it's unchanged, print debug message only * on changes. */ val = (ppd->ibmaxlen >> 2) + 1; ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen); ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen); qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); qib_write_kreg(dd, kr_scratch, 0); goto bail; case QIB_IB_CFG_LSTATE: /* set the IB link state */ switch (val & 0xffff0000) { case IB_LINKCMD_DOWN: lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN; if (!ppd->cpspec->ibdeltainprog && qib_compat_ddr_negotiate) { ppd->cpspec->ibdeltainprog = 1; ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr); ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd, cr_iblinkerrrecov); } break; case IB_LINKCMD_ARMED: lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED; break; case IB_LINKCMD_ACTIVE: lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE; break; default: ret = -EINVAL; qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16); goto bail; } switch (val & 0xffff) { case IB_LINKINITCMD_NOP: licmd = 0; break; case IB_LINKINITCMD_POLL: licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL; break; case IB_LINKINITCMD_SLEEP: licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP; break; case IB_LINKINITCMD_DISABLE: licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE; ppd->cpspec->chase_end = 0; /* * stop state chase counter and timer, if running. * wait forpending timer, but don't clear .data (ppd)! */ if (ppd->cpspec->chase_timer.expires) { del_timer_sync(&ppd->cpspec->chase_timer); ppd->cpspec->chase_timer.expires = 0; } break; default: ret = -EINVAL; qib_dev_err(dd, "bad linkinitcmd req 0x%x\n", val & 0xffff); goto bail; } qib_set_ib_7220_lstate(ppd, lcmd, licmd); maskr = IBA7220_IBC_WIDTH_MASK; lsb = IBA7220_IBC_WIDTH_SHIFT; tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr; /* If the width active on the chip does not match the * width in the shadow register, write the new active * width to the chip. 
* We don't have to worry about speed as the speed is taken * care of by set_7220_ibspeed_fast called by ib_updown. */ if (ppd->link_width_enabled-1 != tmp) { ppd->cpspec->ibcddrctrl &= ~(maskr << lsb); ppd->cpspec->ibcddrctrl |= (((u64)(ppd->link_width_enabled-1) & maskr) << lsb); qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl); qib_write_kreg(dd, kr_scratch, 0); spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags |= QIBL_IB_FORCE_NOTIFY; spin_unlock_irqrestore(&ppd->lflags_lock, flags); } goto bail; case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */ if (val > IBA7220_IBC_HRTBT_MASK) { ret = -EINVAL; goto bail; } lsb = IBA7220_IBC_HRTBT_SHIFT; maskr = IBA7220_IBC_HRTBT_MASK; break; default: ret = -EINVAL; goto bail; } ppd->cpspec->ibcddrctrl &= ~(maskr << lsb); ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb); qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl); qib_write_kreg(dd, kr_scratch, 0); if (setforce) { spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags |= QIBL_IB_FORCE_NOTIFY; spin_unlock_irqrestore(&ppd->lflags_lock, flags); } bail: return ret; } static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what) { int ret = 0; u64 val, ddr; if (!strncmp(what, "ibc", 3)) { ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); val = 0; /* disable heart beat, so link will come up */ qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", ppd->dd->unit, ppd->port); } else if (!strncmp(what, "off", 3)) { ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); /* enable heart beat again */ val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback (normal)\n", ppd->dd->unit, ppd->port); } else ret = -EINVAL; if (!ret) { qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl); ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT); ppd->cpspec->ibcddrctrl = ddr | val; qib_write_kreg(ppd->dd, kr_ibcddrctrl, 
ppd->cpspec->ibcddrctrl);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
	}
	return ret;
}

/*
 * Push the software receive-header head (and optionally the eager index
 * head) out to the chip's per-context user registers.  The eager index
 * head is only written when @updegr is nonzero; @npkts is unreferenced
 * on this chip.  mmiowb() after each write keeps the MMIO stores ordered.
 */
static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
				    u32 updegr, u32 egrhd, u32 npkts)
{
	if (updegr)
		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
	mmiowb();
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	mmiowb();
}

/*
 * Return nonzero when the receive header queue for @rcd is empty,
 * i.e. head equals tail.  Uses the in-memory tail copy when one is
 * mapped, otherwise reads the tail register from the chip.
 */
static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
{
	u32 head, tail;

	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
	if (rcd->rcvhdrtail_kvaddr)
		tail = qib_get_rcvhdrtail(rcd);
	else
		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
	return head == tail;
}

/*
 * Modify the RCVCTRL register in chip-specific way. This
 * is a function because bit positions and (future) register
 * location is chip-specifc, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
 * do multiple modifications.
 */
static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
			     int ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	u64 mask, val;
	unsigned long flags;

	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	/* global (non-per-context) enables/disables first */
	if (op & QIB_RCVCTRL_TAILUPD_ENB)
		dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
	if (op & QIB_RCVCTRL_TAILUPD_DIS)
		dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
	if (op & QIB_RCVCTRL_PKEY_ENB)
		dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
	if (op & QIB_RCVCTRL_PKEY_DIS)
		dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
	/* ctxt < 0 means apply the per-context ops to all contexts */
	if (ctxt < 0)
		mask = (1ULL << dd->ctxtcnt) - 1;
	else
		mask = (1ULL << ctxt);
	if (op & QIB_RCVCTRL_CTXT_ENB) {
		/* always done for specific ctxt */
		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
		if (!(dd->flags & QIB_NODMA_RTAIL))
			dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
		/* Write these registers before the context is enabled.
*/ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, dd->rcd[ctxt]->rcvhdrqtailaddr_phys); qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, dd->rcd[ctxt]->rcvhdrq_phys); dd->rcd[ctxt]->seq_cnt = 1; } if (op & QIB_RCVCTRL_CTXT_DIS) dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable)); if (op & QIB_RCVCTRL_INTRAVAIL_ENB) dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT); if (op & QIB_RCVCTRL_INTRAVAIL_DIS) dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT); qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) { /* arm rcv interrupt */ val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) | dd->rhdrhead_intr_off; qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); } if (op & QIB_RCVCTRL_CTXT_ENB) { /* * Init the context registers also; if we were * disabled, tail and head should both be zero * already from the enable, but since we don't * know, we have to do it explicitly. */ val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); dd->rcd[ctxt]->head = val; /* If kctxt, interrupt on next receive. */ if (ctxt < dd->first_user_ctxt) val |= dd->rhdrhead_intr_off; qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); } if (op & QIB_RCVCTRL_CTXT_DIS) { if (ctxt >= 0) { qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0); qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0); } else { unsigned i; for (i = 0; i < dd->cfgctxts; i++) { qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, i, 0); qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0); } } } spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); } /* * Modify the SENDCTRL register in chip-specific way. This * is a function there may be multiple such registers with * slightly different layouts. To start, we assume the * "canonical" register layout of the first chips. 
* Chip requires no back-back sendctrl writes, so write * scratch register after writing sendctrl */ static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op) { struct qib_devdata *dd = ppd->dd; u64 tmp_dd_sendctrl; unsigned long flags; spin_lock_irqsave(&dd->sendctrl_lock, flags); /* First the ones that are "sticky", saved in shadow */ if (op & QIB_SENDCTRL_CLEAR) dd->sendctrl = 0; if (op & QIB_SENDCTRL_SEND_DIS) dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable); else if (op & QIB_SENDCTRL_SEND_ENB) { dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable); if (dd->flags & QIB_USE_SPCL_TRIG) dd->sendctrl |= SYM_MASK(SendCtrl, SSpecialTriggerEn); } if (op & QIB_SENDCTRL_AVAIL_DIS) dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); else if (op & QIB_SENDCTRL_AVAIL_ENB) dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); if (op & QIB_SENDCTRL_DISARM_ALL) { u32 i, last; tmp_dd_sendctrl = dd->sendctrl; /* * disarm any that are not yet launched, disabling sends * and updates until done. */ last = dd->piobcnt2k + dd->piobcnt4k; tmp_dd_sendctrl &= ~(SYM_MASK(SendCtrl, SPioEnable) | SYM_MASK(SendCtrl, SendBufAvailUpd)); for (i = 0; i < last; i++) { qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl | SYM_MASK(SendCtrl, Disarm) | i); qib_write_kreg(dd, kr_scratch, 0); } } tmp_dd_sendctrl = dd->sendctrl; if (op & QIB_SENDCTRL_FLUSH) tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort); if (op & QIB_SENDCTRL_DISARM) tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) | ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) << SYM_LSB(SendCtrl, DisarmPIOBuf)); if ((op & QIB_SENDCTRL_AVAIL_BLIP) && (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); qib_write_kreg(dd, kr_scratch, 0); if (op & QIB_SENDCTRL_AVAIL_BLIP) { qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); qib_write_kreg(dd, kr_scratch, 0); } spin_unlock_irqrestore(&dd->sendctrl_lock, flags); if (op & QIB_SENDCTRL_FLUSH) { u32 v; /* * 
ensure writes have hit chip, then do a few * more reads, to allow DMA of pioavail registers * to occur, so in-memory copy is in sync with * the chip. Not always safe to sleep. */ v = qib_read_kreg32(dd, kr_scratch); qib_write_kreg(dd, kr_scratch, v); v = qib_read_kreg32(dd, kr_scratch); qib_write_kreg(dd, kr_scratch, v); qib_read_kreg32(dd, kr_scratch); } } /** * qib_portcntr_7220 - read a per-port counter * @dd: the qlogic_ib device * @creg: the counter to snapshot */ static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg) { u64 ret = 0ULL; struct qib_devdata *dd = ppd->dd; u16 creg; /* 0xffff for unimplemented or synthesized counters */ static const u16 xlator[] = { [QIBPORTCNTR_PKTSEND] = cr_pktsend, [QIBPORTCNTR_WORDSEND] = cr_wordsend, [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount, [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount, [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount, [QIBPORTCNTR_SENDSTALL] = cr_sendstall, [QIBPORTCNTR_PKTRCV] = cr_pktrcv, [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount, [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount, [QIBPORTCNTR_RCVEBP] = cr_rcvebp, [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl, [QIBPORTCNTR_WORDRCV] = cr_wordrcv, [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt, [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr, [QIBPORTCNTR_RXVLERR] = cr_rxvlerr, [QIBPORTCNTR_ERRICRC] = cr_erricrc, [QIBPORTCNTR_ERRVCRC] = cr_errvcrc, [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc, [QIBPORTCNTR_BADFORMAT] = cr_badformat, [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen, [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr, [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen, [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl, [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl, [QIBPORTCNTR_ERRLINK] = cr_errlink, [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown, [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov, [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr, [QIBPORTCNTR_PSINTERVAL] = cr_psinterval, [QIBPORTCNTR_PSSTART] = cr_psstart, [QIBPORTCNTR_PSSTAT] = cr_psstat, [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt, 
[QIBPORTCNTR_ERRPKEY] = cr_errpkey, [QIBPORTCNTR_KHDROVFL] = 0xffff, }; if (reg >= ARRAY_SIZE(xlator)) { qib_devinfo(ppd->dd->pcidev, "Unimplemented portcounter %u\n", reg); goto done; } creg = xlator[reg]; if (reg == QIBPORTCNTR_KHDROVFL) { int i; /* sum over all kernel contexts */ for (i = 0; i < dd->first_user_ctxt; i++) ret += read_7220_creg32(dd, cr_portovfl + i); } if (creg == 0xffff) goto done; /* * only fast incrementing counters are 64bit; use 32 bit reads to * avoid two independent reads when on opteron */ if ((creg == cr_wordsend || creg == cr_wordrcv || creg == cr_pktsend || creg == cr_pktrcv)) ret = read_7220_creg(dd, creg); else ret = read_7220_creg32(dd, creg); if (creg == cr_ibsymbolerr) { if (dd->pport->cpspec->ibdeltainprog) ret -= ret - ppd->cpspec->ibsymsnap; ret -= dd->pport->cpspec->ibsymdelta; } else if (creg == cr_iblinkerrrecov) { if (dd->pport->cpspec->ibdeltainprog) ret -= ret - ppd->cpspec->iblnkerrsnap; ret -= dd->pport->cpspec->iblnkerrdelta; } done: return ret; } /* * Device counter names (not port-specific), one line per stat, * single string. Used by utilities like ipathstats to print the stats * in a way which works for different versions of drivers, without changing * the utility. Names need to be 12 chars or less (w/o newline), for proper * display by utility. * Non-error counters are first. * Start of "error" conters is indicated by a leading "E " on the first * "error" counter, and doesn't count in label length. * The EgrOvfl list needs to be last so we truncate them at the configured * context count for the device. * cntr7220indices contains the corresponding register indices. 
*/ static const char cntr7220names[] = "Interrupts\n" "HostBusStall\n" "E RxTIDFull\n" "RxTIDInvalid\n" "Ctxt0EgrOvfl\n" "Ctxt1EgrOvfl\n" "Ctxt2EgrOvfl\n" "Ctxt3EgrOvfl\n" "Ctxt4EgrOvfl\n" "Ctxt5EgrOvfl\n" "Ctxt6EgrOvfl\n" "Ctxt7EgrOvfl\n" "Ctxt8EgrOvfl\n" "Ctxt9EgrOvfl\n" "Ctx10EgrOvfl\n" "Ctx11EgrOvfl\n" "Ctx12EgrOvfl\n" "Ctx13EgrOvfl\n" "Ctx14EgrOvfl\n" "Ctx15EgrOvfl\n" "Ctx16EgrOvfl\n"; static const size_t cntr7220indices[] = { cr_lbint, cr_lbflowstall, cr_errtidfull, cr_errtidvalid, cr_portovfl + 0, cr_portovfl + 1, cr_portovfl + 2, cr_portovfl + 3, cr_portovfl + 4, cr_portovfl + 5, cr_portovfl + 6, cr_portovfl + 7, cr_portovfl + 8, cr_portovfl + 9, cr_portovfl + 10, cr_portovfl + 11, cr_portovfl + 12, cr_portovfl + 13, cr_portovfl + 14, cr_portovfl + 15, cr_portovfl + 16, }; /* * same as cntr7220names and cntr7220indices, but for port-specific counters. * portcntr7220indices is somewhat complicated by some registers needing * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG */ static const char portcntr7220names[] = "TxPkt\n" "TxFlowPkt\n" "TxWords\n" "RxPkt\n" "RxFlowPkt\n" "RxWords\n" "TxFlowStall\n" "TxDmaDesc\n" /* 7220 and 7322-only */ "E RxDlidFltr\n" /* 7220 and 7322-only */ "IBStatusChng\n" "IBLinkDown\n" "IBLnkRecov\n" "IBRxLinkErr\n" "IBSymbolErr\n" "RxLLIErr\n" "RxBadFormat\n" "RxBadLen\n" "RxBufOvrfl\n" "RxEBP\n" "RxFlowCtlErr\n" "RxICRCerr\n" "RxLPCRCerr\n" "RxVCRCerr\n" "RxInvalLen\n" "RxInvalPKey\n" "RxPktDropped\n" "TxBadLength\n" "TxDropped\n" "TxInvalLen\n" "TxUnderrun\n" "TxUnsupVL\n" "RxLclPhyErr\n" /* 7220 and 7322-only */ "RxVL15Drop\n" /* 7220 and 7322-only */ "RxVlErr\n" /* 7220 and 7322-only */ "XcessBufOvfl\n" /* 7220 and 7322-only */ ; #define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */ static const size_t portcntr7220indices[] = { QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG, cr_pktsendflow, QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG, QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG, cr_pktrcvflowctrl, 
QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG, QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG, cr_txsdmadesc, cr_rxdlidfltr, cr_ibstatuschange, QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG, QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG, QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG, QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG, QIBPORTCNTR_LLI | _PORT_VIRT_FLAG, QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG, QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG, QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG, QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG, cr_rcvflowctrl_err, QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG, QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG, QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG, QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG, QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG, QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG, cr_invalidslen, cr_senddropped, cr_errslen, cr_sendunderrun, cr_txunsupvl, QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG, QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG, QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG, QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG, }; /* do all the setup to make the counter reads efficient later */ static void init_7220_cntrnames(struct qib_devdata *dd) { int i, j = 0; char *s; for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts; i++) { /* we always have at least one counter before the egrovfl */ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12)) j = 1; s = strchr(s + 1, '\n'); if (s && j) j++; } dd->cspec->ncntrs = i; if (!s) /* full list; size is without terminating null */ dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1; else dd->cspec->cntrnamelen = 1 + s - cntr7220names; dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs * sizeof(u64), GFP_KERNEL); if (!dd->cspec->cntrs) qib_dev_err(dd, "Failed allocation for counters\n"); for (i = 0, s = (char *)portcntr7220names; s; i++) s = strchr(s + 1, '\n'); dd->cspec->nportcntrs = i - 1; dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1; dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs * sizeof(u64), GFP_KERNEL); if (!dd->cspec->portcntrs) 
	qib_dev_err(dd, "Failed allocation for portcounters\n");
}

/*
 * Return either the device counter names (namep != NULL) or a fresh
 * snapshot of all device counters (namep == NULL); the return value is
 * the number of bytes available, and 0 once everything has been read.
 */
static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
			      u64 **cntrp)
{
	u32 ret;

	if (!dd->cspec->cntrs) {
		ret = 0;
		goto done;
	}

	if (namep) {
		*namep = (char *)cntr7220names;
		ret = dd->cspec->cntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
	} else {
		u64 *cntr = dd->cspec->cntrs;
		int i;

		ret = dd->cspec->ncntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}

		*cntrp = cntr;
		for (i = 0; i < dd->cspec->ncntrs; i++)
			*cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
	}
done:
	return ret;
}

/*
 * Same as qib_read_7220cntrs(), but for the per-port counters of @port;
 * entries flagged _PORT_VIRT_FLAG are "virtual" counters routed through
 * qib_portcntr_7220() instead of a direct chip counter-register read.
 */
static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
				  char **namep, u64 **cntrp)
{
	u32 ret;

	if (!dd->cspec->portcntrs) {
		ret = 0;
		goto done;
	}
	if (namep) {
		*namep = (char *)portcntr7220names;
		ret = dd->cspec->portcntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
	} else {
		u64 *cntr = dd->cspec->portcntrs;
		struct qib_pportdata *ppd = &dd->pport[port];
		int i;

		ret = dd->cspec->nportcntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->nportcntrs; i++) {
			if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
				*cntr++ = qib_portcntr_7220(ppd,
					portcntr7220indices[i] &
					~_PORT_VIRT_FLAG);
			else
				*cntr++ = read_7220_creg32(dd,
					   portcntr7220indices[i]);
		}
	}
done:
	return ret;
}

/**
 * qib_get_7220_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
 *
 * This needs more work; in particular, decision on whether we really
 * need traffic_wds done the way it is.
 * Called from add_timer; re-arms itself at the end.
 */
static void qib_get_7220_faststats(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	struct qib_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 traffic_wds;

	/*
	 * don't access the chip while running diags, or memory diags can
	 * fail
	 */
	if (!(dd->flags & QIB_INITTED) || dd->diag_client)
		/* but re-arm the timer, for diags case; won't hurt other */
		goto done;

	/*
	 * We now try to maintain an activity timer, based on traffic
	 * exceeding a threshold, so we need to check the word-counts
	 * even if they are 64-bit.
	 */
	traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
		qib_portcntr_7220(ppd, cr_wordrcv);
	spin_lock_irqsave(&dd->eep_st_lock, flags);
	/* delta since the last sample, accumulated into the shadow total */
	traffic_wds -= dd->traffic_wds;
	dd->traffic_wds += traffic_wds;
	if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
		atomic_add(5, &dd->active_time); /* S/B #define */
	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
done:
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
}

/*
 * If we are using MSI, try to fallback to INTx.
 * Returns 0 when MSI was never in use, 1 after switching to INTx.
 */
static int qib_7220_intr_fallback(struct qib_devdata *dd)
{
	if (!dd->msi_lo)
		return 0;

	qib_devinfo(dd->pcidev,
		"MSI interrupt not detected, trying INTx interrupts\n");
	qib_7220_free_irq(dd);
	qib_enable_intx(dd->pcidev);
	/*
	 * Some newer kernels require free_irq before disable_msi,
	 * and irq can be changed during disable and INTx enable
	 * and we need to therefore use the pcidev->irq value,
	 * not our saved MSI value.
	 */
	dd->cspec->irq = dd->pcidev->irq;
	qib_setup_7220_interrupt(dd);
	return 1;
}

/*
 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining.  To do this right, we reset IBC
 * as well.
 */
static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
{
	u64 val, prev_val;
	struct qib_devdata *dd = ppd->dd;

	prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
	val = prev_val | QLOGIC_IB_XGXS_RESET;
	prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
	/* disable the link while the XGXS reset bit is pulsed */
	qib_write_kreg(dd, kr_control,
		       dd->control & ~QLOGIC_IB_C_LINKENABLE);
	qib_write_kreg(dd, kr_xgxs_cfg, val);
	qib_read_kreg32(dd, kr_scratch); /* flush the write to the chip */
	qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
	qib_write_kreg(dd, kr_control, dd->control);
}

/*
 * For this chip, we want to use the same buffer every time
 * when we are trying to bring the link up (they are always VL15
 * packets).  At that link state the packet should always go out immediately
 * (or at least be discarded at the tx interface if the link is down).
 * If it doesn't, and the buffer isn't available, that means some other
 * sender has gotten ahead of us, and is preventing our packet from going
 * out.  In that case, we flush all packets, and try again.  If that still
 * fails, we fail the request, and hope things work the next time around.
 *
 * We don't need very complicated heuristics on whether the packet had
 * time to go out or not, since even at SDR 1X, it goes out in very short
 * time periods, covered by the chip reads done here and as part of the
 * flush.
 */
static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
{
	u32 __iomem *buf;
	u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
	int do_cleanup;
	unsigned long flags;

	/*
	 * always blip to get avail list updated, since it's almost
	 * always needed, and is fairly cheap.
	 */
	sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
	if (buf)
		goto done;

	/*
	 * NOTE(review): the second half of this condition is redundant --
	 * a state equal to s20_idle can never also be s00_hw_down.
	 */
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
	    ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
		__qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
		do_cleanup = 0;
	} else {
		do_cleanup = 1;
		qib_7220_sdma_hw_clean_up(ppd);
	}
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	if (do_cleanup) {
		qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
		buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
	}
done:
	return buf;
}

/*
 * This code for non-IBTA-compliant IB speed negotiation is only known to
 * work for the SDR to DDR transition, and only between an HCA and a switch
 * with recent firmware.  It is based on observed heuristics, rather than
 * actual knowledge of the non-compliant speed negotiation.
 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
 */
static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
				 u32 dcnt, u32 *data)
{
	int i;
	u64 pbc;
	u32 __iomem *piobuf;
	u32 pnum;
	struct qib_devdata *dd = ppd->dd;

	i = 0;
	pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
	pbc |= PBC_7220_VL15_SEND;
	/* retry a few times for a link buffer before giving up silently */
	while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
		if (i++ > 5)
			return;
		udelay(2);
	}
	sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
	writeq(pbc, piobuf);
	qib_flush_wc();
	qib_pio_copy(piobuf + 2, hdr, 7);
	qib_pio_copy(piobuf + 9, data, dcnt);
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}
	qib_flush_wc();
	qib_sendbuf_done(dd, pnum);
}

/*
 * _start packet gets sent twice at start, _done gets sent twice at end
 */
static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
{
	struct qib_devdata *dd = ppd->dd;
	static u32 swapped;
	u32 dw, i, hcnt, dcnt, *data;
	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
	static u32 madpayload_start[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
		};
	static u32 madpayload_done[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x40000001, 0x1388, 0x15e, /* rest 0's */
		};

	dcnt = ARRAY_SIZE(madpayload_start);
	hcnt = ARRAY_SIZE(hdr);
	if (!swapped) {
		/* for maintainability, do it at runtime */
		for (i = 0; i < hcnt; i++) {
			dw = (__force u32) cpu_to_be32(hdr[i]);
			hdr[i] = dw;
		}
		for (i = 0; i < dcnt; i++) {
			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
			madpayload_start[i] = dw;
			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
			madpayload_done[i] = dw;
		}
		swapped = 1;
	}

	data = which ? madpayload_done : madpayload_start;

	autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
	autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
}

/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change.   The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down)
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When link has gone down, and autoneg enabled, or autoneg has
 * failed and we give up until next time we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed.
 */
static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
	ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
				     IBA7220_IBC_IBTA_1_2_MASK);

	if (speed == (QIB_IB_SDR | QIB_IB_DDR))
		ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
			IBA7220_IBC_IBTA_1_2_MASK;
	else
		ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
			IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;

	qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
	qib_write_kreg(ppd->dd, kr_scratch, 0); /* flush */
}

/*
 * This routine is only used when we are not talking to another
 * IB 1.2-compliant device that we think can do DDR.
 * (This includes all existing switch chips as of Oct 2007.)
 * 1.2-compliant devices go directly to DDR prior to reaching INIT
 */
static void try_7220_autoneg(struct qib_pportdata *ppd)
{
	unsigned long flags;

	/*
	 * Required for older non-IB1.2 DDR switches.  Newer
	 * non-IB-compliant switches don't need it, but so far,
	 * aren't bothered by it either.  "Magic constant"
	 */
	qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	autoneg_7220_send(ppd, 0);
	set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
	toggle_7220_rclkrls(ppd->dd);
	/* 2 msec is minimum length of a poll cycle */
	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
			   msecs_to_jiffies(2));
}

/*
 * Handle the empirically determined mechanism for auto-negotiation
 * of DDR speed with switches.
 */
static void autoneg_7220_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd;
	u64 startms;
	u32 i;
	unsigned long flags;

	ppd = &container_of(work, struct qib_chippport_specific,
			    autoneg_work.work)->pportdata;
	dd = ppd->dd;

	/* NOTE(review): startms is computed but never read below */
	startms = jiffies_to_msecs(jiffies);

	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
		     == IB_7220_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}

	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;

	toggle_7220_rclkrls(dd);

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;

	set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
	toggle_7220_rclkrls(dd);

	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
		msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			dd->cspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}

/* Map the chip's IBC LinkState field to an IBTA port state constant */
static u32 qib_7220_iblink_state(u64 ibcs)
{
	u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);

	switch (state) {
	case IB_7220_L_STATE_INIT:
		state = IB_PORT_INIT;
		break;
	case IB_7220_L_STATE_ARM:
		state = IB_PORT_ARMED;
		break;
	case IB_7220_L_STATE_ACTIVE:
		/* fall through */
	case IB_7220_L_STATE_ACT_DEFER:
		state = IB_PORT_ACTIVE;
		break;
	default: /* fall through */
	case IB_7220_L_STATE_DOWN:
		state = IB_PORT_DOWN;
		break;
	}
	return state;
}

/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7220_phys_portstate(u64 ibcs)
{
	u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
	return qib_7220_physportstate[state];
}

/*
 * Called on IB link up/down transitions; handles the non-compliant DDR
 * autonegotiation state machine and symbol/link-error delta bookkeeping.
 * Returns 1 when no further generic IB status-change processing is wanted.
 */
static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
{
	int ret = 0, symadj = 0;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	if (!ibup) {
		/*
		 * When the link goes down we don't want AEQ running, so it
		 * won't interfere with IBC training, etc., and we need
		 * to go back to the static SerDes preset values.
		 */
		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)))
			set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			qib_sd7220_presets(dd);
			qib_cancel_sends(ppd); /* initial disarm, etc. */
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			if (__qib_sdma_running(ppd))
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e70_go_idle);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
		/* this might better in qib_sd7220_presets() */
		set_7220_relock_poll(dd, ibup);
	} else {
		if (qib_compat_ddr_negotiate &&
		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)) &&
		    ppd->link_speed_active == QIB_IB_SDR &&
		    (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
		    (QIB_IB_DDR | QIB_IB_SDR) &&
		    dd->cspec->autoneg_tries < AUTONEG_TRIES) {
			/* we are SDR, and DDR auto-negotiation enabled */
			++dd->cspec->autoneg_tries;
			if (!ppd->cpspec->ibdeltainprog) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
					cr_ibsymbolerr);
				ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
					cr_iblinkerrrecov);
			}
			try_7220_autoneg(ppd);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   ppd->link_speed_active == QIB_IB_SDR) {
			autoneg_7220_send(ppd, 1);
			set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
			udelay(2);
			toggle_7220_rclkrls(dd);
			ret = 1; /* no other IB status change processing */
		} else {
			if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			    (ppd->link_speed_active & QIB_IB_DDR)) {
				/* autoneg succeeded; clean up and wake waiter */
				spin_lock_irqsave(&ppd->lflags_lock, flags);
				ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
						 QIBL_IB_AUTONEG_FAILED);
				spin_unlock_irqrestore(&ppd->lflags_lock,
						       flags);
				dd->cspec->autoneg_tries = 0;
				/* re-enable SDR, for next link down */
				set_7220_ibspeed_fast(ppd,
						      ppd->link_speed_enabled);
				wake_up(&ppd->cpspec->autoneg_wait);
				symadj = 1;
			} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
				/*
				 * Clear autoneg failure flag, and do setup
				 * so we'll try next time link goes down and
				 * back to INIT (possibly connected to a
				 * different device).
				 */
				spin_lock_irqsave(&ppd->lflags_lock, flags);
				ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
				spin_unlock_irqrestore(&ppd->lflags_lock,
						       flags);
				ppd->cpspec->ibcddrctrl |=
					IBA7220_IBC_IBTA_1_2_MASK;
				qib_write_kreg(dd, kr_ncmodectrl, 0);
				symadj = 1;
			}
		}

		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			symadj = 1;

		if (!ret) {
			ppd->delay_mult = rate_to_delay
			    [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
			    [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];

			set_7220_relock_poll(dd, ibup);
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			/*
			 * Unlike 7322, the 7220 needs this, due to lack of
			 * interrupt in some cases when we have sdma active
			 * when the link goes down.
			 */
			if (ppd->sdma_state.current_state !=
			    qib_sdma_state_s20_idle)
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e00_go_hw_down);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
	}

	if (symadj) {
		/* fold the accumulated counter snapshots into the deltas */
		if (ppd->cpspec->ibdeltainprog) {
			ppd->cpspec->ibdeltainprog = 0;
			ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
				cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
			ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
				cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
		}
	} else if (!ibup && qib_compat_ddr_negotiate &&
		   !ppd->cpspec->ibdeltainprog &&
		   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
							  cr_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
							     cr_iblinkerrrecov);
	}

	if (!ret)
		qib_setup_7220_setextled(ppd, ibup);
	return ret;
}

/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * these are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * returns contents of GP Inputs.
 */
static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function. We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}

/*
 * Read fundamental info we need to use the chip.  These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7220_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->palign = qib_read_kreg32(dd, kr_palign);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport->ibmtu = (u32)mtu;

	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	if (dd->piobcnt4k) {
		dd->pio4kbase = (u32 __iomem *)
			((char __iomem *) dd->kregbase +
			 (dd->piobufbase >> 32));
		/*
		 * 4K buffers take 2 pages; we use roundup just to be
		 * paranoid; we calculate it once here, rather than on
		 * every buf allocate
		 */
		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
	}

	piobufs = dd->piobcnt4k + dd->piobcnt2k;

	/* one bit per 2 buffers, rounded up to whole u64 registers */
	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}

/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * qib_get_7220_chip_params(), so split out as separate function
 */
static void set_7220_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;
	/* init after possible re-map in init_chip_wc_pat() */
	cregbase = qib_read_kreg32(dd, kr_counterregbase);
	dd->cspec->cregbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + cregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);
}

/* SendCtrl bits that are tracked in the software shadow copy */
#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) |	\
			   SYM_MASK(SendCtrl, SPioEnable) |		\
			   SYM_MASK(SendCtrl, SSpecialTriggerEn) |	\
			   SYM_MASK(SendCtrl, SendBufAvailUpd) |	\
			   SYM_MASK(SendCtrl, AvailUpdThld) |		\
			   SYM_MASK(SendCtrl, SDmaEnable) |		\
			   SYM_MASK(SendCtrl, SDmaIntEnable) |		\
			   SYM_MASK(SendCtrl, SDmaHalt) |		\
			   SYM_MASK(SendCtrl, SDmaSingleDescriptor))

/*
 * Diag observer hook for SendCtrl: keeps the shadow copy consistent with
 * diag reads/writes of the register.  Returns the access width in bytes.
 */
static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op,
			 u32 offs, u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx = offs / sizeof(u64);
	u64 local_data, all_bits;

	if (idx != kr_sendctrl) {
		qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
			    offs, only_32 ? "32" : "64");
		return 0;
	}

	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;
	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if ((mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read. The judgement call is whether from
		 * reg or shadow. First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
			    (u32)local_data, (u32)dd->sendctrl);
		if ((local_data & SENDCTRL_SHADOWED) !=
		    (dd->sendctrl & SENDCTRL_SHADOWED))
			qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
				(u32)local_data, (u32) dd->sendctrl);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		sval = (dd->sendctrl & ~mask);
		sval |= *data & SENDCTRL_SHADOWED & mask;
		dd->sendctrl = sval;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
			    (u32)tval, (u32)sval);
		qib_write_kreg(dd, kr_sendctrl, tval);
		qib_write_kreg(dd, kr_scratch, 0Ull); /* flush */
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

	return only_32 ? 4 : 8;
}

static const struct diag_observer sendctrl_observer = {
	sendctrl_hook, kr_sendctrl * sizeof(u64),
	kr_sendctrl * sizeof(u64)
};

/*
 * write the final few registers that depend on some of the
 * init setup.  Done late in init, just before bringing up
 * the serdes.
 */
static int qib_late_7220_initreg(struct qib_devdata *dd)
{
	int ret = 0;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	/* read back to verify the DMA address actually stuck */
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd,
			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
			(unsigned long) dd->pioavailregs_phys,
			(unsigned long long) val);
		ret = -EINVAL;
	}
	qib_register_observer(dd, &sendctrl_observer);
	return ret;
}

/*
 * Allocate/initialize all the chip- and port-specific state and shadow
 * registers for a 7220 device; returns 0 or -errno.
 */
static int qib_init_7220_variables(struct qib_devdata *dd)
{
	struct qib_chippport_specific *cpspec;
	struct qib_pportdata *ppd;
	int ret = 0;
	u32 sbufs, updthresh;

	/* cpspec and cspec were allocated contiguously after *dd */
	cpspec = (struct qib_chippport_specific *)(dd + 1);
	ppd = &cpspec->pportdata;
	dd->pport = ppd;
	dd->num_pports = 1;

	dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
	ppd->cpspec = cpspec;

	spin_lock_init(&dd->cspec->sdepb_lock);
	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* we haven't yet set QIB_PRESENT, so use read directly */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd,
			"Revision register read failure, giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT;  /* now register routines work */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);

	get_7220_chip_params(dd);
	qib_7220_boardname(dd);

	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;

	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | QIB_NODMA_RTAIL |
		QIB_HAS_THRESH_UPDATE;
	dd->flags |= qib_special_trigger ?
		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;

	/*
	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
	 * 2 is Some Misc, 3 is reserved for future.
	 */
	dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);

	dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);

	dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);

	init_waitqueue_head(&cpspec->autoneg_wait);
	INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);

	ret = qib_init_pportdata(ppd, dd, 0, 1);
	if (ret)
		goto bail;
	ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
	ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;

	ppd->link_width_enabled = ppd->link_width_supported;
	ppd->link_speed_enabled = ppd->link_speed_supported;
	/*
	 * Set the initial values to reasonable default, will be set
	 * for real when link is up.
	 */
	ppd->link_width_active = IB_WIDTH_4X;
	ppd->link_speed_active = QIB_IB_SDR;
	ppd->delay_mult = rate_to_delay[0][1];
	ppd->vls_supported = IB_VL_VL0;
	ppd->vls_operational = ppd->vls_supported;

	if (!qib_mini_init)
		qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);

	init_timer(&ppd->cpspec->chase_timer);
	ppd->cpspec->chase_timer.function = reenable_7220_chase;
	ppd->cpspec->chase_timer.data = (unsigned long)ppd;

	qib_num_cfg_vls = 1; /* if any 7220's, only one VL */

	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);

	/* we always allocate at least 2048 bytes for eager buffers */
	ret = ib_mtu_enum_to_int(qib_ibmtu);
	dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);

	qib_7220_tidtemplate(dd);

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.  For now, we set this
	 * up for a single packet.
	 */
	dd->rhdrhead_intr_off = 1ULL << 32;

	/* setup the stats timer; the add_timer is done at end of init */
	init_timer(&dd->stats_timer);
	dd->stats_timer.function = qib_get_7220_faststats;
	dd->stats_timer.data = (unsigned long) dd;
	dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;

	/*
	 * Control[4] has been added to change the arbitration within
	 * the SDMA engine between favoring data fetches over descriptor
	 * fetches.  qib_sdma_fetch_arb==0 gives data fetches priority.
	 */
	if (qib_sdma_fetch_arb)
		dd->control |= 1 << 4;

	dd->ureg_align = 0x10000;  /* 64KB alignment */

	dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
	qib_7220_config_ctxts(dd);
	qib_set_ctxtcnt(dd);  /* needed for PAT setup */

	if (qib_wc_pat) {
		ret = init_chip_wc_pat(dd, 0);
		if (ret)
			goto bail;
	}
	set_7220_baseaddrs(dd); /* set chip access pointers now */

	ret = 0;
	if (qib_mini_init)
		goto bail;

	ret = qib_create_ctxts(dd);
	init_7220_cntrnames(dd);

	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
	 * reserve the update threshold amount for other kernel use, such
	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
	 * unless we aren't enabling SDMA, in which case we want to use
	 * all the 4k bufs for the kernel.
	 * if this was less than the update threshold, we could wait
	 * a long time for an update.  Coded this way because we
	 * sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
	updthresh = 8U; /* update threshold */
	if (dd->flags & QIB_HAS_SEND_DMA) {
		dd->cspec->sdmabufcnt = dd->piobcnt4k;
		sbufs = updthresh > 3 ? updthresh : 3;
	} else {
		dd->cspec->sdmabufcnt = 0;
		sbufs = dd->piobcnt4k;
	}

	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
		dd->cspec->sdmabufcnt;
	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
	dd->last_pio = dd->cspec->lastbuf_for_pio;
	dd->pbufsctxt = dd->lastctxt_piobuf /
		(dd->cfgctxts - dd->first_user_ctxt);

	/*
	 * if we are at 16 user contexts, we will have one 7 sbufs
	 * per context, so drop the update threshold to match.  We
	 * want to update before we actually run out, at low pbufs/ctxt
	 * so give ourselves some margin
	 */
	if ((dd->pbufsctxt - 2) < updthresh)
		updthresh = dd->pbufsctxt - 2;

	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
			     << SYM_LSB(SendCtrl, AvailUpdThld);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
bail:
	return ret;
}

/*
 * Select a PIO send buffer: VL15 traffic before the link is active uses
 * the dedicated "link buffer"; otherwise pick from the 2k or 4k pool
 * based on the packet length encoded in the PBC.
 */
static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
					u32 *pbufnum)
{
	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *buf;

	if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
		!(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
		buf = get_7220_link_buf(ppd, pbufnum);
	else {
		if ((plen + 1) > dd->piosize2kmax_dwords)
			first = dd->piobcnt2k;
		else
			first = 0;
		/* try 4k if all 2k busy, so same last for both sizes */
		last = dd->cspec->lastbuf_for_pio;
		buf = qib_getsendbuf_range(dd, pbufnum, first, last);
	}
	return buf;
}

/* these 2 "counters" are really control registers, and are always RW */
static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	write_7220_creg(ppd->dd, cr_psinterval, intv);
	write_7220_creg(ppd->dd, cr_psstart, start);
}

/*
 * NOTE: no real attempt is made to generalize the SDMA stuff.
 * At some point "soon" we will have a new more generalized
 * set of sdma interface, and then we'll clean this up.
 */

/* Must be called with sdma_lock held, or before init finished */
static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ppd->sdma_descq_tail = tail;
	qib_write_kreg(ppd->dd, kr_senddmatail, tail);
}

/* No per-chip descriptor-count handling needed on 7220; intentionally empty */
static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
}

/* Per-state enable/interrupt/halt settings for the generic SDMA machine */
static struct sdma_set_state_action sdma_7220_action_table[] = {
	[qib_sdma_state_s00_hw_down] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.go_s99_running_tofalse = 1,
	},
	[qib_sdma_state_s10_hw_start_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s20_idle] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
	},
	[qib_sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s50_hw_halt_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.go_s99_running_totrue = 1,
	},
};

static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
{
	ppd->sdma_state.set_state_action = sdma_7220_action_table;
}

/* Program the SDMA base/tail/head registers and the send-buffer mask */
static int init_sdma_7220_regs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned i, n;
	u64 senddmabufmask[3] = { 0 };

	/* Set SendDmaBase */
	qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
	qib_sdma_7220_setlengen(ppd);
	qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
	/* Set SendDmaHeadAddr */
	qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);

	/*
	 * Reserve all the former "kernel" piobufs, using high number range
	 * so we get as many 4K buffers as possible
	 */
	n = dd->piobcnt2k + dd->piobcnt4k;
	i = n - dd->cspec->sdmabufcnt;
	for (; i < n; ++i) {
		unsigned word = i / 64;
		unsigned bit = i & 63;

		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
	qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
	qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
	ppd->sdma_state.first_sendbuf = i;
	ppd->sdma_state.last_sendbuf = n;
	return 0;
}

/* sdma_lock must be held */
static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	int sane;
	int use_dmahead;
	u16 swhead;
	u16 swtail;
	u16 cnt;
	u16 hwhead;

	use_dmahead = __qib_sdma_running(ppd) &&
		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
retry:
	hwhead = use_dmahead ?
		(u16)le64_to_cpu(*ppd->sdma_head_dma) :
		(u16)qib_read_kreg32(dd, kr_senddmahead);

	swhead = ppd->sdma_descq_head;
	swtail = ppd->sdma_descq_tail;
	cnt = ppd->sdma_descq_cnt;

	if (swhead < swtail) {
		/* not wrapped */
		/* NOTE(review): bitwise '&' of boolean tests; same result as && */
		sane = (hwhead >= swhead) & (hwhead <= swtail);
	} else if (swhead > swtail) {
		/* wrapped around */
		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			(hwhead <= swtail);
	} else {
		/* empty */
		sane = (hwhead == swhead);
	}

	if (unlikely(!sane)) {
		if (use_dmahead) {
			/* try one more time, directly from the register */
			use_dmahead = 0;
			goto retry;
		}
		/* assume no progress */
		hwhead = swhead;
	}

	return hwhead;
}

/* Nonzero while the SDMA engine still has work draining or aborting */
static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
{
	u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);

	return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
}

/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
 * Since the delay affects this packet but the amount of the delay is
 * based on the length of the previous packet, use the last delay computed
 * and save the delay count for this packet to be used next time
 * we get here.
 */
static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
				   u8 srate, u8 vl)
{
	u8 snd_mult = ppd->delay_mult;
	u8 rcv_mult = ib_rate_to_delay[srate];
	u32 ret = ppd->cpspec->last_delay_mult;

	/* remember the delay for the *next* packet (see comment above) */
	ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;

	/* Indicate VL15, if necessary */
	if (vl == 15)
		ret |= PBC_7220_VL15_SEND_CTRL;
	return ret;
}

/* 7220 needs no special VL15 buffer setup; intentionally empty */
static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
{
}

/* Assign eager-buffer count and TID base for a newly created context */
static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
{
	if (!rcd->ctxt) {
		rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
		rcd->rcvegr_tid_base = 0;
	} else {
		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
		rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
			(rcd->ctxt - 1) * rcd->rcvegrcnt;
	}
}

/*
 * Adjust the SendCtrl avail-update threshold when buffer ownership moves
 * between kernel and user contexts.
 */
static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
				  u32 len, u32 which, struct qib_ctxtdata *rcd)
{
	int i;
	unsigned long flags;

	switch (which) {
	case TXCHK_CHG_TYPE_KERN:
		/* see if we need to raise avail update threshold */
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		for (i = dd->first_user_ctxt;
		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
		     && i < dd->cfgctxts; i++)
			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
			   < dd->cspec->updthresh_dflt)
				break;
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		if (i == dd->cfgctxts) {
			/* no context forces a lower threshold; restore default */
			spin_lock_irqsave(&dd->sendctrl_lock, flags);
			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld))
					<< SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		}
		break;
	case TXCHK_CHG_TYPE_USER:
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
			dd->cspec->updthresh = (rcd->piocnt /
						rcd->subctxt_cnt) - 1;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					SYM_RMASK(SendCtrl, AvailUpdThld))
					<< SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		} else
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
		break;
	}
}

/* Write an arbitrary value to the chip scratch register */
static void writescratch(struct qib_devdata *dd, u32 val)
{
	qib_write_kreg(dd, kr_scratch, val);
}

/* Bitmask of temperature-sensor registers that actually exist (reg 6 absent) */
#define VALID_TS_RD_REG_MASK 0xBF

/**
 * qib_7220_tempsense_rd - read register of temp sensor via TWSI
 * @dd: the qlogic_ib device
 * @regnum: register to read from
 *
 * returns reg contents (0..255) or < 0 for error
 */
static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
{
	int ret;
	u8 rdata;

	if (regnum > 7) {
		ret = -EINVAL;
		goto bail;
	}

	/* return a bogus value for (the one) register we do not have */
	if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
		ret = 0;
		goto bail;
	}

	ret = mutex_lock_interruptible(&dd->eep_lock);
	if (ret)
		goto bail;

	ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
	if (!ret)
		ret = rdata;

	mutex_unlock(&dd->eep_lock);

	/*
	 * There are three possibilities here:
	 * ret is actual value (0..255)
	 * ret is -ENXIO or -EINVAL from twsi code or this file
	 * ret is -EINTR from mutex_lock_interruptible.
	 */
bail:
	return ret;
}

#ifdef CONFIG_INFINIBAND_QIB_DCA
/* 7220 has no DCA support; accept and ignore all notifications */
static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event)
{
	return 0;
}
#endif

/* Dummy function, as 7220 boards never disable EEPROM Write */
static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
{
	return 1;
}

/**
 * qib_init_iba7220_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for qlogic_ib device
 * @ent: pci_device_id struct for this dev
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
*/ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev, const struct pci_device_id *ent) { struct qib_devdata *dd; int ret; u32 boardid, minwidth; dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) + sizeof(struct qib_chippport_specific)); if (IS_ERR(dd)) goto bail; dd->f_bringup_serdes = qib_7220_bringup_serdes; dd->f_cleanup = qib_setup_7220_cleanup; dd->f_clear_tids = qib_7220_clear_tids; dd->f_free_irq = qib_7220_free_irq; dd->f_get_base_info = qib_7220_get_base_info; dd->f_get_msgheader = qib_7220_get_msgheader; dd->f_getsendbuf = qib_7220_getsendbuf; dd->f_gpio_mod = gpio_7220_mod; dd->f_eeprom_wen = qib_7220_eeprom_wen; dd->f_hdrqempty = qib_7220_hdrqempty; dd->f_ib_updown = qib_7220_ib_updown; dd->f_init_ctxt = qib_7220_init_ctxt; dd->f_initvl15_bufs = qib_7220_initvl15_bufs; dd->f_intr_fallback = qib_7220_intr_fallback; dd->f_late_initreg = qib_late_7220_initreg; dd->f_setpbc_control = qib_7220_setpbc_control; dd->f_portcntr = qib_portcntr_7220; dd->f_put_tid = qib_7220_put_tid; dd->f_quiet_serdes = qib_7220_quiet_serdes; dd->f_rcvctrl = rcvctrl_7220_mod; dd->f_read_cntrs = qib_read_7220cntrs; dd->f_read_portcntrs = qib_read_7220portcntrs; dd->f_reset = qib_setup_7220_reset; dd->f_init_sdma_regs = init_sdma_7220_regs; dd->f_sdma_busy = qib_sdma_7220_busy; dd->f_sdma_gethead = qib_sdma_7220_gethead; dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl; dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt; dd->f_sdma_update_tail = qib_sdma_update_7220_tail; dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up; dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up; dd->f_sdma_init_early = qib_7220_sdma_init_early; dd->f_sendctrl = sendctrl_7220_mod; dd->f_set_armlaunch = qib_set_7220_armlaunch; dd->f_set_cntr_sample = qib_set_cntr_7220_sample; dd->f_iblink_state = qib_7220_iblink_state; dd->f_ibphys_portstate = qib_7220_phys_portstate; dd->f_get_ib_cfg = qib_7220_get_ib_cfg; dd->f_set_ib_cfg = qib_7220_set_ib_cfg; dd->f_set_ib_loopback = 
qib_7220_set_loopback; dd->f_set_intr_state = qib_7220_set_intr_state; dd->f_setextled = qib_setup_7220_setextled; dd->f_txchk_change = qib_7220_txchk_change; dd->f_update_usrhead = qib_update_7220_usrhead; dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr; dd->f_xgxs_reset = qib_7220_xgxs_reset; dd->f_writescratch = writescratch; dd->f_tempsense_rd = qib_7220_tempsense_rd; #ifdef CONFIG_INFINIBAND_QIB_DCA dd->f_notify_dca = qib_7220_notify_dca; #endif /* * Do remaining pcie setup and save pcie values in dd. * Any error printing is already done by the init code. * On return, we have the chip mapped, but chip registers * are not set up until start of qib_init_7220_variables. */ ret = qib_pcie_ddinit(dd, pdev, ent); if (ret < 0) goto bail_free; /* initialize chip-specific variables */ ret = qib_init_7220_variables(dd); if (ret) goto bail_cleanup; if (qib_mini_init) goto bail; boardid = SYM_FIELD(dd->revision, Revision, BoardID); switch (boardid) { case 0: case 2: case 10: case 12: minwidth = 16; /* x16 capable boards */ break; default: minwidth = 8; /* x8 capable boards */ break; } if (qib_pcie_params(dd, minwidth, NULL, NULL)) qib_dev_err(dd, "Failed to setup PCIe or interrupts; continuing anyway\n"); /* save IRQ for possible later use */ dd->cspec->irq = pdev->irq; if (qib_read_kreg64(dd, kr_hwerrstatus) & QLOGIC_IB_HWE_SERDESPLLFAILED) qib_write_kreg(dd, kr_hwerrclear, QLOGIC_IB_HWE_SERDESPLLFAILED); /* setup interrupt handler (interrupt type handled above) */ qib_setup_7220_interrupt(dd); qib_7220_init_hwerrors(dd); /* clear diagctrl register, in case diags were running and crashed */ qib_write_kreg(dd, kr_hwdiagctrl, 0); goto bail; bail_cleanup: qib_pcie_ddcleanup(dd); bail_free: qib_free_devdata(dd); dd = ERR_PTR(ret); bail: return dd; }
gpl-2.0
googyanas/GoogyMax-G4
fs/xfs/xfs_super.c
371
48807
/* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_dir2.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" #include "xfs_ialloc.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_itable.h" #include "xfs_fsops.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_vnodeops.h" #include "xfs_log_priv.h" #include "xfs_trans_priv.h" #include "xfs_filestream.h" #include "xfs_da_btree.h" #include "xfs_extfree_item.h" #include "xfs_mru_cache.h" #include "xfs_inode_item.h" #include "xfs_icache.h" #include "xfs_trace.h" #include <linux/namei.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mount.h> #include <linux/mempool.h> #include <linux/writeback.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/parser.h> static const struct super_operations xfs_super_operations; static kmem_zone_t *xfs_ioend_zone; mempool_t *xfs_ioend_pool; #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ #define MNTOPT_LOGBSIZE 
"logbsize" /* size of XFS log buffers */ #define MNTOPT_LOGDEV "logdev" /* log device */ #define MNTOPT_RTDEV "rtdev" /* realtime I/O device */ #define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */ #define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */ #define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */ #define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */ #define MNTOPT_SUNIT "sunit" /* data volume stripe unit */ #define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ #define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ #define MNTOPT_MTPT "mtpt" /* filesystem mount point */ #define MNTOPT_GRPID "grpid" /* group-ID from parent directory */ #define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */ #define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */ #define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */ #define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */ #define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and * unwritten extent conversion */ #define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ #define MNTOPT_32BITINODE "inode32" /* inode allocation limited to * XFS_MAXINUMBER_32 */ #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ #define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ #define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */ #define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes * in stat(). 
*/ #define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */ #define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */ #define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */ #define MNTOPT_QUOTA "quota" /* disk quotas (user) */ #define MNTOPT_NOQUOTA "noquota" /* no quotas */ #define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */ #define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */ #define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */ #define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */ #define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */ #define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */ #define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */ #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ #define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */ #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */ #define MNTOPT_DISCARD "discard" /* Discard unused blocks */ #define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */ /* * Table driven mount option parser. * * Currently only used for remount, but it will be used for mount * in the future, too. 
*/ enum { Opt_barrier, Opt_nobarrier, Opt_inode64, Opt_inode32, Opt_err }; static const match_table_t tokens = { {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_inode64, "inode64"}, {Opt_inode32, "inode32"}, {Opt_err, NULL} }; STATIC unsigned long suffix_kstrtoint(char *s, unsigned int base, int *res) { int last, shift_left_factor = 0, _res; char *value = s; last = strlen(value) - 1; if (value[last] == 'K' || value[last] == 'k') { shift_left_factor = 10; value[last] = '\0'; } if (value[last] == 'M' || value[last] == 'm') { shift_left_factor = 20; value[last] = '\0'; } if (value[last] == 'G' || value[last] == 'g') { shift_left_factor = 30; value[last] = '\0'; } if (kstrtoint(s, base, &_res)) return -EINVAL; *res = _res << shift_left_factor; return 0; } /* * This function fills in xfs_mount_t fields based on mount args. * Note: the superblock has _not_ yet been read in. * * Note that this function leaks the various device name allocations on * failure. The caller takes care of them. */ STATIC int xfs_parseargs( struct xfs_mount *mp, char *options) { struct super_block *sb = mp->m_super; char *this_char, *value; int dsunit = 0; int dswidth = 0; int iosize = 0; __uint8_t iosizelog = 0; /* * set up the mount name first so all the errors will refer to the * correct device. */ mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); if (!mp->m_fsname) return ENOMEM; mp->m_fsname_len = strlen(mp->m_fsname) + 1; /* * Copy binary VFS mount flags we are interested in. */ if (sb->s_flags & MS_RDONLY) mp->m_flags |= XFS_MOUNT_RDONLY; if (sb->s_flags & MS_DIRSYNC) mp->m_flags |= XFS_MOUNT_DIRSYNC; if (sb->s_flags & MS_SYNCHRONOUS) mp->m_flags |= XFS_MOUNT_WSYNC; /* * Set some default flags that could be cleared by the mount option * parsing. */ mp->m_flags |= XFS_MOUNT_BARRIER; mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; #if !XFS_BIG_INUMS mp->m_flags |= XFS_MOUNT_SMALL_INUMS; #endif /* * These can be overridden by the mount option parsing. 
*/ mp->m_logbufs = -1; mp->m_logbsize = -1; if (!options) goto done; while ((this_char = strsep(&options, ",")) != NULL) { if (!*this_char) continue; if ((value = strchr(this_char, '=')) != NULL) *value++ = 0; if (!strcmp(this_char, MNTOPT_LOGBUFS)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } if (kstrtoint(value, 10, &mp->m_logbufs)) return EINVAL; } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } if (suffix_kstrtoint(value, 10, &mp->m_logbsize)) return EINVAL; } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); if (!mp->m_logname) return ENOMEM; } else if (!strcmp(this_char, MNTOPT_MTPT)) { xfs_warn(mp, "%s option not allowed on this system", this_char); return EINVAL; } else if (!strcmp(this_char, MNTOPT_RTDEV)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); if (!mp->m_rtname) return ENOMEM; } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } if (kstrtoint(value, 10, &iosize)) return EINVAL; iosizelog = ffs(iosize) - 1; } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } if (suffix_kstrtoint(value, 10, &iosize)) return EINVAL; iosizelog = ffs(iosize) - 1; } else if (!strcmp(this_char, MNTOPT_GRPID) || !strcmp(this_char, MNTOPT_BSDGROUPS)) { mp->m_flags |= XFS_MOUNT_GRPID; } else if (!strcmp(this_char, MNTOPT_NOGRPID) || !strcmp(this_char, MNTOPT_SYSVGROUPS)) { mp->m_flags &= ~XFS_MOUNT_GRPID; } else if (!strcmp(this_char, MNTOPT_WSYNC)) { mp->m_flags 
|= XFS_MOUNT_WSYNC; } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { mp->m_flags |= XFS_MOUNT_NORECOVERY; } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { mp->m_flags |= XFS_MOUNT_NOALIGN; } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { mp->m_flags |= XFS_MOUNT_SWALLOC; } else if (!strcmp(this_char, MNTOPT_SUNIT)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } if (kstrtoint(value, 10, &dsunit)) return EINVAL; } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } if (kstrtoint(value, 10, &dswidth)) return EINVAL; } else if (!strcmp(this_char, MNTOPT_32BITINODE)) { mp->m_flags |= XFS_MOUNT_SMALL_INUMS; } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; #if !XFS_BIG_INUMS xfs_warn(mp, "%s option not allowed on this system", this_char); return EINVAL; #endif } else if (!strcmp(this_char, MNTOPT_NOUUID)) { mp->m_flags |= XFS_MOUNT_NOUUID; } else if (!strcmp(this_char, MNTOPT_BARRIER)) { mp->m_flags |= XFS_MOUNT_BARRIER; } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { mp->m_flags &= ~XFS_MOUNT_BARRIER; } else if (!strcmp(this_char, MNTOPT_IKEEP)) { mp->m_flags |= XFS_MOUNT_IKEEP; } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { mp->m_flags &= ~XFS_MOUNT_IKEEP; } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE; } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; } else if (!strcmp(this_char, MNTOPT_ATTR2)) { mp->m_flags |= XFS_MOUNT_ATTR2; } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { mp->m_flags &= ~XFS_MOUNT_ATTR2; mp->m_flags |= XFS_MOUNT_NOATTR2; } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { mp->m_flags |= XFS_MOUNT_FILESTREAMS; } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE; 
} else if (!strcmp(this_char, MNTOPT_QUOTA) || !strcmp(this_char, MNTOPT_UQUOTA) || !strcmp(this_char, MNTOPT_USRQUOTA)) { mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | XFS_UQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || !strcmp(this_char, MNTOPT_UQUOTANOENF)) { mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); mp->m_qflags &= ~XFS_UQUOTA_ENFD; } else if (!strcmp(this_char, MNTOPT_PQUOTA) || !strcmp(this_char, MNTOPT_PRJQUOTA)) { mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | XFS_OQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); mp->m_qflags &= ~XFS_OQUOTA_ENFD; } else if (!strcmp(this_char, MNTOPT_GQUOTA) || !strcmp(this_char, MNTOPT_GRPQUOTA)) { mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | XFS_OQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); mp->m_qflags &= ~XFS_OQUOTA_ENFD; } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { xfs_warn(mp, "delaylog is the default now, option is deprecated."); } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { xfs_warn(mp, "nodelaylog support has been removed, option is deprecated."); } else if (!strcmp(this_char, MNTOPT_DISCARD)) { mp->m_flags |= XFS_MOUNT_DISCARD; } else if (!strcmp(this_char, MNTOPT_NODISCARD)) { mp->m_flags &= ~XFS_MOUNT_DISCARD; } else if (!strcmp(this_char, "ihashsize")) { xfs_warn(mp, "ihashsize no longer used, option is deprecated."); } else if (!strcmp(this_char, "osyncisdsync")) { xfs_warn(mp, "osyncisdsync has no effect, option is deprecated."); } else if (!strcmp(this_char, "osyncisosync")) { xfs_warn(mp, "osyncisosync has no effect, option is deprecated."); } else if (!strcmp(this_char, "irixsgid")) { xfs_warn(mp, "irixsgid is now a sysctl(2) variable, option is deprecated."); } else { xfs_warn(mp, "unknown mount option [%s].", this_char); return EINVAL; } } /* * no recovery flag requires a read-only mount */ 
if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && !(mp->m_flags & XFS_MOUNT_RDONLY)) { xfs_warn(mp, "no-recovery mounts must be read-only."); return EINVAL; } if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { xfs_warn(mp, "sunit and swidth options incompatible with the noalign option"); return EINVAL; } #ifndef CONFIG_XFS_QUOTA if (XFS_IS_QUOTA_RUNNING(mp)) { xfs_warn(mp, "quota support not available in this kernel."); return EINVAL; } #endif if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { xfs_warn(mp, "cannot mount with both project and group quota"); return EINVAL; } if ((dsunit && !dswidth) || (!dsunit && dswidth)) { xfs_warn(mp, "sunit and swidth must be specified together"); return EINVAL; } if (dsunit && (dswidth % dsunit != 0)) { xfs_warn(mp, "stripe width (%d) must be a multiple of the stripe unit (%d)", dswidth, dsunit); return EINVAL; } done: if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) { /* * At this point the superblock has not been read * in, therefore we do not know the block size. * Before the mount call ends we will convert * these to FSBs. 
*/ if (dsunit) { mp->m_dalign = dsunit; mp->m_flags |= XFS_MOUNT_RETERR; } if (dswidth) mp->m_swidth = dswidth; } if (mp->m_logbufs != -1 && mp->m_logbufs != 0 && (mp->m_logbufs < XLOG_MIN_ICLOGS || mp->m_logbufs > XLOG_MAX_ICLOGS)) { xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); return XFS_ERROR(EINVAL); } if (mp->m_logbsize != -1 && mp->m_logbsize != 0 && (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || !is_power_of_2(mp->m_logbsize))) { xfs_warn(mp, "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", mp->m_logbsize); return XFS_ERROR(EINVAL); } if (iosizelog) { if (iosizelog > XFS_MAX_IO_LOG || iosizelog < XFS_MIN_IO_LOG) { xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", iosizelog, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG); return XFS_ERROR(EINVAL); } mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; mp->m_readio_log = iosizelog; mp->m_writeio_log = iosizelog; } return 0; } struct proc_xfs_info { int flag; char *str; }; STATIC int xfs_showargs( struct xfs_mount *mp, struct seq_file *m) { static struct proc_xfs_info xfs_info_set[] = { /* the few simple ones we can get from the mount struct */ { XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP }, { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 }, { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, { XFS_MOUNT_DISCARD, "," MNTOPT_DISCARD }, { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_32BITINODE }, { 0, NULL } }; static struct proc_xfs_info xfs_info_unset[] = { /* the few simple ones we can get from the mount struct */ { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO }, { XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER }, { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE }, { 0, NULL } }; struct proc_xfs_info *xfs_infop; for (xfs_infop = 
xfs_info_set; xfs_infop->flag; xfs_infop++) { if (mp->m_flags & xfs_infop->flag) seq_puts(m, xfs_infop->str); } for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) { if (!(mp->m_flags & xfs_infop->flag)) seq_puts(m, xfs_infop->str); } if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk", (int)(1 << mp->m_writeio_log) >> 10); if (mp->m_logbufs > 0) seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); if (mp->m_logbsize > 0) seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); if (mp->m_logname) seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); if (mp->m_rtname) seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); if (mp->m_dalign > 0) seq_printf(m, "," MNTOPT_SUNIT "=%d", (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); if (mp->m_swidth > 0) seq_printf(m, "," MNTOPT_SWIDTH "=%d", (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD)) seq_puts(m, "," MNTOPT_USRQUOTA); else if (mp->m_qflags & XFS_UQUOTA_ACCT) seq_puts(m, "," MNTOPT_UQUOTANOENF); /* Either project or group quotas can be active, not both */ if (mp->m_qflags & XFS_PQUOTA_ACCT) { if (mp->m_qflags & XFS_OQUOTA_ENFD) seq_puts(m, "," MNTOPT_PRJQUOTA); else seq_puts(m, "," MNTOPT_PQUOTANOENF); } else if (mp->m_qflags & XFS_GQUOTA_ACCT) { if (mp->m_qflags & XFS_OQUOTA_ENFD) seq_puts(m, "," MNTOPT_GRPQUOTA); else seq_puts(m, "," MNTOPT_GQUOTANOENF); } if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) seq_puts(m, "," MNTOPT_NOQUOTA); return 0; } __uint64_t xfs_max_file_offset( unsigned int blockshift) { unsigned int pagefactor = 1; unsigned int bitshift = BITS_PER_LONG - 1; /* Figure out maximum filesize, on Linux this can depend on * the filesystem blocksize (on 32 bit platforms). * __block_write_begin does this in an [unsigned] long... 
* page->index << (PAGE_CACHE_SHIFT - bbits) * So, for page sized blocks (4K on 32 bit platforms), * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) * but for smaller blocksizes it is less (bbits = log2 bsize). * Note1: get_block_t takes a long (implicit cast from above) * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch * can optionally convert the [unsigned] long from above into * an [unsigned] long long. */ #if BITS_PER_LONG == 32 # if defined(CONFIG_LBDAF) ASSERT(sizeof(sector_t) == 8); pagefactor = PAGE_CACHE_SIZE; bitshift = BITS_PER_LONG; # else pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift); # endif #endif return (((__uint64_t)pagefactor) << bitshift) - 1; } xfs_agnumber_t xfs_set_inode32(struct xfs_mount *mp) { xfs_agnumber_t index = 0; xfs_agnumber_t maxagi = 0; xfs_sb_t *sbp = &mp->m_sb; xfs_agnumber_t max_metadata; xfs_agino_t agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks -1, 0); xfs_ino_t ino = XFS_AGINO_TO_INO(mp, sbp->sb_agcount -1, agino); xfs_perag_t *pag; /* Calculate how much should be reserved for inodes to meet * the max inode percentage. 
*/ if (mp->m_maxicount) { __uint64_t icount; icount = sbp->sb_dblocks * sbp->sb_imax_pct; do_div(icount, 100); icount += sbp->sb_agblocks - 1; do_div(icount, sbp->sb_agblocks); max_metadata = icount; } else { max_metadata = sbp->sb_agcount; } for (index = 0; index < sbp->sb_agcount; index++) { ino = XFS_AGINO_TO_INO(mp, index, agino); if (ino > XFS_MAXINUMBER_32) { pag = xfs_perag_get(mp, index); pag->pagi_inodeok = 0; pag->pagf_metadata = 0; xfs_perag_put(pag); continue; } pag = xfs_perag_get(mp, index); pag->pagi_inodeok = 1; maxagi++; if (index < max_metadata) pag->pagf_metadata = 1; xfs_perag_put(pag); } mp->m_flags |= (XFS_MOUNT_32BITINODES | XFS_MOUNT_SMALL_INUMS); return maxagi; } xfs_agnumber_t xfs_set_inode64(struct xfs_mount *mp) { xfs_agnumber_t index = 0; for (index = 0; index < mp->m_sb.sb_agcount; index++) { struct xfs_perag *pag; pag = xfs_perag_get(mp, index); pag->pagi_inodeok = 1; pag->pagf_metadata = 0; xfs_perag_put(pag); } /* There is no need for lock protection on m_flags, * the rw_semaphore of the VFS superblock is locked * during mount/umount/remount operations, so this is * enough to avoid concurency on the m_flags field */ mp->m_flags &= ~(XFS_MOUNT_32BITINODES | XFS_MOUNT_SMALL_INUMS); return index; } STATIC int xfs_blkdev_get( xfs_mount_t *mp, const char *name, struct block_device **bdevp) { int error = 0; *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, mp); if (IS_ERR(*bdevp)) { error = PTR_ERR(*bdevp); xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error); } return -error; } STATIC void xfs_blkdev_put( struct block_device *bdev) { if (bdev) blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } void xfs_blkdev_issue_flush( xfs_buftarg_t *buftarg) { blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL); } STATIC void xfs_close_devices( struct xfs_mount *mp) { if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { struct block_device *logdev = mp->m_logdev_targp->bt_bdev; xfs_free_buftarg(mp, 
mp->m_logdev_targp); xfs_blkdev_put(logdev); } if (mp->m_rtdev_targp) { struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev; xfs_free_buftarg(mp, mp->m_rtdev_targp); xfs_blkdev_put(rtdev); } xfs_free_buftarg(mp, mp->m_ddev_targp); } /* * The file system configurations are: * (1) device (partition) with data and internal log * (2) logical volume with data and log subvolumes. * (3) logical volume with data, log, and realtime subvolumes. * * We only have to handle opening the log and realtime volumes here if * they are present. The data subvolume has already been opened by * get_sb_bdev() and is stored in sb->s_bdev. */ STATIC int xfs_open_devices( struct xfs_mount *mp) { struct block_device *ddev = mp->m_super->s_bdev; struct block_device *logdev = NULL, *rtdev = NULL; int error; /* * Open real time and log devices - order is important. */ if (mp->m_logname) { error = xfs_blkdev_get(mp, mp->m_logname, &logdev); if (error) goto out; } if (mp->m_rtname) { error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev); if (error) goto out_close_logdev; if (rtdev == ddev || rtdev == logdev) { xfs_warn(mp, "Cannot mount filesystem with identical rtdev and ddev/logdev."); error = EINVAL; goto out_close_rtdev; } } /* * Setup xfs_mount buffer target pointers */ error = ENOMEM; mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname); if (!mp->m_ddev_targp) goto out_close_rtdev; if (rtdev) { mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1, mp->m_fsname); if (!mp->m_rtdev_targp) goto out_free_ddev_targ; } if (logdev && logdev != ddev) { mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1, mp->m_fsname); if (!mp->m_logdev_targp) goto out_free_rtdev_targ; } else { mp->m_logdev_targp = mp->m_ddev_targp; } return 0; out_free_rtdev_targ: if (mp->m_rtdev_targp) xfs_free_buftarg(mp, mp->m_rtdev_targp); out_free_ddev_targ: xfs_free_buftarg(mp, mp->m_ddev_targp); out_close_rtdev: if (rtdev) xfs_blkdev_put(rtdev); out_close_logdev: if (logdev && logdev != ddev) xfs_blkdev_put(logdev); 
out: return error; } /* * Setup xfs_mount buffer target pointers based on superblock */ STATIC int xfs_setup_devices( struct xfs_mount *mp) { int error; error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize, mp->m_sb.sb_sectsize); if (error) return error; if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { unsigned int log_sector_size = BBSIZE; if (xfs_sb_version_hassector(&mp->m_sb)) log_sector_size = mp->m_sb.sb_logsectsize; error = xfs_setsize_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize, log_sector_size); if (error) return error; } if (mp->m_rtdev_targp) { error = xfs_setsize_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_blocksize, mp->m_sb.sb_sectsize); if (error) return error; } return 0; } STATIC int xfs_init_mount_workqueues( struct xfs_mount *mp) { mp->m_data_workqueue = alloc_workqueue("xfs-data/%s", WQ_MEM_RECLAIM, 0, mp->m_fsname); if (!mp->m_data_workqueue) goto out; mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s", WQ_MEM_RECLAIM, 0, mp->m_fsname); if (!mp->m_unwritten_workqueue) goto out_destroy_data_iodone_queue; mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s", WQ_MEM_RECLAIM, 0, mp->m_fsname); if (!mp->m_cil_workqueue) goto out_destroy_unwritten; mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s", WQ_NON_REENTRANT, 0, mp->m_fsname); if (!mp->m_reclaim_workqueue) goto out_destroy_cil; mp->m_log_workqueue = alloc_workqueue("xfs-log/%s", WQ_NON_REENTRANT, 0, mp->m_fsname); if (!mp->m_log_workqueue) goto out_destroy_reclaim; mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s", WQ_NON_REENTRANT, 0, mp->m_fsname); if (!mp->m_eofblocks_workqueue) goto out_destroy_log; return 0; out_destroy_log: destroy_workqueue(mp->m_log_workqueue); out_destroy_reclaim: destroy_workqueue(mp->m_reclaim_workqueue); out_destroy_cil: destroy_workqueue(mp->m_cil_workqueue); out_destroy_unwritten: destroy_workqueue(mp->m_unwritten_workqueue); out_destroy_data_iodone_queue: destroy_workqueue(mp->m_data_workqueue); out: 
return -ENOMEM; } STATIC void xfs_destroy_mount_workqueues( struct xfs_mount *mp) { destroy_workqueue(mp->m_eofblocks_workqueue); destroy_workqueue(mp->m_log_workqueue); destroy_workqueue(mp->m_reclaim_workqueue); destroy_workqueue(mp->m_cil_workqueue); destroy_workqueue(mp->m_data_workqueue); destroy_workqueue(mp->m_unwritten_workqueue); } /* * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting * for IO to complete so that we effectively throttle multiple callers to the * rate at which IO is completing. */ void xfs_flush_inodes( struct xfs_mount *mp) { struct super_block *sb = mp->m_super; if (down_read_trylock(&sb->s_umount)) { sync_inodes_sb(sb); up_read(&sb->s_umount); } } /* Catch misguided souls that try to use this interface on XFS */ STATIC struct inode * xfs_fs_alloc_inode( struct super_block *sb) { BUG(); return NULL; } /* * Now that the generic code is guaranteed not to be accessing * the linux inode, we can reclaim the inode. */ STATIC void xfs_fs_destroy_inode( struct inode *inode) { struct xfs_inode *ip = XFS_I(inode); trace_xfs_destroy_inode(ip); XFS_STATS_INC(vn_reclaim); /* bad inode, get out here ASAP */ if (is_bad_inode(inode)) goto out_reclaim; ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0); /* * We should never get here with one of the reclaim flags already set. */ ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM)); /* * We always use background reclaim here because even if the * inode is clean, it still may be under IO and hence we have * to take the flush lock. The background reclaim path handles * this more efficiently than we can here, so simply let background * reclaim tear down all inodes. */ out_reclaim: xfs_inode_set_reclaim_tag(ip); } /* * Slab object creation initialisation for the XFS inode. 
* This covers only the idempotent fields in the XFS inode; * all other fields need to be initialised on allocation * from the slab. This avoids the need to repeatedly initialise * fields in the xfs inode that left in the initialise state * when freeing the inode. */ STATIC void xfs_fs_inode_init_once( void *inode) { struct xfs_inode *ip = inode; memset(ip, 0, sizeof(struct xfs_inode)); /* vfs inode */ inode_init_once(VFS_I(ip)); /* xfs inode */ atomic_set(&ip->i_pincount, 0); spin_lock_init(&ip->i_flags_lock); mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, "xfsino", ip->i_ino); } STATIC void xfs_fs_evict_inode( struct inode *inode) { xfs_inode_t *ip = XFS_I(inode); ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); trace_xfs_evict_inode(ip); truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); XFS_STATS_INC(vn_rele); XFS_STATS_INC(vn_remove); XFS_STATS_DEC(vn_active); xfs_inactive(ip); } /* * We do an unlocked check for XFS_IDONTCACHE here because we are already * serialised against cache hits here via the inode->i_lock and igrab() in * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be * racing with us, and it avoids needing to grab a spinlock here for every inode * we drop the final reference on. */ STATIC int xfs_fs_drop_inode( struct inode *inode) { struct xfs_inode *ip = XFS_I(inode); return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE); } STATIC void xfs_free_fsname( struct xfs_mount *mp) { kfree(mp->m_fsname); kfree(mp->m_rtname); kfree(mp->m_logname); } STATIC void xfs_fs_put_super( struct super_block *sb) { struct xfs_mount *mp = XFS_M(sb); xfs_filestream_unmount(mp); xfs_unmountfs(mp); xfs_freesb(mp); xfs_icsb_destroy_counters(mp); xfs_destroy_mount_workqueues(mp); xfs_close_devices(mp); xfs_free_fsname(mp); kfree(mp); } STATIC int xfs_fs_sync_fs( struct super_block *sb, int wait) { struct xfs_mount *mp = XFS_M(sb); /* * Doing anything during the async pass would be counterproductive. 
*/ if (!wait) return 0; xfs_log_force(mp, XFS_LOG_SYNC); if (laptop_mode) { /* * The disk must be active because we're syncing. * We schedule log work now (now that the disk is * active) instead of later (when it might not be). */ flush_delayed_work(&mp->m_log->l_work); } return 0; } STATIC int xfs_fs_statfs( struct dentry *dentry, struct kstatfs *statp) { struct xfs_mount *mp = XFS_M(dentry->d_sb); xfs_sb_t *sbp = &mp->m_sb; struct xfs_inode *ip = XFS_I(dentry->d_inode); __uint64_t fakeinos, id; xfs_extlen_t lsize; __int64_t ffree; statp->f_type = XFS_SB_MAGIC; statp->f_namelen = MAXNAMELEN - 1; id = huge_encode_dev(mp->m_ddev_targp->bt_dev); statp->f_fsid.val[0] = (u32)id; statp->f_fsid.val[1] = (u32)(id >> 32); xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); spin_lock(&mp->m_sb_lock); statp->f_bsize = sbp->sb_blocksize; lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; statp->f_blocks = sbp->sb_dblocks - lsize; statp->f_bfree = statp->f_bavail = sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); fakeinos = statp->f_bfree << sbp->sb_inopblog; statp->f_files = MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); if (mp->m_maxicount) statp->f_files = min_t(typeof(statp->f_files), statp->f_files, mp->m_maxicount); /* make sure statp->f_ffree does not underflow */ ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); statp->f_ffree = max_t(__int64_t, ffree, 0); spin_unlock(&mp->m_sb_lock); if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) == (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) xfs_qm_statvfs(ip, statp); return 0; } STATIC void xfs_save_resvblks(struct xfs_mount *mp) { __uint64_t resblks = 0; mp->m_resblks_save = mp->m_resblks; xfs_reserve_blocks(mp, &resblks, NULL); } STATIC void xfs_restore_resvblks(struct xfs_mount *mp) { __uint64_t resblks; if (mp->m_resblks_save) { resblks = mp->m_resblks_save; mp->m_resblks_save = 0; } else resblks = xfs_default_resblks(mp); xfs_reserve_blocks(mp, &resblks, NULL); } /* * 
Trigger writeback of all the dirty metadata in the file system. * * This ensures that the metadata is written to their location on disk rather * than just existing in transactions in the log. This means after a quiesce * there is no log replay required to write the inodes to disk - this is the * primary difference between a sync and a quiesce. * * Note: xfs_log_quiesce() stops background log work - the callers must ensure * it is started again when appropriate. */ void xfs_quiesce_attr( struct xfs_mount *mp) { int error = 0; /* wait for all modifications to complete */ while (atomic_read(&mp->m_active_trans) > 0) delay(100); /* force the log to unpin objects from the now complete transactions */ xfs_log_force(mp, XFS_LOG_SYNC); /* reclaim inodes to do any IO before the freeze completes */ xfs_reclaim_inodes(mp, 0); xfs_reclaim_inodes(mp, SYNC_WAIT); /* Push the superblock and write an unmount record */ error = xfs_log_sbcount(mp); if (error) xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. " "Frozen image may not be consistent."); /* * Just warn here till VFS can correctly support * read-only remount without racing. */ WARN_ON(atomic_read(&mp->m_active_trans) != 0); xfs_log_quiesce(mp); } STATIC int xfs_fs_remount( struct super_block *sb, int *flags, char *options) { struct xfs_mount *mp = XFS_M(sb); substring_t args[MAX_OPT_ARGS]; char *p; int error; sync_filesystem(sb); while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_barrier: mp->m_flags |= XFS_MOUNT_BARRIER; break; case Opt_nobarrier: mp->m_flags &= ~XFS_MOUNT_BARRIER; break; case Opt_inode64: mp->m_maxagi = xfs_set_inode64(mp); break; case Opt_inode32: mp->m_maxagi = xfs_set_inode32(mp); break; default: /* * Logically we would return an error here to prevent * users from believing they might have changed * mount options using remount which can't be changed. 
* * But unfortunately mount(8) adds all options from * mtab and fstab to the mount arguments in some cases * so we can't blindly reject options, but have to * check for each specified option if it actually * differs from the currently set option and only * reject it if that's the case. * * Until that is implemented we return success for * every remount request, and silently ignore all * options that we can't actually change. */ #if 0 xfs_info(mp, "mount option \"%s\" not supported for remount\n", p); return -EINVAL; #else break; #endif } } /* ro -> rw */ if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { mp->m_flags &= ~XFS_MOUNT_RDONLY; /* * If this is the first remount to writeable state we * might have some superblock changes to update. */ if (mp->m_update_flags) { error = xfs_mount_log_sb(mp, mp->m_update_flags); if (error) { xfs_warn(mp, "failed to write sb changes"); return error; } mp->m_update_flags = 0; } /* * Fill out the reserve pool if it is empty. Use the stashed * value if it is non-zero, otherwise go with the default. */ xfs_restore_resvblks(mp); xfs_log_work_queue(mp); } /* rw -> ro */ if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) { /* * Before we sync the metadata, we need to free up the reserve * block pool so that the used block count in the superblock on * disk is correct at the end of the remount. Stash the current * reserve pool size so that if we get remounted rw, we can * return it to the same size. */ xfs_save_resvblks(mp); xfs_quiesce_attr(mp); mp->m_flags |= XFS_MOUNT_RDONLY; } return 0; } /* * Second stage of a freeze. The data is already frozen so we only * need to take care of the metadata. Once that's done write a dummy * record to dirty the log in case of a crash while frozen. 
*/ STATIC int xfs_fs_freeze( struct super_block *sb) { struct xfs_mount *mp = XFS_M(sb); xfs_save_resvblks(mp); xfs_quiesce_attr(mp); return -xfs_fs_log_dummy(mp); } STATIC int xfs_fs_unfreeze( struct super_block *sb) { struct xfs_mount *mp = XFS_M(sb); xfs_restore_resvblks(mp); xfs_log_work_queue(mp); return 0; } STATIC int xfs_fs_show_options( struct seq_file *m, struct dentry *root) { return -xfs_showargs(XFS_M(root->d_sb), m); } /* * This function fills in xfs_mount_t fields based on mount args. * Note: the superblock _has_ now been read in. */ STATIC int xfs_finish_flags( struct xfs_mount *mp) { int ronly = (mp->m_flags & XFS_MOUNT_RDONLY); /* Fail a mount where the logbuf is smaller than the log stripe */ if (xfs_sb_version_haslogv2(&mp->m_sb)) { if (mp->m_logbsize <= 0 && mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { mp->m_logbsize = mp->m_sb.sb_logsunit; } else if (mp->m_logbsize > 0 && mp->m_logbsize < mp->m_sb.sb_logsunit) { xfs_warn(mp, "logbuf size must be greater than or equal to log stripe size"); return XFS_ERROR(EINVAL); } } else { /* Fail a mount if the logbuf is larger than 32K */ if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { xfs_warn(mp, "logbuf size for version 1 logs must be 16K or 32K"); return XFS_ERROR(EINVAL); } } /* * V5 filesystems always use attr2 format for attributes. */ if (xfs_sb_version_hascrc(&mp->m_sb) && (mp->m_flags & XFS_MOUNT_NOATTR2)) { xfs_warn(mp, "Cannot mount a V5 filesystem as %s. 
%s is always enabled for V5 filesystems.", MNTOPT_NOATTR2, MNTOPT_ATTR2); return XFS_ERROR(EINVAL); } /* * mkfs'ed attr2 will turn on attr2 mount unless explicitly * told by noattr2 to turn it off */ if (xfs_sb_version_hasattr2(&mp->m_sb) && !(mp->m_flags & XFS_MOUNT_NOATTR2)) mp->m_flags |= XFS_MOUNT_ATTR2; /* * prohibit r/w mounts of read-only filesystems */ if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { xfs_warn(mp, "cannot mount a read-only filesystem as read-write"); return XFS_ERROR(EROFS); } return 0; } STATIC int xfs_fs_fill_super( struct super_block *sb, void *data, int silent) { struct inode *root; struct xfs_mount *mp = NULL; int flags = 0, error = ENOMEM; mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); if (!mp) goto out; spin_lock_init(&mp->m_sb_lock); mutex_init(&mp->m_growlock); atomic_set(&mp->m_active_trans, 0); INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker); mp->m_super = sb; sb->s_fs_info = mp; error = xfs_parseargs(mp, (char *)data); if (error) goto out_free_fsname; sb_min_blocksize(sb, BBSIZE); sb->s_xattr = xfs_xattr_handlers; sb->s_export_op = &xfs_export_operations; #ifdef CONFIG_XFS_QUOTA sb->s_qcop = &xfs_quotactl_operations; #endif sb->s_op = &xfs_super_operations; if (silent) flags |= XFS_MFSI_QUIET; error = xfs_open_devices(mp); if (error) goto out_free_fsname; error = xfs_init_mount_workqueues(mp); if (error) goto out_close_devices; error = xfs_icsb_init_counters(mp); if (error) goto out_destroy_workqueues; error = xfs_readsb(mp, flags); if (error) goto out_destroy_counters; error = xfs_finish_flags(mp); if (error) goto out_free_sb; error = xfs_setup_devices(mp); if (error) goto out_free_sb; error = xfs_filestream_mount(mp); if (error) goto out_free_sb; /* * we must configure the block size in the superblock before we run the * full mount process as the mount process can lookup and cache inodes. 
*/ sb->s_magic = XFS_SB_MAGIC; sb->s_blocksize = mp->m_sb.sb_blocksize; sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits); sb->s_max_links = XFS_MAXLINK; sb->s_time_gran = 1; set_posix_acl_flag(sb); error = xfs_mountfs(mp); if (error) goto out_filestream_unmount; root = igrab(VFS_I(mp->m_rootip)); if (!root) { error = ENOENT; goto out_unmount; } if (is_bad_inode(root)) { error = EINVAL; goto out_unmount; } sb->s_root = d_make_root(root); if (!sb->s_root) { error = ENOMEM; goto out_unmount; } return 0; out_filestream_unmount: xfs_filestream_unmount(mp); out_free_sb: xfs_freesb(mp); out_destroy_counters: xfs_icsb_destroy_counters(mp); out_destroy_workqueues: xfs_destroy_mount_workqueues(mp); out_close_devices: xfs_close_devices(mp); out_free_fsname: xfs_free_fsname(mp); kfree(mp); out: return -error; out_unmount: xfs_filestream_unmount(mp); xfs_unmountfs(mp); goto out_free_sb; } STATIC struct dentry * xfs_fs_mount( struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); } static int xfs_fs_nr_cached_objects( struct super_block *sb) { return xfs_reclaim_inodes_count(XFS_M(sb)); } static void xfs_fs_free_cached_objects( struct super_block *sb, int nr_to_scan) { xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan); } static const struct super_operations xfs_super_operations = { .alloc_inode = xfs_fs_alloc_inode, .destroy_inode = xfs_fs_destroy_inode, .evict_inode = xfs_fs_evict_inode, .drop_inode = xfs_fs_drop_inode, .put_super = xfs_fs_put_super, .sync_fs = xfs_fs_sync_fs, .freeze_fs = xfs_fs_freeze, .unfreeze_fs = xfs_fs_unfreeze, .statfs = xfs_fs_statfs, .remount_fs = xfs_fs_remount, .show_options = xfs_fs_show_options, .nr_cached_objects = xfs_fs_nr_cached_objects, .free_cached_objects = xfs_fs_free_cached_objects, }; static struct file_system_type xfs_fs_type = { .owner = THIS_MODULE, .name = "xfs", .mount = xfs_fs_mount, 
.kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("xfs"); STATIC int __init xfs_init_zones(void) { xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend"); if (!xfs_ioend_zone) goto out; xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE, xfs_ioend_zone); if (!xfs_ioend_pool) goto out_destroy_ioend_zone; xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t), "xfs_log_ticket"); if (!xfs_log_ticket_zone) goto out_destroy_ioend_pool; xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t), "xfs_bmap_free_item"); if (!xfs_bmap_free_item_zone) goto out_destroy_log_ticket_zone; xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), "xfs_btree_cur"); if (!xfs_btree_cur_zone) goto out_destroy_bmap_free_item_zone; xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state"); if (!xfs_da_state_zone) goto out_destroy_btree_cur_zone; xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); if (!xfs_ifork_zone) goto out_destroy_da_state_zone; xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); if (!xfs_trans_zone) goto out_destroy_ifork_zone; xfs_log_item_desc_zone = kmem_zone_init(sizeof(struct xfs_log_item_desc), "xfs_log_item_desc"); if (!xfs_log_item_desc_zone) goto out_destroy_trans_zone; /* * The size of the zone allocated buf log item is the maximum * size possible under XFS. This wastes a little bit of memory, * but it is much faster. 
*/ xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item), "xfs_buf_item"); if (!xfs_buf_item_zone) goto out_destroy_log_item_desc_zone; xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), "xfs_efd_item"); if (!xfs_efd_zone) goto out_destroy_buf_item_zone; xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) + ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), "xfs_efi_item"); if (!xfs_efi_zone) goto out_destroy_efd_zone; xfs_inode_zone = kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode", KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD, xfs_fs_inode_init_once); if (!xfs_inode_zone) goto out_destroy_efi_zone; xfs_ili_zone = kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili", KM_ZONE_SPREAD, NULL); if (!xfs_ili_zone) goto out_destroy_inode_zone; return 0; out_destroy_inode_zone: kmem_zone_destroy(xfs_inode_zone); out_destroy_efi_zone: kmem_zone_destroy(xfs_efi_zone); out_destroy_efd_zone: kmem_zone_destroy(xfs_efd_zone); out_destroy_buf_item_zone: kmem_zone_destroy(xfs_buf_item_zone); out_destroy_log_item_desc_zone: kmem_zone_destroy(xfs_log_item_desc_zone); out_destroy_trans_zone: kmem_zone_destroy(xfs_trans_zone); out_destroy_ifork_zone: kmem_zone_destroy(xfs_ifork_zone); out_destroy_da_state_zone: kmem_zone_destroy(xfs_da_state_zone); out_destroy_btree_cur_zone: kmem_zone_destroy(xfs_btree_cur_zone); out_destroy_bmap_free_item_zone: kmem_zone_destroy(xfs_bmap_free_item_zone); out_destroy_log_ticket_zone: kmem_zone_destroy(xfs_log_ticket_zone); out_destroy_ioend_pool: mempool_destroy(xfs_ioend_pool); out_destroy_ioend_zone: kmem_zone_destroy(xfs_ioend_zone); out: return -ENOMEM; } STATIC void xfs_destroy_zones(void) { /* * Make sure all delayed rcu free are flushed before we * destroy caches. 
*/ rcu_barrier(); kmem_zone_destroy(xfs_ili_zone); kmem_zone_destroy(xfs_inode_zone); kmem_zone_destroy(xfs_efi_zone); kmem_zone_destroy(xfs_efd_zone); kmem_zone_destroy(xfs_buf_item_zone); kmem_zone_destroy(xfs_log_item_desc_zone); kmem_zone_destroy(xfs_trans_zone); kmem_zone_destroy(xfs_ifork_zone); kmem_zone_destroy(xfs_da_state_zone); kmem_zone_destroy(xfs_btree_cur_zone); kmem_zone_destroy(xfs_bmap_free_item_zone); kmem_zone_destroy(xfs_log_ticket_zone); mempool_destroy(xfs_ioend_pool); kmem_zone_destroy(xfs_ioend_zone); } STATIC int __init xfs_init_workqueues(void) { /* * The allocation workqueue can be used in memory reclaim situations * (writepage path), and parallelism is only limited by the number of * AGs in all the filesystems mounted. Hence use the default large * max_active value for this workqueue. */ xfs_alloc_wq = alloc_workqueue("xfsalloc", WQ_MEM_RECLAIM, 0); if (!xfs_alloc_wq) return -ENOMEM; return 0; } STATIC void xfs_destroy_workqueues(void) { destroy_workqueue(xfs_alloc_wq); } STATIC int __init init_xfs_fs(void) { int error; printk(KERN_INFO XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n"); xfs_dir_startup(); error = xfs_init_zones(); if (error) goto out; error = xfs_init_workqueues(); if (error) goto out_destroy_zones; error = xfs_mru_cache_init(); if (error) goto out_destroy_wq; error = xfs_filestream_init(); if (error) goto out_mru_cache_uninit; error = xfs_buf_init(); if (error) goto out_filestream_uninit; error = xfs_init_procfs(); if (error) goto out_buf_terminate; error = xfs_sysctl_register(); if (error) goto out_cleanup_procfs; error = xfs_qm_init(); if (error) goto out_sysctl_unregister; error = register_filesystem(&xfs_fs_type); if (error) goto out_qm_exit; return 0; out_qm_exit: xfs_qm_exit(); out_sysctl_unregister: xfs_sysctl_unregister(); out_cleanup_procfs: xfs_cleanup_procfs(); out_buf_terminate: xfs_buf_terminate(); out_filestream_uninit: xfs_filestream_uninit(); out_mru_cache_uninit: xfs_mru_cache_uninit(); 
out_destroy_wq: xfs_destroy_workqueues(); out_destroy_zones: xfs_destroy_zones(); out: return error; } STATIC void __exit exit_xfs_fs(void) { xfs_qm_exit(); unregister_filesystem(&xfs_fs_type); xfs_sysctl_unregister(); xfs_cleanup_procfs(); xfs_buf_terminate(); xfs_filestream_uninit(); xfs_mru_cache_uninit(); xfs_destroy_workqueues(); xfs_destroy_zones(); } module_init(init_xfs_fs); module_exit(exit_xfs_fs); MODULE_AUTHOR("Silicon Graphics, Inc."); MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled"); MODULE_LICENSE("GPL");
gpl-2.0
TV-LP51-Devices/hells-Core-N6
drivers/staging/android/ion/compat_ion.c
371
4795
/* * drivers/gpu/ion/compat_ion.c * * Copyright (C) 2013 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/compat.h> #include <linux/fs.h> #include <linux/uaccess.h> #include "ion.h" #include "compat_ion.h" /* See drivers/staging/android/uapi/ion.h for the definition of these structs */ struct compat_ion_allocation_data { compat_size_t len; compat_size_t align; compat_uint_t heap_mask; compat_uint_t flags; compat_int_t handle; }; struct compat_ion_custom_data { compat_uint_t cmd; compat_ulong_t arg; }; struct compat_ion_handle_data { compat_int_t handle; }; #define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ struct compat_ion_allocation_data) #define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \ struct compat_ion_handle_data) #define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ struct compat_ion_custom_data) static int compat_get_ion_allocation_data( struct compat_ion_allocation_data __user *data32, struct ion_allocation_data __user *data) { compat_size_t s; compat_uint_t u; compat_int_t i; int err; err = get_user(s, &data32->len); err |= put_user(s, &data->len); err |= get_user(s, &data32->align); err |= put_user(s, &data->align); err |= get_user(u, &data32->heap_mask); err |= put_user(u, &data->heap_mask); err |= get_user(u, &data32->flags); err |= put_user(u, &data->flags); err |= get_user(i, &data32->handle); err |= put_user(i, &data->handle); return err; } static int compat_get_ion_handle_data( struct compat_ion_handle_data __user *data32, struct ion_handle_data __user *data) { compat_int_t i; int err; err = get_user(i, 
&data32->handle); err |= put_user(i, &data->handle); return err; } static int compat_put_ion_allocation_data( struct compat_ion_allocation_data __user *data32, struct ion_allocation_data __user *data) { compat_size_t s; compat_uint_t u; compat_int_t i; int err; err = get_user(s, &data->len); err |= put_user(s, &data32->len); err |= get_user(s, &data->align); err |= put_user(s, &data32->align); err |= get_user(u, &data->heap_mask); err |= put_user(u, &data32->heap_mask); err |= get_user(u, &data->flags); err |= put_user(u, &data32->flags); err |= get_user(i, &data->handle); err |= put_user(i, &data32->handle); return err; } static int compat_get_ion_custom_data( struct compat_ion_custom_data __user *data32, struct ion_custom_data __user *data) { compat_uint_t cmd; compat_ulong_t arg; int err; err = get_user(cmd, &data32->cmd); err |= put_user(cmd, &data->cmd); err |= get_user(arg, &data32->arg); err |= put_user(arg, &data->arg); return err; }; long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { long ret; if (!filp->f_op || !filp->f_op->unlocked_ioctl) return -ENOTTY; switch (cmd) { case COMPAT_ION_IOC_ALLOC: { struct compat_ion_allocation_data __user *data32; struct ion_allocation_data __user *data; int err; data32 = compat_ptr(arg); data = compat_alloc_user_space(sizeof(*data)); if (data == NULL) return -EFAULT; err = compat_get_ion_allocation_data(data32, data); if (err) return err; ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC, (unsigned long)data); err = compat_put_ion_allocation_data(data32, data); return ret ? 
ret : err; } case COMPAT_ION_IOC_FREE: { struct compat_ion_handle_data __user *data32; struct ion_handle_data __user *data; int err; data32 = compat_ptr(arg); data = compat_alloc_user_space(sizeof(*data)); if (data == NULL) return -EFAULT; err = compat_get_ion_handle_data(data32, data); if (err) return err; return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE, (unsigned long)data); } case COMPAT_ION_IOC_CUSTOM: { struct compat_ion_custom_data __user *data32; struct ion_custom_data __user *data; int err; data32 = compat_ptr(arg); data = compat_alloc_user_space(sizeof(*data)); if (data == NULL) return -EFAULT; err = compat_get_ion_custom_data(data32, data); if (err) return err; return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM, (unsigned long)data); } case ION_IOC_SHARE: case ION_IOC_MAP: case ION_IOC_IMPORT: case ION_IOC_SYNC: return filp->f_op->unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); default: return -ENOIOCTLCMD; } }
gpl-2.0
hggh/linux
drivers/uwb/driver.c
1651
4142
/* * Ultra Wide Band * Driver initialization, etc * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs * * Life cycle: FIXME: explain * * UWB radio controller: * * 1. alloc a uwb_rc, zero it * 2. call uwb_rc_init() on it to set it up + ops (won't do any * kind of allocation) * 3. register (now it is owned by the UWB stack--deregister before * freeing/destroying). * 4. It lives on it's own now (UWB stack handles)--when it * disconnects, call unregister() * 5. free it. * * Make sure you have a reference to the uwb_rc before calling * any of the UWB API functions. * * TODO: * * 1. Locking and life cycle management is crappy still. All entry * points to the UWB HCD API assume you have a reference on the * uwb_rc structure and that it won't go away. They mutex lock it * before doing anything. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/err.h> #include <linux/kdev_t.h> #include <linux/random.h> #include "uwb-internal.h" /* UWB stack attributes (or 'global' constants) */ /** * If a beacon disappears for longer than this, then we consider the * device who was represented by that beacon to be gone. 
* * ECMA-368[17.2.3, last para] establishes that a device must not * consider a device to be its neighbour if he doesn't receive a beacon * for more than mMaxLostBeacons. mMaxLostBeacons is defined in * ECMA-368[17.16] as 3; because we can get only one beacon per * superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time * for jitter and stuff and make it 500 ms. */ unsigned long beacon_timeout_ms = 500; static ssize_t beacon_timeout_ms_show(struct class *class, struct class_attribute *attr, char *buf) { return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms); } static ssize_t beacon_timeout_ms_store(struct class *class, struct class_attribute *attr, const char *buf, size_t size) { unsigned long bt; ssize_t result; result = sscanf(buf, "%lu", &bt); if (result != 1) return -EINVAL; beacon_timeout_ms = bt; return size; } static struct class_attribute uwb_class_attrs[] = { __ATTR(beacon_timeout_ms, S_IWUSR | S_IRUGO, beacon_timeout_ms_show, beacon_timeout_ms_store), __ATTR_NULL, }; /** Device model classes */ struct class uwb_rc_class = { .name = "uwb_rc", .class_attrs = uwb_class_attrs, }; static int __init uwb_subsys_init(void) { int result = 0; result = uwb_est_create(); if (result < 0) { printk(KERN_ERR "uwb: Can't initialize EST subsystem\n"); goto error_est_init; } result = class_register(&uwb_rc_class); if (result < 0) goto error_uwb_rc_class_register; /* Register the UWB bus */ result = bus_register(&uwb_bus_type); if (result) { pr_err("%s - registering bus driver failed\n", __func__); goto exit_bus; } uwb_dbg_init(); return 0; exit_bus: class_unregister(&uwb_rc_class); error_uwb_rc_class_register: uwb_est_destroy(); error_est_init: return result; } module_init(uwb_subsys_init); static void __exit uwb_subsys_exit(void) { uwb_dbg_exit(); bus_unregister(&uwb_bus_type); class_unregister(&uwb_rc_class); uwb_est_destroy(); return; } module_exit(uwb_subsys_exit); MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); 
MODULE_DESCRIPTION("Ultra Wide Band core"); MODULE_LICENSE("GPL");
gpl-2.0
heechul/linux
security/selinux/netport.c
1651
6770
/* * Network port table * * SELinux must keep a mapping of network ports to labels/SIDs. This * mapping is maintained as part of the normal policy but a fast cache is * needed to reduce the lookup overhead. * * Author: Paul Moore <paul@paul-moore.com> * * This code is heavily based on the "netif" concept originally developed by * James Morris <jmorris@redhat.com> * (see security/selinux/netif.c for more information) * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2008 * * This program is free software: you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/ip.h> #include <net/ipv6.h> #include "netport.h" #include "objsec.h" #define SEL_NETPORT_HASH_SIZE 256 #define SEL_NETPORT_HASH_BKT_LIMIT 16 struct sel_netport_bkt { int size; struct list_head list; }; struct sel_netport { struct netport_security_struct psec; struct list_head list; struct rcu_head rcu; }; /* NOTE: we are using a combined hash table for both IPv4 and IPv6, the reason * for this is that I suspect most users will not make heavy use of both * address families at the same time so one table will usually end up wasted, * if this becomes a problem we can always add a hash table for each address * family later */ static LIST_HEAD(sel_netport_list); static DEFINE_SPINLOCK(sel_netport_lock); static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE]; /** * sel_netport_hashfn - Hashing function for the port 
table * @pnum: port number * * Description: * This is the hashing function for the port table, it returns the bucket * number for the given port. * */ static unsigned int sel_netport_hashfn(u16 pnum) { return (pnum & (SEL_NETPORT_HASH_SIZE - 1)); } /** * sel_netport_find - Search for a port record * @protocol: protocol * @port: pnum * * Description: * Search the network port table and return the matching record. If an entry * can not be found in the table return NULL. * */ static struct sel_netport *sel_netport_find(u8 protocol, u16 pnum) { unsigned int idx; struct sel_netport *port; idx = sel_netport_hashfn(pnum); list_for_each_entry_rcu(port, &sel_netport_hash[idx].list, list) if (port->psec.port == pnum && port->psec.protocol == protocol) return port; return NULL; } /** * sel_netport_insert - Insert a new port into the table * @port: the new port record * * Description: * Add a new port record to the network address hash table. * */ static void sel_netport_insert(struct sel_netport *port) { unsigned int idx; /* we need to impose a limit on the growth of the hash table so check * this bucket to make sure it is within the specified bounds */ idx = sel_netport_hashfn(port->psec.port); list_add_rcu(&port->list, &sel_netport_hash[idx].list); if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) { struct sel_netport *tail; tail = list_entry( rcu_dereference_protected( sel_netport_hash[idx].list.prev, lockdep_is_held(&sel_netport_lock)), struct sel_netport, list); list_del_rcu(&tail->list); kfree_rcu(tail, rcu); } else sel_netport_hash[idx].size++; } /** * sel_netport_sid_slow - Lookup the SID of a network address using the policy * @protocol: protocol * @pnum: port * @sid: port SID * * Description: * This function determines the SID of a network port by quering the security * policy. The result is added to the network port table to speedup future * queries. Returns zero on success, negative values on failure. 
* */ static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid) { int ret = -ENOMEM; struct sel_netport *port; struct sel_netport *new = NULL; spin_lock_bh(&sel_netport_lock); port = sel_netport_find(protocol, pnum); if (port != NULL) { *sid = port->psec.sid; spin_unlock_bh(&sel_netport_lock); return 0; } new = kzalloc(sizeof(*new), GFP_ATOMIC); if (new == NULL) goto out; ret = security_port_sid(protocol, pnum, sid); if (ret != 0) goto out; new->psec.port = pnum; new->psec.protocol = protocol; new->psec.sid = *sid; sel_netport_insert(new); out: spin_unlock_bh(&sel_netport_lock); if (unlikely(ret)) { printk(KERN_WARNING "SELinux: failure in sel_netport_sid_slow()," " unable to determine network port label\n"); kfree(new); } return ret; } /** * sel_netport_sid - Lookup the SID of a network port * @protocol: protocol * @pnum: port * @sid: port SID * * Description: * This function determines the SID of a network port using the fastest method * possible. First the port table is queried, but if an entry can't be found * then the policy is queried and the result is added to the table to speedup * future queries. Returns zero on success, negative values on failure. * */ int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid) { struct sel_netport *port; rcu_read_lock(); port = sel_netport_find(protocol, pnum); if (port != NULL) { *sid = port->psec.sid; rcu_read_unlock(); return 0; } rcu_read_unlock(); return sel_netport_sid_slow(protocol, pnum, sid); } /** * sel_netport_flush - Flush the entire network port table * * Description: * Remove all entries from the network address table. 
* */ static void sel_netport_flush(void) { unsigned int idx; struct sel_netport *port, *port_tmp; spin_lock_bh(&sel_netport_lock); for (idx = 0; idx < SEL_NETPORT_HASH_SIZE; idx++) { list_for_each_entry_safe(port, port_tmp, &sel_netport_hash[idx].list, list) { list_del_rcu(&port->list); kfree_rcu(port, rcu); } sel_netport_hash[idx].size = 0; } spin_unlock_bh(&sel_netport_lock); } static int sel_netport_avc_callback(u32 event) { if (event == AVC_CALLBACK_RESET) { sel_netport_flush(); synchronize_net(); } return 0; } static __init int sel_netport_init(void) { int iter; int ret; if (!selinux_enabled) return 0; for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) { INIT_LIST_HEAD(&sel_netport_hash[iter].list); sel_netport_hash[iter].size = 0; } ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET); if (ret != 0) panic("avc_add_callback() failed, error %d\n", ret); return ret; } __initcall(sel_netport_init);
gpl-2.0
getitnowmarketing/iconia-ics
drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
3187
4761
/****************************************************************************** * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" #include "iwl-helpers.h" #include "iwl-4965-hw.h" #include "iwl-4965.h" #include "iwl-4965-calib.h" #define IWL_AC_UNSET -1 /** * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, * using sample data 100 bytes apart. If these sample points are good, * it's a pretty good bet that everything between them is good, too. 
 */
static int iwl4965_verify_inst_sparse(struct iwl_priv *priv,
				__le32 *image, u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	/* Sample one 32-bit word every 100 bytes of the image; the write of
	 * the target address must precede each read on this indirect port. */
	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			i + IWL4965_RTC_INST_LOWER_BOUND);
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			ret = -EIO;
			/* tolerate up to 3 mismatches before giving up */
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}

	return ret;
}

/**
 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
 *     looking at all data.
 */
static int iwl4965_verify_inst_full(struct iwl_priv *priv,
				__le32 *image, u32 len)
{
	u32 val;
	u32 save_len = len;
	int ret = 0;
	u32 errcnt;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	/* Set the start address once; the device port auto-increments, so
	 * consecutive reads walk the SRAM word by word. */
	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			   IWL4965_RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  save_len - len, val, le32_to_cpu(*image));
			ret = -EIO;
			/* full check logs every mismatch, up to 20 */
			errcnt++;
			if (errcnt >= 20)
				break;
		}
	}

	if (!errcnt)
		IWL_DEBUG_INFO(priv,
		    "ucode image in INSTRUCTION memory is good\n");

	return ret;
}

/**
 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
 *    and verify its contents
 *
 * Tries each known image (bootstrap, init, runtime) with the cheap sparse
 * check; on total failure, re-runs the full check on the bootstrap image
 * purely for diagnostic output.
 */
int iwl4965_verify_ucode(struct iwl_priv *priv)
{
	__le32 *image;
	u32 len;
	int ret;

	/* Try bootstrap */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *)priv->ucode_init.v_addr;
	len = priv->ucode_init.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *)priv->ucode_code.v_addr;
	len = priv->ucode_code.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Since nothing seems to match, show first several data entries in
	 * instruction SRAM, so maybe visual inspection will give a clue.
	 * Selection of bootstrap image (vs. other images) is arbitrary. */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl4965_verify_inst_full(priv, image, len);

	return ret;
}
gpl-2.0
fosser2/android-tegra-nv-3.1.10-rel-15r7
drivers/media/video/videobuf2-vmalloc.c
3187
2941
/* * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2 * * Copyright (C) 2010 Samsung Electronics * * Author: Pawel Osciak <pawel@osciak.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <media/videobuf2-core.h> #include <media/videobuf2-memops.h> struct vb2_vmalloc_buf { void *vaddr; unsigned long size; atomic_t refcount; struct vb2_vmarea_handler handler; }; static void vb2_vmalloc_put(void *buf_priv); static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size) { struct vb2_vmalloc_buf *buf; buf = kzalloc(sizeof *buf, GFP_KERNEL); if (!buf) return NULL; buf->size = size; buf->vaddr = vmalloc_user(buf->size); buf->handler.refcount = &buf->refcount; buf->handler.put = vb2_vmalloc_put; buf->handler.arg = buf; if (!buf->vaddr) { printk(KERN_ERR "vmalloc of size %ld failed\n", buf->size); kfree(buf); return NULL; } atomic_inc(&buf->refcount); printk(KERN_DEBUG "Allocated vmalloc buffer of size %ld at vaddr=%p\n", buf->size, buf->vaddr); return buf; } static void vb2_vmalloc_put(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; if (atomic_dec_and_test(&buf->refcount)) { printk(KERN_DEBUG "%s: Freeing vmalloc mem at vaddr=%p\n", __func__, buf->vaddr); vfree(buf->vaddr); kfree(buf); } } static void *vb2_vmalloc_vaddr(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; BUG_ON(!buf); if (!buf->vaddr) { printk(KERN_ERR "Address of an unallocated plane requested\n"); return NULL; } return buf->vaddr; } static unsigned int vb2_vmalloc_num_users(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; return atomic_read(&buf->refcount); } static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma) { struct vb2_vmalloc_buf *buf = buf_priv; int ret; if (!buf) { printk(KERN_ERR "No memory to 
map\n"); return -EINVAL; } ret = remap_vmalloc_range(vma, buf->vaddr, 0); if (ret) { printk(KERN_ERR "Remapping vmalloc memory, error: %d\n", ret); return ret; } /* * Make sure that vm_areas for 2 buffers won't be merged together */ vma->vm_flags |= VM_DONTEXPAND; /* * Use common vm_area operations to track buffer refcount. */ vma->vm_private_data = &buf->handler; vma->vm_ops = &vb2_common_vm_ops; vma->vm_ops->open(vma); return 0; } const struct vb2_mem_ops vb2_vmalloc_memops = { .alloc = vb2_vmalloc_alloc, .put = vb2_vmalloc_put, .vaddr = vb2_vmalloc_vaddr, .mmap = vb2_vmalloc_mmap, .num_users = vb2_vmalloc_num_users, }; EXPORT_SYMBOL_GPL(vb2_vmalloc_memops); MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2"); MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>"); MODULE_LICENSE("GPL");
gpl-2.0
Kuzma30/kernel3NookTablet
fs/jffs2/summary.c
3187
24084
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2004  Ferenc Havasi <havasi@inf.u-szeged.hu>,
 *		     Zoltan Sogor <weth@inf.u-szeged.hu>,
 *		     Patrik Kluba <pajko@halom.u-szeged.hu>,
 *		     University of Szeged, Hungary
 *	      2006  KaiGai Kohei <kaigai@ak.jp.nec.com>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include "nodelist.h"
#include "debug.h"

/* Allocate the per-mount summary state and its write-out buffer. */
int jffs2_sum_init(struct jffs2_sb_info *c)
{
	/* buffer never needs to exceed one eraseblock */
	uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE);

	c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);

	if (!c->summary) {
		JFFS2_WARNING("Can't allocate memory for summary information!\n");
		return -ENOMEM;
	}

	c->summary->sum_buf = kmalloc(sum_size, GFP_KERNEL);

	if (!c->summary->sum_buf) {
		JFFS2_WARNING("Can't allocate buffer for writing out summary information!\n");
		kfree(c->summary);
		return -ENOMEM;
	}

	dbg_summary("returned successfully\n");

	return 0;
}

/* Tear down summary state at unmount; frees any still-collected items. */
void jffs2_sum_exit(struct jffs2_sb_info *c)
{
	dbg_summary("called\n");

	jffs2_sum_disable_collecting(c->summary);

	kfree(c->summary->sum_buf);
	c->summary->sum_buf = NULL;

	kfree(c->summary);
	c->summary = NULL;
}

/*
 * Append @item to the collected-summary list and account its on-flash
 * summary size.  Returns 0 on success, 1 for an unknown node type
 * (item is still linked into the list and will be freed with it).
 */
static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item)
{
	if (!s->sum_list_head)
		s->sum_list_head = (union jffs2_sum_mem *) item;
	if (s->sum_list_tail)
		s->sum_list_tail->u.next = (union jffs2_sum_mem *) item;
	s->sum_list_tail = (union jffs2_sum_mem *) item;

	switch (je16_to_cpu(item->u.nodetype)) {
		case JFFS2_NODETYPE_INODE:
			s->sum_size += JFFS2_SUMMARY_INODE_SIZE;
			s->sum_num++;
			dbg_summary("inode (%u) added to summary\n",
						je32_to_cpu(item->i.inode));
			break;
		case JFFS2_NODETYPE_DIRENT:
			s->sum_size += JFFS2_SUMMARY_DIRENT_SIZE(item->d.nsize);
			s->sum_num++;
			dbg_summary("dirent (%u) added to summary\n",
						je32_to_cpu(item->d.ino));
			break;
#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR:
			s->sum_size += JFFS2_SUMMARY_XATTR_SIZE;
			s->sum_num++;
			dbg_summary("xattr (xid=%u, version=%u) added to summary\n",
				    je32_to_cpu(item->x.xid), je32_to_cpu(item->x.version));
			break;
		case JFFS2_NODETYPE_XREF:
			s->sum_size += JFFS2_SUMMARY_XREF_SIZE;
			s->sum_num++;
			dbg_summary("xref added to summary\n");
			break;
#endif
		default:
			JFFS2_WARNING("UNKNOWN node type %u\n",
					    je16_to_cpu(item->u.nodetype));
			return 1;
	}
	return 0;
}


/* The following 3 functions are called from scan.c to collect summary info for not closed jeb */

/* Account padding seen while scanning a not-yet-closed eraseblock. */
int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size)
{
	dbg_summary("called with %u\n", size);
	s->sum_padded += size;
	return 0;
}

/* Record an inode node found by the scanner; @ofs is jeb-relative. */
int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri,
				uint32_t ofs)
{
	struct jffs2_sum_inode_mem *temp = kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL);

	if (!temp)
		return -ENOMEM;

	temp->nodetype = ri->nodetype;
	temp->inode = ri->ino;
	temp->version = ri->version;
	temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */
	temp->totlen = ri->totlen;
	temp->next = NULL;

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
}

/* Record a dirent node found by the scanner; copies the (unterminated) name. */
int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd,
				uint32_t ofs)
{
	struct jffs2_sum_dirent_mem *temp =
		kmalloc(sizeof(struct jffs2_sum_dirent_mem) + rd->nsize, GFP_KERNEL);

	if (!temp)
		return -ENOMEM;

	temp->nodetype = rd->nodetype;
	temp->totlen = rd->totlen;
	temp->offset = cpu_to_je32(ofs);	/* relative from the beginning of the jeb */
	temp->pino = rd->pino;
	temp->version = rd->version;
	temp->ino = rd->ino;
	temp->nsize = rd->nsize;
	temp->type = rd->type;
	temp->next = NULL;

	memcpy(temp->name, rd->name, rd->nsize);

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
}

#ifdef CONFIG_JFFS2_FS_XATTR
/* Record an xattr datum node found by the scanner. */
int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs)
{
	struct jffs2_sum_xattr_mem *temp;

	temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
	if (!temp)
		return -ENOMEM;

	temp->nodetype = rx->nodetype;
	temp->xid = rx->xid;
	temp->version = rx->version;
	temp->offset = cpu_to_je32(ofs);
	temp->totlen = rx->totlen;
	temp->next = NULL;

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
}

/* Record an xattr reference node found by the scanner. */
int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs)
{
	struct jffs2_sum_xref_mem *temp;

	temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
	if (!temp)
		return -ENOMEM;

	temp->nodetype = rr->nodetype;
	temp->offset = cpu_to_je32(ofs);
	temp->next = NULL;

	return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
}
#endif
/* Cleanup every collected summary information */

static void jffs2_sum_clean_collected(struct jffs2_summary *s)
{
	union jffs2_sum_mem *temp;

	if (!s->sum_list_head) {
		dbg_summary("already empty\n");
	}
	/* free the whole singly-linked collection list */
	while (s->sum_list_head) {
		temp = s->sum_list_head;
		s->sum_list_head = s->sum_list_head->u.next;
		kfree(temp);
	}
	s->sum_list_tail = NULL;
	s->sum_padded = 0;
	s->sum_num = 0;
}

/* Discard collected items and restart size accounting from zero. */
void jffs2_sum_reset_collected(struct jffs2_summary *s)
{
	dbg_summary("called\n");
	jffs2_sum_clean_collected(s);
	s->sum_size = 0;
}

/* Discard collected items and mark summary collection disabled
 * (NOSUM_SIZE sentinel) for the current eraseblock. */
void jffs2_sum_disable_collecting(struct jffs2_summary *s)
{
	dbg_summary("called\n");
	jffs2_sum_clean_collected(s);

	s->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
}

/* Non-zero if summary collection is currently disabled. */
int jffs2_sum_is_disabled(struct jffs2_summary *s)
{
	return (s->sum_size == JFFS2_SUMMARY_NOSUM_SIZE);
}

/* Move the collected summary information into sb (called from scan.c) */
void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s)
{
	dbg_summary("oldsize=0x%x oldnum=%u => newsize=0x%x newnum=%u\n",
				c->summary->sum_size, c->summary->sum_num,
				s->sum_size, s->sum_num);

	c->summary->sum_size = s->sum_size;
	c->summary->sum_num = s->sum_num;
	c->summary->sum_padded = s->sum_padded;
	c->summary->sum_list_head = s->sum_list_head;
	c->summary->sum_list_tail = s->sum_list_tail;

	/* ownership of the list moved to c->summary */
	s->sum_list_head = s->sum_list_tail = NULL;
}

/* Called from wbuf.c to collect written
node info */
int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
				unsigned long count, uint32_t ofs)
{
	union jffs2_node_union *node;
	struct jffs2_eraseblock *jeb;

	if (c->summary->sum_size == JFFS2_SUMMARY_NOSUM_SIZE) {
		dbg_summary("Summary is disabled for this jeb! Skipping summary info!\n");
		return 0;
	}

	/* the node header always lives in the first iovec */
	node = invecs[0].iov_base;
	jeb = &c->blocks[ofs / c->sector_size];
	ofs -= jeb->offset;		/* store jeb-relative offsets */

	switch (je16_to_cpu(node->u.nodetype)) {
		case JFFS2_NODETYPE_INODE: {
			struct jffs2_sum_inode_mem *temp =
				kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL);

			if (!temp)
				goto no_mem;

			temp->nodetype = node->i.nodetype;
			temp->inode = node->i.ino;
			temp->version = node->i.version;
			temp->offset = cpu_to_je32(ofs);
			temp->totlen = node->i.totlen;
			temp->next = NULL;

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}

		case JFFS2_NODETYPE_DIRENT: {
			struct jffs2_sum_dirent_mem *temp =
				kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL);

			if (!temp)
				goto no_mem;

			temp->nodetype = node->d.nodetype;
			temp->totlen = node->d.totlen;
			temp->offset = cpu_to_je32(ofs);
			temp->pino = node->d.pino;
			temp->version = node->d.version;
			temp->ino = node->d.ino;
			temp->nsize = node->d.nsize;
			temp->type = node->d.type;
			temp->next = NULL;

			/* name is either inline after the header (1 iovec)
			 * or in a second iovec of its own */
			switch (count) {
				case 1:
					memcpy(temp->name,node->d.name,node->d.nsize);
					break;

				case 2:
					memcpy(temp->name,invecs[1].iov_base,node->d.nsize);
					break;

				default:
					BUG();	/* impossible count value */
					break;
			}

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}
#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR: {
			struct jffs2_sum_xattr_mem *temp;

			temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
			if (!temp)
				goto no_mem;

			temp->nodetype = node->x.nodetype;
			temp->xid = node->x.xid;
			temp->version = node->x.version;
			temp->totlen = node->x.totlen;
			temp->offset = cpu_to_je32(ofs);
			temp->next = NULL;

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}
		case JFFS2_NODETYPE_XREF: {
			struct jffs2_sum_xref_mem *temp;

			temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
			if (!temp)
				goto no_mem;

			temp->nodetype = node->r.nodetype;
			temp->offset = cpu_to_je32(ofs);
			temp->next = NULL;

			return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
		}
#endif
		case JFFS2_NODETYPE_PADDING:
			dbg_summary("node PADDING\n");
			c->summary->sum_padded += je32_to_cpu(node->u.totlen);
			break;

		case JFFS2_NODETYPE_CLEANMARKER:
			dbg_summary("node CLEANMARKER\n");
			break;

		case JFFS2_NODETYPE_SUMMARY:
			dbg_summary("node SUMMARY\n");
			break;

		default:
			/* If you implement a new node type you should also implement
			   summary support for it or disable summary.
			*/
			BUG();
			break;
	}

	return 0;

no_mem:
	JFFS2_WARNING("MEMORY ALLOCATION ERROR!");
	return -ENOMEM;
}

/*
 * Link a node ref at jeb-relative offset @ofs, first marking any skipped
 * region as dirty.  Low two bits of @ofs carry the REF_* state.
 */
static struct jffs2_raw_node_ref *sum_link_node_ref(struct jffs2_sb_info *c,
						    struct jffs2_eraseblock *jeb,
						    uint32_t ofs, uint32_t len,
						    struct jffs2_inode_cache *ic)
{
	/* If there was a gap, mark it dirty */
	if ((ofs & ~3) > c->sector_size - jeb->free_size) {
		/* Ew. Summary doesn't actually tell us explicitly about dirty space */
		jffs2_scan_dirty_space(c, jeb, (ofs & ~3) - (c->sector_size - jeb->free_size));
	}

	return jffs2_link_node_ref(c, jeb, jeb->offset + ofs, len, ic);
}

/* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */

static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				struct jffs2_raw_summary *summary, uint32_t *pseudo_random)
{
	struct jffs2_inode_cache *ic;
	struct jffs2_full_dirent *fd;
	void *sp;	/* cursor over the variable-size summary records */
	int i, ino;
	int err;

	sp = summary->sum;

	for (i=0; i<je32_to_cpu(summary->sum_num); i++) {
		dbg_summary("processing summary index %d\n", i);

		cond_resched();

		/* Make sure there's a spare ref for dirty space */
		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
		if (err)
			return err;

		switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) {
			case JFFS2_NODETYPE_INODE: {
				struct jffs2_sum_inode_flash *spi;
				spi = sp;

				ino = je32_to_cpu(spi->inode);

				dbg_summary("Inode at 0x%08x-0x%08x\n",
					    jeb->offset + je32_to_cpu(spi->offset),
					    jeb->offset + je32_to_cpu(spi->offset) + je32_to_cpu(spi->totlen));

				ic = jffs2_scan_make_ino_cache(c, ino);
				if (!ic) {
					JFFS2_NOTICE("scan_make_ino_cache failed\n");
					return -ENOMEM;
				}

				sum_link_node_ref(c, jeb, je32_to_cpu(spi->offset) | REF_UNCHECKED,
						  PAD(je32_to_cpu(spi->totlen)), ic);

				*pseudo_random += je32_to_cpu(spi->version);

				sp += JFFS2_SUMMARY_INODE_SIZE;

				break;
			}

			case JFFS2_NODETYPE_DIRENT: {
				struct jffs2_sum_dirent_flash *spd;
				int checkedlen;
				spd = sp;

				dbg_summary("Dirent at 0x%08x-0x%08x\n",
					    jeb->offset + je32_to_cpu(spd->offset),
					    jeb->offset + je32_to_cpu(spd->offset) + je32_to_cpu(spd->totlen));


				/* This should never happen, but https://dev.laptop.org/ticket/4184 */
				checkedlen = strnlen(spd->name, spd->nsize);
				if (!checkedlen) {
					printk(KERN_ERR "Dirent at %08x has zero at start of name. Aborting mount.\n",
					       jeb->offset + je32_to_cpu(spd->offset));
					return -EIO;
				}
				if (checkedlen < spd->nsize) {
					printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n",
					       jeb->offset + je32_to_cpu(spd->offset), checkedlen);
				}


				fd = jffs2_alloc_full_dirent(checkedlen+1);
				if (!fd)
					return -ENOMEM;

				memcpy(&fd->name, spd->name, checkedlen);
				fd->name[checkedlen] = 0;

				ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino));
				if (!ic) {
					jffs2_free_full_dirent(fd);
					return -ENOMEM;
				}

				fd->raw = sum_link_node_ref(c, jeb,  je32_to_cpu(spd->offset) | REF_UNCHECKED,
							    PAD(je32_to_cpu(spd->totlen)), ic);

				fd->next = NULL;
				fd->version = je32_to_cpu(spd->version);
				fd->ino = je32_to_cpu(spd->ino);
				fd->nhash = full_name_hash(fd->name, checkedlen);
				fd->type = spd->type;

				jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

				*pseudo_random += je32_to_cpu(spd->version);

				/* record size depends on the stored nsize,
				 * not the possibly-truncated checkedlen */
				sp += JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize);

				break;
			}
#ifdef CONFIG_JFFS2_FS_XATTR
			case JFFS2_NODETYPE_XATTR: {
				struct jffs2_xattr_datum *xd;
				struct jffs2_sum_xattr_flash *spx;

				spx = (struct jffs2_sum_xattr_flash *)sp;
				dbg_summary("xattr at %#08x-%#08x (xid=%u, version=%u)\n",
					    jeb->offset + je32_to_cpu(spx->offset),
					    jeb->offset + je32_to_cpu(spx->offset) + je32_to_cpu(spx->totlen),
					    je32_to_cpu(spx->xid), je32_to_cpu(spx->version));

				xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid),
								je32_to_cpu(spx->version));
				if (IS_ERR(xd))
					return PTR_ERR(xd);
				if (xd->version > je32_to_cpu(spx->version)) {
					/* node is not the newest one */
					struct jffs2_raw_node_ref *raw
						= sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
								    PAD(je32_to_cpu(spx->totlen)), NULL);
					raw->next_in_ino = xd->node->next_in_ino;
					xd->node->next_in_ino = raw;
				} else {
					xd->version = je32_to_cpu(spx->version);
					sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
							  PAD(je32_to_cpu(spx->totlen)), (void *)xd);
				}
				*pseudo_random += je32_to_cpu(spx->xid);
				sp += JFFS2_SUMMARY_XATTR_SIZE;

				break;
			}
			case JFFS2_NODETYPE_XREF: {
				struct jffs2_xattr_ref *ref;
				struct jffs2_sum_xref_flash *spr;

				spr = (struct jffs2_sum_xref_flash *)sp;
				dbg_summary("xref at %#08x-%#08x\n",
					    jeb->offset + je32_to_cpu(spr->offset),
					    jeb->offset + je32_to_cpu(spr->offset) +
					    (uint32_t)PAD(sizeof(struct jffs2_raw_xref)));

				ref = jffs2_alloc_xattr_ref();
				if (!ref) {
					JFFS2_NOTICE("allocation of xattr_datum failed\n");
					return -ENOMEM;
				}
				ref->next = c->xref_temp;
				c->xref_temp = ref;

				sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED,
						  PAD(sizeof(struct jffs2_raw_xref)), (void *)ref);

				*pseudo_random += ref->node->flash_offset;
				sp += JFFS2_SUMMARY_XREF_SIZE;

				break;
			}
#endif
			default : {
				uint16_t nodetype = je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype);
				JFFS2_WARNING("Unsupported node type %x found in summary! Exiting...\n", nodetype);
				if ((nodetype & JFFS2_COMPAT_MASK) == JFFS2_FEATURE_INCOMPAT)
					return -EIO;

				/* For compatible node types, just fall back to the full scan */
				c->wasted_size -= jeb->wasted_size;
				c->free_size += c->sector_size - jeb->free_size;
				c->used_size -= jeb->used_size;
				c->dirty_size -= jeb->dirty_size;

				jeb->wasted_size = jeb->used_size = jeb->dirty_size = 0;
				jeb->free_size = c->sector_size;

				jffs2_free_jeb_node_refs(c, jeb);
				return -ENOTRECOVERABLE;
			}
		}
	}
	return 0;
}

/* Process the summary node - called from jffs2_scan_eraseblock() */
int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
			   struct jffs2_raw_summary *summary, uint32_t sumsize,
			   uint32_t *pseudo_random)
{
	struct jffs2_unknown_node crcnode;
	int ret, ofs;
	uint32_t crc;

	/* summary node sits at the very end of the eraseblock */
	ofs = c->sector_size - sumsize;

	dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n",
		    jeb->offset, jeb->offset + ofs, sumsize);

	/* OK, now check for node validity and CRC */
	crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	crcnode.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
	crcnode.totlen = summary->totlen;
	crc = crc32(0, &crcnode, sizeof(crcnode)-4);

	if (je32_to_cpu(summary->hdr_crc) != crc) {
		dbg_summary("Summary node header is corrupt (bad CRC or "
				"no summary at all)\n");
		goto crc_err;
	}

	if (je32_to_cpu(summary->totlen) != sumsize) {
		dbg_summary("Summary node is corrupt (wrong erasesize?)\n");
		goto crc_err;
	}

	crc = crc32(0, summary, sizeof(struct jffs2_raw_summary)-8);

	if (je32_to_cpu(summary->node_crc) != crc) {
		dbg_summary("Summary node is corrupt (bad CRC)\n");
		goto crc_err;
	}

	crc = crc32(0, summary->sum, sumsize - sizeof(struct jffs2_raw_summary));

	if (je32_to_cpu(summary->sum_crc) != crc) {
		dbg_summary("Summary node data is corrupt (bad CRC)\n");
		goto crc_err;
	}

	if ( je32_to_cpu(summary->cln_mkr) ) {
		/* a non-zero cln_mkr records the cleanmarker that should be
		 * at the start of the block */
		dbg_summary("Summary : CLEANMARKER node \n");

		ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
		if (ret)
			return ret;

		if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) {
			dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n",
				je32_to_cpu(summary->cln_mkr), c->cleanmarker_size);
			if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr)))))
				return ret;
		} else if (jeb->first_node) {
			dbg_summary("CLEANMARKER node not first node in block "
					"(0x%08x)\n", jeb->offset);
			if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr)))))
				return ret;
		} else {
			jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL,
					    je32_to_cpu(summary->cln_mkr), NULL);
		}
	}

	ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random);
	/* -ENOTRECOVERABLE isn't a fatal error -- it means we should do a full
	   scan of this eraseblock. So return zero */
	if (ret == -ENOTRECOVERABLE)
		return 0;
	if (ret)
		return ret;		/* real error */

	/* for PARANOIA_CHECK */
	ret = jffs2_prealloc_raw_node_refs(c, jeb, 2);
	if (ret)
		return ret;

	sum_link_node_ref(c, jeb, ofs | REF_NORMAL, sumsize, NULL);

	if (unlikely(jeb->free_size)) {
		JFFS2_WARNING("Free size 0x%x bytes in eraseblock @0x%08x with summary?\n",
			      jeb->free_size, jeb->offset);
		jeb->wasted_size += jeb->free_size;
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->free_size = 0;
	}

	return jffs2_scan_classify_jeb(c, jeb);

crc_err:
	JFFS2_WARNING("Summary node crc error, skipping summary information.\n");

	return 0;
}

/* Write summary data to flash - helper function for jffs2_sum_write_sumnode() */

static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				uint32_t infosize, uint32_t datasize, int padsize)
{
	struct jffs2_raw_summary isum;
	union jffs2_sum_mem *temp;
	struct jffs2_sum_marker *sm;
	struct kvec vecs[2];
	uint32_t sum_ofs;
	void *wpage;		/* write cursor into sum_buf */
	int ret;
	size_t retlen;

	if (padsize + datasize > MAX_SUMMARY_SIZE) {
		/* It won't fit in the buffer. Abort summary for this jeb */
		jffs2_sum_disable_collecting(c->summary);

		JFFS2_WARNING("Summary too big (%d data, %d pad) in eraseblock at %08x\n",
			      datasize, padsize, jeb->offset);
		/* Non-fatal */
		return 0;
	}
	/* Is there enough space for summary? */
	if (padsize < 0) {
		/* don't try to write out summary for this jeb */
		jffs2_sum_disable_collecting(c->summary);

		JFFS2_WARNING("Not enough space for summary, padsize = %d\n",
			      padsize);
		/* Non-fatal */
		return 0;
	}

	memset(c->summary->sum_buf, 0xff, datasize);
	memset(&isum, 0, sizeof(isum));

	isum.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	isum.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
	isum.totlen = cpu_to_je32(infosize);
	isum.hdr_crc = cpu_to_je32(crc32(0, &isum, sizeof(struct jffs2_unknown_node) - 4));
	isum.padded = cpu_to_je32(c->summary->sum_padded);
	isum.cln_mkr = cpu_to_je32(c->cleanmarker_size);
	isum.sum_num = cpu_to_je32(c->summary->sum_num);
	wpage = c->summary->sum_buf;

	/* serialize each collected item into sum_buf, consuming the list */
	while (c->summary->sum_num) {
		temp = c->summary->sum_list_head;

		switch (je16_to_cpu(temp->u.nodetype)) {
			case JFFS2_NODETYPE_INODE: {
				struct jffs2_sum_inode_flash *sino_ptr = wpage;

				sino_ptr->nodetype = temp->i.nodetype;
				sino_ptr->inode = temp->i.inode;
				sino_ptr->version = temp->i.version;
				sino_ptr->offset = temp->i.offset;
				sino_ptr->totlen = temp->i.totlen;

				wpage += JFFS2_SUMMARY_INODE_SIZE;

				break;
			}

			case JFFS2_NODETYPE_DIRENT: {
				struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage;

				sdrnt_ptr->nodetype = temp->d.nodetype;
				sdrnt_ptr->totlen = temp->d.totlen;
				sdrnt_ptr->offset = temp->d.offset;
				sdrnt_ptr->pino = temp->d.pino;
				sdrnt_ptr->version = temp->d.version;
				sdrnt_ptr->ino = temp->d.ino;
				sdrnt_ptr->nsize = temp->d.nsize;
				sdrnt_ptr->type = temp->d.type;

				memcpy(sdrnt_ptr->name, temp->d.name,
							temp->d.nsize);

				wpage += JFFS2_SUMMARY_DIRENT_SIZE(temp->d.nsize);

				break;
			}
#ifdef CONFIG_JFFS2_FS_XATTR
			case JFFS2_NODETYPE_XATTR: {
				struct jffs2_sum_xattr_flash *sxattr_ptr = wpage;

				temp = c->summary->sum_list_head;
				sxattr_ptr->nodetype = temp->x.nodetype;
				sxattr_ptr->xid = temp->x.xid;
				sxattr_ptr->version = temp->x.version;
				sxattr_ptr->offset = temp->x.offset;
				sxattr_ptr->totlen = temp->x.totlen;

				wpage += JFFS2_SUMMARY_XATTR_SIZE;
				break;
			}
			case JFFS2_NODETYPE_XREF: {
				struct jffs2_sum_xref_flash *sxref_ptr = wpage;

				temp = c->summary->sum_list_head;
				sxref_ptr->nodetype = temp->r.nodetype;
				sxref_ptr->offset = temp->r.offset;

				wpage += JFFS2_SUMMARY_XREF_SIZE;
				break;
			}
#endif
			default : {
				if ((je16_to_cpu(temp->u.nodetype) & JFFS2_COMPAT_MASK)
				    == JFFS2_FEATURE_RWCOMPAT_COPY) {
					dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
						    je16_to_cpu(temp->u.nodetype));
					jffs2_sum_disable_collecting(c->summary);
				} else {
					BUG();	/* unknown node in summary information */
				}
			}
		}

		c->summary->sum_list_head = temp->u.next;
		kfree(temp);

		c->summary->sum_num--;
	}

	jffs2_sum_reset_collected(c->summary);

	wpage += padsize;

	/* trailing marker records where the summary starts in the block */
	sm = wpage;
	sm->offset = cpu_to_je32(c->sector_size - jeb->free_size);
	sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC);

	isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize));
	isum.node_crc = cpu_to_je32(crc32(0, &isum, sizeof(isum) - 8));

	vecs[0].iov_base = &isum;
	vecs[0].iov_len = sizeof(isum);
	vecs[1].iov_base = c->summary->sum_buf;
	vecs[1].iov_len = datasize;

	sum_ofs = jeb->offset + c->sector_size - jeb->free_size;

	dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n",
		    sum_ofs);

	ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0);

	if (ret || (retlen != infosize)) {

		JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n",
			      infosize, sum_ofs, ret, retlen);

		if (retlen) {
			/* Waste remaining space */
			spin_lock(&c->erase_completion_lock);
			jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL);
			spin_unlock(&c->erase_completion_lock);
		}

		c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;

		return 0;
	}

	spin_lock(&c->erase_completion_lock);
	jffs2_link_node_ref(c, jeb, sum_ofs | REF_NORMAL, infosize, NULL);
	spin_unlock(&c->erase_completion_lock);

	return 0;
}

/* Write out summary information - called from jffs2_do_reserve_space */

int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
{
	int datasize, infosize, padsize;
	struct jffs2_eraseblock *jeb;
	int ret = 0;

	dbg_summary("called\n");

	/* caller holds erase_completion_lock; drop it across the flash write */
	spin_unlock(&c->erase_completion_lock);

	jeb = c->nextblock;
	jffs2_prealloc_raw_node_refs(c, jeb, 1);

	if (!c->summary->sum_num || !c->summary->sum_list_head) {
		JFFS2_WARNING("Empty summary info!!!\n");
		BUG();
	}

	datasize = c->summary->sum_size + sizeof(struct jffs2_sum_marker);
	infosize = sizeof(struct jffs2_raw_summary) + datasize;
	/* pad so the summary ends exactly at the eraseblock boundary */
	padsize = jeb->free_size - infosize;
	infosize += padsize;
	datasize += padsize;

	ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize);
	spin_lock(&c->erase_completion_lock);
	return ret;
}
gpl-2.0