repo_name
string
path
string
copies
string
size
string
content
string
license
string
Red680812/android_44_KitKat_kernel_htc_dlxpul-1
drivers/usb/gadget/at91_udc.c
3220
52327
/* * at91_udc -- driver for at91-series USB peripheral controller * * Copyright (C) 2004 by Thomas Rathbone * Copyright (C) 2005 by HP Labs * Copyright (C) 2005 by David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #undef VERBOSE_DEBUG #undef PACKET_TRACE #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/prefetch.h> #include <linux/clk.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <asm/byteorder.h> #include <mach/hardware.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/gpio.h> #include <mach/board.h> #include <mach/cpu.h> #include <mach/at91sam9261_matrix.h> #include <mach/at91_matrix.h> #include "at91_udc.h" /* * This controller is simple and PIO-only. It's used in many AT91-series * full speed USB controllers, including the at91rm9200 (arm920T, with MMU), * at91sam926x (arm926ejs, with MMU), and several no-mmu versions. * * This driver expects the board has been wired with two GPIOs supporting * a VBUS sensing IRQ, and a D+ pullup. (They may be omitted, but the * testing hasn't covered such cases.) * * The pullup is most important (so it's integrated on sam926x parts). It * provides software control over whether the host enumerates the device. * * The VBUS sensing helps during enumeration, and allows both USB clocks * (and the transceiver) to stay gated off until they're necessary, saving * power. 
During USB suspend, the 48 MHz clock is gated off in hardware; * it may also be gated off by software during some Linux sleep states. */ #define DRIVER_VERSION "3 May 2006" static const char driver_name [] = "at91_udc"; static const char ep0name[] = "ep0"; #define VBUS_POLL_TIMEOUT msecs_to_jiffies(1000) #define at91_udp_read(udc, reg) \ __raw_readl((udc)->udp_baseaddr + (reg)) #define at91_udp_write(udc, reg, val) \ __raw_writel((val), (udc)->udp_baseaddr + (reg)) /*-------------------------------------------------------------------------*/ #ifdef CONFIG_USB_GADGET_DEBUG_FILES #include <linux/seq_file.h> static const char debug_filename[] = "driver/udc"; #define FOURBITS "%s%s%s%s" #define EIGHTBITS FOURBITS FOURBITS static void proc_ep_show(struct seq_file *s, struct at91_ep *ep) { static char *types[] = { "control", "out-iso", "out-bulk", "out-int", "BOGUS", "in-iso", "in-bulk", "in-int"}; u32 csr; struct at91_request *req; unsigned long flags; struct at91_udc *udc = ep->udc; spin_lock_irqsave(&udc->lock, flags); csr = __raw_readl(ep->creg); /* NOTE: not collecting per-endpoint irq statistics... */ seq_printf(s, "\n"); seq_printf(s, "%s, maxpacket %d %s%s %s%s\n", ep->ep.name, ep->ep.maxpacket, ep->is_in ? "in" : "out", ep->is_iso ? " iso" : "", ep->is_pingpong ? (ep->fifo_bank ? "pong" : "ping") : "", ep->stopped ? " stopped" : ""); seq_printf(s, "csr %08x rxbytes=%d %s %s %s" EIGHTBITS "\n", csr, (csr & 0x07ff0000) >> 16, (csr & (1 << 15)) ? "enabled" : "disabled", (csr & (1 << 11)) ? "DATA1" : "DATA0", types[(csr & 0x700) >> 8], /* iff type is control then print current direction */ (!(csr & 0x700)) ? ((csr & (1 << 7)) ? " IN" : " OUT") : "", (csr & (1 << 6)) ? " rxdatabk1" : "", (csr & (1 << 5)) ? " forcestall" : "", (csr & (1 << 4)) ? " txpktrdy" : "", (csr & (1 << 3)) ? " stallsent" : "", (csr & (1 << 2)) ? " rxsetup" : "", (csr & (1 << 1)) ? " rxdatabk0" : "", (csr & (1 << 0)) ? 
" txcomp" : ""); if (list_empty (&ep->queue)) seq_printf(s, "\t(queue empty)\n"); else list_for_each_entry (req, &ep->queue, queue) { unsigned length = req->req.actual; seq_printf(s, "\treq %p len %d/%d buf %p\n", &req->req, length, req->req.length, req->req.buf); } spin_unlock_irqrestore(&udc->lock, flags); } static void proc_irq_show(struct seq_file *s, const char *label, u32 mask) { int i; seq_printf(s, "%s %04x:%s%s" FOURBITS, label, mask, (mask & (1 << 13)) ? " wakeup" : "", (mask & (1 << 12)) ? " endbusres" : "", (mask & (1 << 11)) ? " sofint" : "", (mask & (1 << 10)) ? " extrsm" : "", (mask & (1 << 9)) ? " rxrsm" : "", (mask & (1 << 8)) ? " rxsusp" : ""); for (i = 0; i < 8; i++) { if (mask & (1 << i)) seq_printf(s, " ep%d", i); } seq_printf(s, "\n"); } static int proc_udc_show(struct seq_file *s, void *unused) { struct at91_udc *udc = s->private; struct at91_ep *ep; u32 tmp; seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION); seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n", udc->vbus ? "present" : "off", udc->enabled ? (udc->vbus ? "active" : "enabled") : "disabled", udc->selfpowered ? "self" : "VBUS", udc->suspended ? ", suspended" : "", udc->driver ? udc->driver->driver.name : "(none)"); /* don't access registers when interface isn't clocked */ if (!udc->clocked) { seq_printf(s, "(not clocked)\n"); return 0; } tmp = at91_udp_read(udc, AT91_UDP_FRM_NUM); seq_printf(s, "frame %05x:%s%s frame=%d\n", tmp, (tmp & AT91_UDP_FRM_OK) ? " ok" : "", (tmp & AT91_UDP_FRM_ERR) ? " err" : "", (tmp & AT91_UDP_NUM)); tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT); seq_printf(s, "glbstate %02x:%s" FOURBITS "\n", tmp, (tmp & AT91_UDP_RMWUPE) ? " rmwupe" : "", (tmp & AT91_UDP_RSMINPR) ? " rsminpr" : "", (tmp & AT91_UDP_ESR) ? " esr" : "", (tmp & AT91_UDP_CONFG) ? " confg" : "", (tmp & AT91_UDP_FADDEN) ? " fadden" : ""); tmp = at91_udp_read(udc, AT91_UDP_FADDR); seq_printf(s, "faddr %03x:%s fadd=%d\n", tmp, (tmp & AT91_UDP_FEN) ? 
" fen" : "", (tmp & AT91_UDP_FADD)); proc_irq_show(s, "imr ", at91_udp_read(udc, AT91_UDP_IMR)); proc_irq_show(s, "isr ", at91_udp_read(udc, AT91_UDP_ISR)); if (udc->enabled && udc->vbus) { proc_ep_show(s, &udc->ep[0]); list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) { if (ep->desc) proc_ep_show(s, ep); } } return 0; } static int proc_udc_open(struct inode *inode, struct file *file) { return single_open(file, proc_udc_show, PDE(inode)->data); } static const struct file_operations proc_ops = { .owner = THIS_MODULE, .open = proc_udc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void create_debug_file(struct at91_udc *udc) { udc->pde = proc_create_data(debug_filename, 0, NULL, &proc_ops, udc); } static void remove_debug_file(struct at91_udc *udc) { if (udc->pde) remove_proc_entry(debug_filename, NULL); } #else static inline void create_debug_file(struct at91_udc *udc) {} static inline void remove_debug_file(struct at91_udc *udc) {} #endif /*-------------------------------------------------------------------------*/ static void done(struct at91_ep *ep, struct at91_request *req, int status) { unsigned stopped = ep->stopped; struct at91_udc *udc = ep->udc; list_del_init(&req->queue); if (req->req.status == -EINPROGRESS) req->req.status = status; else status = req->req.status; if (status && status != -ESHUTDOWN) VDBG("%s done %p, status %d\n", ep->ep.name, req, status); ep->stopped = 1; spin_unlock(&udc->lock); req->req.complete(&ep->ep, &req->req); spin_lock(&udc->lock); ep->stopped = stopped; /* ep0 is always ready; other endpoints need a non-empty queue */ if (list_empty(&ep->queue) && ep->int_mask != (1 << 0)) at91_udp_write(udc, AT91_UDP_IDR, ep->int_mask); } /*-------------------------------------------------------------------------*/ /* bits indicating OUT fifo has data ready */ #define RX_DATA_READY (AT91_UDP_RX_DATA_BK0 | AT91_UDP_RX_DATA_BK1) /* * Endpoint FIFO CSR bits have a mix of bits, making it unsafe to just 
write * back most of the value you just read (because of side effects, including * bits that may change after reading and before writing). * * Except when changing a specific bit, always write values which: * - clear SET_FX bits (setting them could change something) * - set CLR_FX bits (clearing them could change something) * * There are also state bits like FORCESTALL, EPEDS, DIR, and EPTYPE * that shouldn't normally be changed. * * NOTE at91sam9260 docs mention synch between UDPCK and MCK clock domains, * implying a need to wait for one write to complete (test relevant bits) * before starting the next write. This shouldn't be an issue given how * infrequently we write, except maybe for write-then-read idioms. */ #define SET_FX (AT91_UDP_TXPKTRDY) #define CLR_FX (RX_DATA_READY | AT91_UDP_RXSETUP \ | AT91_UDP_STALLSENT | AT91_UDP_TXCOMP) /* pull OUT packet data from the endpoint's fifo */ static int read_fifo (struct at91_ep *ep, struct at91_request *req) { u32 __iomem *creg = ep->creg; u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0)); u32 csr; u8 *buf; unsigned int count, bufferspace, is_done; buf = req->req.buf + req->req.actual; bufferspace = req->req.length - req->req.actual; /* * there might be nothing to read if ep_queue() calls us, * or if we already emptied both pingpong buffers */ rescan: csr = __raw_readl(creg); if ((csr & RX_DATA_READY) == 0) return 0; count = (csr & AT91_UDP_RXBYTECNT) >> 16; if (count > ep->ep.maxpacket) count = ep->ep.maxpacket; if (count > bufferspace) { DBG("%s buffer overflow\n", ep->ep.name); req->req.status = -EOVERFLOW; count = bufferspace; } __raw_readsb(dreg, buf, count); /* release and swap pingpong mem bank */ csr |= CLR_FX; if (ep->is_pingpong) { if (ep->fifo_bank == 0) { csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0); ep->fifo_bank = 1; } else { csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK1); ep->fifo_bank = 0; } } else csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0); __raw_writel(csr, creg); req->req.actual += count; is_done = 
(count < ep->ep.maxpacket); if (count == bufferspace) is_done = 1; PACKET("%s %p out/%d%s\n", ep->ep.name, &req->req, count, is_done ? " (done)" : ""); /* * avoid extra trips through IRQ logic for packets already in * the fifo ... maybe preventing an extra (expensive) OUT-NAK */ if (is_done) done(ep, req, 0); else if (ep->is_pingpong) { /* * One dummy read to delay the code because of a HW glitch: * CSR returns bad RXCOUNT when read too soon after updating * RX_DATA_BK flags. */ csr = __raw_readl(creg); bufferspace -= count; buf += count; goto rescan; } return is_done; } /* load fifo for an IN packet */ static int write_fifo(struct at91_ep *ep, struct at91_request *req) { u32 __iomem *creg = ep->creg; u32 csr = __raw_readl(creg); u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0)); unsigned total, count, is_last; u8 *buf; /* * TODO: allow for writing two packets to the fifo ... that'll * reduce the amount of IN-NAKing, but probably won't affect * throughput much. (Unlike preventing OUT-NAKing!) */ /* * If ep_queue() calls us, the queue is empty and possibly in * odd states like TXCOMP not yet cleared (we do it, saving at * least one IRQ) or the fifo not yet being free. Those aren't * issues normally (IRQ handler fast path). */ if (unlikely(csr & (AT91_UDP_TXCOMP | AT91_UDP_TXPKTRDY))) { if (csr & AT91_UDP_TXCOMP) { csr |= CLR_FX; csr &= ~(SET_FX | AT91_UDP_TXCOMP); __raw_writel(csr, creg); csr = __raw_readl(creg); } if (csr & AT91_UDP_TXPKTRDY) return 0; } buf = req->req.buf + req->req.actual; prefetch(buf); total = req->req.length - req->req.actual; if (ep->ep.maxpacket < total) { count = ep->ep.maxpacket; is_last = 0; } else { count = total; is_last = (count < ep->ep.maxpacket) || !req->req.zero; } /* * Write the packet, maybe it's a ZLP. * * NOTE: incrementing req->actual before we receive the ACK means * gadget driver IN bytecounts can be wrong in fault cases. 
That's * fixable with PIO drivers like this one (save "count" here, and * do the increment later on TX irq), but not for most DMA hardware. * * So all gadget drivers must accept that potential error. Some * hardware supports precise fifo status reporting, letting them * recover when the actual bytecount matters (e.g. for USB Test * and Measurement Class devices). */ __raw_writesb(dreg, buf, count); csr &= ~SET_FX; csr |= CLR_FX | AT91_UDP_TXPKTRDY; __raw_writel(csr, creg); req->req.actual += count; PACKET("%s %p in/%d%s\n", ep->ep.name, &req->req, count, is_last ? " (done)" : ""); if (is_last) done(ep, req, 0); return is_last; } static void nuke(struct at91_ep *ep, int status) { struct at91_request *req; /* terminate any request in the queue */ ep->stopped = 1; if (list_empty(&ep->queue)) return; VDBG("%s %s\n", __func__, ep->ep.name); while (!list_empty(&ep->queue)) { req = list_entry(ep->queue.next, struct at91_request, queue); done(ep, req, status); } } /*-------------------------------------------------------------------------*/ static int at91_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct at91_ep *ep = container_of(_ep, struct at91_ep, ep); struct at91_udc *udc = ep->udc; u16 maxpacket; u32 tmp; unsigned long flags; if (!_ep || !ep || !desc || ep->desc || _ep->name == ep0name || desc->bDescriptorType != USB_DT_ENDPOINT || (maxpacket = usb_endpoint_maxp(desc)) == 0 || maxpacket > ep->maxpacket) { DBG("bad ep or descriptor\n"); return -EINVAL; } if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) { DBG("bogus device state\n"); return -ESHUTDOWN; } tmp = usb_endpoint_type(desc); switch (tmp) { case USB_ENDPOINT_XFER_CONTROL: DBG("only one control endpoint\n"); return -EINVAL; case USB_ENDPOINT_XFER_INT: if (maxpacket > 64) goto bogus_max; break; case USB_ENDPOINT_XFER_BULK: switch (maxpacket) { case 8: case 16: case 32: case 64: goto ok; } bogus_max: DBG("bogus maxpacket %d\n", maxpacket); return -EINVAL; case 
USB_ENDPOINT_XFER_ISOC: if (!ep->is_pingpong) { DBG("iso requires double buffering\n"); return -EINVAL; } break; } ok: spin_lock_irqsave(&udc->lock, flags); /* initialize endpoint to match this descriptor */ ep->is_in = usb_endpoint_dir_in(desc); ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC); ep->stopped = 0; if (ep->is_in) tmp |= 0x04; tmp <<= 8; tmp |= AT91_UDP_EPEDS; __raw_writel(tmp, ep->creg); ep->desc = desc; ep->ep.maxpacket = maxpacket; /* * reset/init endpoint fifo. NOTE: leaves fifo_bank alone, * since endpoint resets don't reset hw pingpong state. */ at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask); at91_udp_write(udc, AT91_UDP_RST_EP, 0); spin_unlock_irqrestore(&udc->lock, flags); return 0; } static int at91_ep_disable (struct usb_ep * _ep) { struct at91_ep *ep = container_of(_ep, struct at91_ep, ep); struct at91_udc *udc = ep->udc; unsigned long flags; if (ep == &ep->udc->ep[0]) return -EINVAL; spin_lock_irqsave(&udc->lock, flags); nuke(ep, -ESHUTDOWN); /* restore the endpoint's pristine config */ ep->desc = NULL; ep->ep.desc = NULL; ep->ep.maxpacket = ep->maxpacket; /* reset fifos and endpoint */ if (ep->udc->clocked) { at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask); at91_udp_write(udc, AT91_UDP_RST_EP, 0); __raw_writel(0, ep->creg); } spin_unlock_irqrestore(&udc->lock, flags); return 0; } /* * this is a PIO-only driver, so there's nothing * interesting for request or buffer allocation. 
*/

/* usb_ep_ops.alloc_request: allocate one request wrapper.  The queue list
 * head must start empty so at91_ep_queue() can tell the request is not
 * already queued.
 */
static struct usb_request *
at91_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct at91_request *req;

	req = kzalloc(sizeof (struct at91_request), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}

/* usb_ep_ops.free_request: release a request previously allocated above. */
static void at91_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct at91_request *req;

	req = container_of(_req, struct at91_request, req);
	/* freeing a request that is still linked on an endpoint queue
	 * is a caller bug
	 */
	BUG_ON(!list_empty(&req->queue));
	kfree(req);
}

/* usb_ep_ops.queue: validate the request and device state, then either
 * kickstart an idle endpoint immediately (write_fifo/read_fifo, or the
 * ep0 status-stage handshake) or append the request to the endpoint
 * queue and enable its interrupt.  Returns 0 or a negative errno.
 */
static int at91_ep_queue(struct usb_ep *_ep,
			struct usb_request *_req, gfp_t gfp_flags)
{
	struct at91_request *req;
	struct at91_ep *ep;
	struct at91_udc *udc;
	int status;
	unsigned long flags;

	req = container_of(_req, struct at91_request, req);
	ep = container_of(_ep, struct at91_ep, ep);

	/* reject malformed requests, and requests already queued */
	if (!_req || !_req->complete
			|| !_req->buf || !list_empty(&req->queue)) {
		DBG("invalid request\n");
		return -EINVAL;
	}

	/* only ep0 may be used without a descriptor (before SET_CONFIG) */
	if (!_ep || (!ep->desc && ep->ep.name != ep0name)) {
		DBG("invalid ep\n");
		return -EINVAL;
	}

	udc = ep->udc;

	if (!udc || !udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
		DBG("invalid device\n");
		return -EINVAL;
	}

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* try to kickstart any empty and idle queue */
	if (list_empty(&ep->queue) && !ep->stopped) {
		int is_ep0;

		/*
		 * If this control request has a non-empty DATA stage, this
		 * will start that stage.  It works just like a non-control
		 * request (until the status stage starts, maybe early).
		 *
		 * If the data stage is empty, then this starts a successful
		 * IN/STATUS stage.  (Unsuccessful ones use set_halt.)
		 */
		is_ep0 = (ep->ep.name == ep0name);
		if (is_ep0) {
			u32 tmp;

			/* ep0 traffic is only legal while a SETUP awaits a
			 * response from the gadget driver
			 */
			if (!udc->req_pending) {
				status = -EINVAL;
				goto done;
			}

			/*
			 * defer changing CONFG until after the gadget driver
			 * reconfigures the endpoints.
*/ if (udc->wait_for_config_ack) { tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT); tmp ^= AT91_UDP_CONFG; VDBG("toggle config\n"); at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp); } if (req->req.length == 0) { ep0_in_status: PACKET("ep0 in/status\n"); status = 0; tmp = __raw_readl(ep->creg); tmp &= ~SET_FX; tmp |= CLR_FX | AT91_UDP_TXPKTRDY; __raw_writel(tmp, ep->creg); udc->req_pending = 0; goto done; } } if (ep->is_in) status = write_fifo(ep, req); else { status = read_fifo(ep, req); /* IN/STATUS stage is otherwise triggered by irq */ if (status && is_ep0) goto ep0_in_status; } } else status = 0; if (req && !status) { list_add_tail (&req->queue, &ep->queue); at91_udp_write(udc, AT91_UDP_IER, ep->int_mask); } done: spin_unlock_irqrestore(&udc->lock, flags); return (status < 0) ? status : 0; } static int at91_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct at91_ep *ep; struct at91_request *req; unsigned long flags; struct at91_udc *udc; ep = container_of(_ep, struct at91_ep, ep); if (!_ep || ep->ep.name == ep0name) return -EINVAL; udc = ep->udc; spin_lock_irqsave(&udc->lock, flags); /* make sure it's actually queued on this endpoint */ list_for_each_entry (req, &ep->queue, queue) { if (&req->req == _req) break; } if (&req->req != _req) { spin_unlock_irqrestore(&udc->lock, flags); return -EINVAL; } done(ep, req, -ECONNRESET); spin_unlock_irqrestore(&udc->lock, flags); return 0; } static int at91_ep_set_halt(struct usb_ep *_ep, int value) { struct at91_ep *ep = container_of(_ep, struct at91_ep, ep); struct at91_udc *udc = ep->udc; u32 __iomem *creg; u32 csr; unsigned long flags; int status = 0; if (!_ep || ep->is_iso || !ep->udc->clocked) return -EINVAL; creg = ep->creg; spin_lock_irqsave(&udc->lock, flags); csr = __raw_readl(creg); /* * fail with still-busy IN endpoints, ensuring correct sequencing * of data tx then stall. note that the fifo rx bytecount isn't * completely accurate as a tx bytecount. 
*/ if (ep->is_in && (!list_empty(&ep->queue) || (csr >> 16) != 0)) status = -EAGAIN; else { csr |= CLR_FX; csr &= ~SET_FX; if (value) { csr |= AT91_UDP_FORCESTALL; VDBG("halt %s\n", ep->ep.name); } else { at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask); at91_udp_write(udc, AT91_UDP_RST_EP, 0); csr &= ~AT91_UDP_FORCESTALL; } __raw_writel(csr, creg); } spin_unlock_irqrestore(&udc->lock, flags); return status; } static const struct usb_ep_ops at91_ep_ops = { .enable = at91_ep_enable, .disable = at91_ep_disable, .alloc_request = at91_ep_alloc_request, .free_request = at91_ep_free_request, .queue = at91_ep_queue, .dequeue = at91_ep_dequeue, .set_halt = at91_ep_set_halt, /* there's only imprecise fifo status reporting */ }; /*-------------------------------------------------------------------------*/ static int at91_get_frame(struct usb_gadget *gadget) { struct at91_udc *udc = to_udc(gadget); if (!to_udc(gadget)->clocked) return -EINVAL; return at91_udp_read(udc, AT91_UDP_FRM_NUM) & AT91_UDP_NUM; } static int at91_wakeup(struct usb_gadget *gadget) { struct at91_udc *udc = to_udc(gadget); u32 glbstate; int status = -EINVAL; unsigned long flags; DBG("%s\n", __func__ ); spin_lock_irqsave(&udc->lock, flags); if (!udc->clocked || !udc->suspended) goto done; /* NOTE: some "early versions" handle ESR differently ... 
*/ glbstate = at91_udp_read(udc, AT91_UDP_GLB_STAT); if (!(glbstate & AT91_UDP_ESR)) goto done; glbstate |= AT91_UDP_ESR; at91_udp_write(udc, AT91_UDP_GLB_STAT, glbstate); done: spin_unlock_irqrestore(&udc->lock, flags); return status; } /* reinit == restore initial software state */ static void udc_reinit(struct at91_udc *udc) { u32 i; INIT_LIST_HEAD(&udc->gadget.ep_list); INIT_LIST_HEAD(&udc->gadget.ep0->ep_list); for (i = 0; i < NUM_ENDPOINTS; i++) { struct at91_ep *ep = &udc->ep[i]; if (i != 0) list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); ep->desc = NULL; ep->stopped = 0; ep->fifo_bank = 0; ep->ep.maxpacket = ep->maxpacket; ep->creg = (void __iomem *) udc->udp_baseaddr + AT91_UDP_CSR(i); /* initialize one queue per endpoint */ INIT_LIST_HEAD(&ep->queue); } } static void stop_activity(struct at91_udc *udc) { struct usb_gadget_driver *driver = udc->driver; int i; if (udc->gadget.speed == USB_SPEED_UNKNOWN) driver = NULL; udc->gadget.speed = USB_SPEED_UNKNOWN; udc->suspended = 0; for (i = 0; i < NUM_ENDPOINTS; i++) { struct at91_ep *ep = &udc->ep[i]; ep->stopped = 1; nuke(ep, -ESHUTDOWN); } if (driver) { spin_unlock(&udc->lock); driver->disconnect(&udc->gadget); spin_lock(&udc->lock); } udc_reinit(udc); } static void clk_on(struct at91_udc *udc) { if (udc->clocked) return; udc->clocked = 1; clk_enable(udc->iclk); clk_enable(udc->fclk); } static void clk_off(struct at91_udc *udc) { if (!udc->clocked) return; udc->clocked = 0; udc->gadget.speed = USB_SPEED_UNKNOWN; clk_disable(udc->fclk); clk_disable(udc->iclk); } /* * activate/deactivate link with host; minimize power usage for * inactive links by cutting clocks and transceiver power. */ static void pullup(struct at91_udc *udc, int is_on) { int active = !udc->board.pullup_active_low; if (!udc->enabled || !udc->vbus) is_on = 0; DBG("%sactive\n", is_on ? 
"" : "in"); if (is_on) { clk_on(udc); at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXRSM); at91_udp_write(udc, AT91_UDP_TXVC, 0); if (cpu_is_at91rm9200()) gpio_set_value(udc->board.pullup_pin, active); else if (cpu_is_at91sam9260() || cpu_is_at91sam9263() || cpu_is_at91sam9g20()) { u32 txvc = at91_udp_read(udc, AT91_UDP_TXVC); txvc |= AT91_UDP_TXVC_PUON; at91_udp_write(udc, AT91_UDP_TXVC, txvc); } else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) { u32 usbpucr; usbpucr = at91_matrix_read(AT91_MATRIX_USBPUCR); usbpucr |= AT91_MATRIX_USBPUCR_PUON; at91_matrix_write(AT91_MATRIX_USBPUCR, usbpucr); } } else { stop_activity(udc); at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXRSM); at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); if (cpu_is_at91rm9200()) gpio_set_value(udc->board.pullup_pin, !active); else if (cpu_is_at91sam9260() || cpu_is_at91sam9263() || cpu_is_at91sam9g20()) { u32 txvc = at91_udp_read(udc, AT91_UDP_TXVC); txvc &= ~AT91_UDP_TXVC_PUON; at91_udp_write(udc, AT91_UDP_TXVC, txvc); } else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) { u32 usbpucr; usbpucr = at91_matrix_read(AT91_MATRIX_USBPUCR); usbpucr &= ~AT91_MATRIX_USBPUCR_PUON; at91_matrix_write(AT91_MATRIX_USBPUCR, usbpucr); } clk_off(udc); } } /* vbus is here! turn everything on that's ready */ static int at91_vbus_session(struct usb_gadget *gadget, int is_active) { struct at91_udc *udc = to_udc(gadget); unsigned long flags; /* VDBG("vbus %s\n", is_active ? 
"on" : "off"); */ spin_lock_irqsave(&udc->lock, flags); udc->vbus = (is_active != 0); if (udc->driver) pullup(udc, is_active); else pullup(udc, 0); spin_unlock_irqrestore(&udc->lock, flags); return 0; } static int at91_pullup(struct usb_gadget *gadget, int is_on) { struct at91_udc *udc = to_udc(gadget); unsigned long flags; spin_lock_irqsave(&udc->lock, flags); udc->enabled = is_on = !!is_on; pullup(udc, is_on); spin_unlock_irqrestore(&udc->lock, flags); return 0; } static int at91_set_selfpowered(struct usb_gadget *gadget, int is_on) { struct at91_udc *udc = to_udc(gadget); unsigned long flags; spin_lock_irqsave(&udc->lock, flags); udc->selfpowered = (is_on != 0); spin_unlock_irqrestore(&udc->lock, flags); return 0; } static int at91_start(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)); static int at91_stop(struct usb_gadget_driver *driver); static const struct usb_gadget_ops at91_udc_ops = { .get_frame = at91_get_frame, .wakeup = at91_wakeup, .set_selfpowered = at91_set_selfpowered, .vbus_session = at91_vbus_session, .pullup = at91_pullup, .start = at91_start, .stop = at91_stop, /* * VBUS-powered devices may also want to support bigger * power budgets after an appropriate SET_CONFIGURATION. 
*/
/* .vbus_power = at91_vbus_power, */
};

/*-------------------------------------------------------------------------*/

/*
 * Service one endpoint's pending work: ack its completion/stall status
 * bits (per the SET_FX/CLR_FX write rules), then push or pull the next
 * packet for the request at the head of its queue.  Returns nonzero when
 * that request was completed.  NOTE(review): appears to require the
 * caller to hold udc->lock, as the irq paths elsewhere in this file do —
 * confirm before adding new callers.
 */
static int handle_ep(struct at91_ep *ep)
{
	struct at91_request *req;
	u32 __iomem *creg = ep->creg;
	u32 csr = __raw_readl(creg);

	/* work on the request at the head of this endpoint's queue, if any */
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				struct at91_request, queue);
	else
		req = NULL;

	if (ep->is_in) {
		/* ack a sent STALL or completed TX before writing more */
		if (csr & (AT91_UDP_STALLSENT | AT91_UDP_TXCOMP)) {
			csr |= CLR_FX;
			csr &= ~(SET_FX | AT91_UDP_STALLSENT | AT91_UDP_TXCOMP);
			__raw_writel(csr, creg);
		}
		if (req)
			return write_fifo(ep, req);
	} else {
		if (csr & AT91_UDP_STALLSENT) {
			/* STALLSENT bit == ISOERR */
			if (ep->is_iso && req)
				req->req.status = -EILSEQ;
			csr |= CLR_FX;
			csr &= ~(SET_FX | AT91_UDP_STALLSENT);
			__raw_writel(csr, creg);
			csr = __raw_readl(creg);
		}
		if (req && (csr & RX_DATA_READY))
			return read_fifo(ep, req);
	}
	return 0;
}

/* overlay so a SETUP packet can be read as raw fifo bytes and then
 * interpreted as the standard usb_ctrlrequest fields
 */
union setup {
	u8 raw[8];
	struct usb_ctrlrequest r;
};

/* Handle a SETUP packet on ep0: read and ack it, service the standard
 * device/interface/endpoint requests that touch hardware directly, and
 * delegate everything else to the gadget driver's setup() callback.
 */
static void handle_setup(struct at91_udc *udc, struct at91_ep *ep, u32 csr)
{
	u32 __iomem *creg = ep->creg;
	u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0));
	unsigned rxcount, i = 0;
	u32 tmp;
	union setup pkt;
	int status = 0;

	/* read and ack SETUP; hard-fail for bogus packets */
	rxcount = (csr & AT91_UDP_RXBYTECNT) >> 16;
	if (likely(rxcount == 8)) {
		while (rxcount--)
			pkt.raw[i++] = __raw_readb(dreg);
		/* record the data-stage direction for the DIR bit and for
		 * the fifo handlers
		 */
		if (pkt.r.bRequestType & USB_DIR_IN) {
			csr |= AT91_UDP_DIR;
			ep->is_in = 1;
		} else {
			csr &= ~AT91_UDP_DIR;
			ep->is_in = 0;
		}
	} else {
		/* REVISIT this happens sometimes under load; why??
*/ ERR("SETUP len %d, csr %08x\n", rxcount, csr); status = -EINVAL; } csr |= CLR_FX; csr &= ~(SET_FX | AT91_UDP_RXSETUP); __raw_writel(csr, creg); udc->wait_for_addr_ack = 0; udc->wait_for_config_ack = 0; ep->stopped = 0; if (unlikely(status != 0)) goto stall; #define w_index le16_to_cpu(pkt.r.wIndex) #define w_value le16_to_cpu(pkt.r.wValue) #define w_length le16_to_cpu(pkt.r.wLength) VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n", pkt.r.bRequestType, pkt.r.bRequest, w_value, w_index, w_length); /* * A few standard requests get handled here, ones that touch * hardware ... notably for device and endpoint features. */ udc->req_pending = 1; csr = __raw_readl(creg); csr |= CLR_FX; csr &= ~SET_FX; switch ((pkt.r.bRequestType << 8) | pkt.r.bRequest) { case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8) | USB_REQ_SET_ADDRESS: __raw_writel(csr | AT91_UDP_TXPKTRDY, creg); udc->addr = w_value; udc->wait_for_addr_ack = 1; udc->req_pending = 0; /* FADDR is set later, when we ack host STATUS */ return; case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8) | USB_REQ_SET_CONFIGURATION: tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT) & AT91_UDP_CONFG; if (pkt.r.wValue) udc->wait_for_config_ack = (tmp == 0); else udc->wait_for_config_ack = (tmp != 0); if (udc->wait_for_config_ack) VDBG("wait for config\n"); /* CONFG is toggled later, if gadget driver succeeds */ break; /* * Hosts may set or clear remote wakeup status, and * devices may report they're VBUS powered. 
*/ case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8) | USB_REQ_GET_STATUS: tmp = (udc->selfpowered << USB_DEVICE_SELF_POWERED); if (at91_udp_read(udc, AT91_UDP_GLB_STAT) & AT91_UDP_ESR) tmp |= (1 << USB_DEVICE_REMOTE_WAKEUP); PACKET("get device status\n"); __raw_writeb(tmp, dreg); __raw_writeb(0, dreg); goto write_in; /* then STATUS starts later, automatically */ case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8) | USB_REQ_SET_FEATURE: if (w_value != USB_DEVICE_REMOTE_WAKEUP) goto stall; tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT); tmp |= AT91_UDP_ESR; at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp); goto succeed; case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8) | USB_REQ_CLEAR_FEATURE: if (w_value != USB_DEVICE_REMOTE_WAKEUP) goto stall; tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT); tmp &= ~AT91_UDP_ESR; at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp); goto succeed; /* * Interfaces have no feature settings; this is pretty useless. * we won't even insist the interface exists... */ case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8) | USB_REQ_GET_STATUS: PACKET("get interface status\n"); __raw_writeb(0, dreg); __raw_writeb(0, dreg); goto write_in; /* then STATUS starts later, automatically */ case ((USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8) | USB_REQ_SET_FEATURE: case ((USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8) | USB_REQ_CLEAR_FEATURE: goto stall; /* * Hosts may clear bulk/intr endpoint halt after the gadget * driver sets it (not widely used); or set it (for testing) */ case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8) | USB_REQ_GET_STATUS: tmp = w_index & USB_ENDPOINT_NUMBER_MASK; ep = &udc->ep[tmp]; if (tmp >= NUM_ENDPOINTS || (tmp && !ep->desc)) goto stall; if (tmp) { if ((w_index & USB_DIR_IN)) { if (!ep->is_in) goto stall; } else if (ep->is_in) goto stall; } PACKET("get %s status\n", ep->ep.name); if (__raw_readl(ep->creg) & AT91_UDP_FORCESTALL) tmp = (1 << USB_ENDPOINT_HALT); else tmp = 0; __raw_writeb(tmp, dreg); __raw_writeb(0, 
dreg); goto write_in; /* then STATUS starts later, automatically */ case ((USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8) | USB_REQ_SET_FEATURE: tmp = w_index & USB_ENDPOINT_NUMBER_MASK; ep = &udc->ep[tmp]; if (w_value != USB_ENDPOINT_HALT || tmp >= NUM_ENDPOINTS) goto stall; if (!ep->desc || ep->is_iso) goto stall; if ((w_index & USB_DIR_IN)) { if (!ep->is_in) goto stall; } else if (ep->is_in) goto stall; tmp = __raw_readl(ep->creg); tmp &= ~SET_FX; tmp |= CLR_FX | AT91_UDP_FORCESTALL; __raw_writel(tmp, ep->creg); goto succeed; case ((USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8) | USB_REQ_CLEAR_FEATURE: tmp = w_index & USB_ENDPOINT_NUMBER_MASK; ep = &udc->ep[tmp]; if (w_value != USB_ENDPOINT_HALT || tmp >= NUM_ENDPOINTS) goto stall; if (tmp == 0) goto succeed; if (!ep->desc || ep->is_iso) goto stall; if ((w_index & USB_DIR_IN)) { if (!ep->is_in) goto stall; } else if (ep->is_in) goto stall; at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask); at91_udp_write(udc, AT91_UDP_RST_EP, 0); tmp = __raw_readl(ep->creg); tmp |= CLR_FX; tmp &= ~(SET_FX | AT91_UDP_FORCESTALL); __raw_writel(tmp, ep->creg); if (!list_empty(&ep->queue)) handle_ep(ep); goto succeed; } #undef w_value #undef w_index #undef w_length /* pass request up to the gadget driver */ if (udc->driver) { spin_unlock(&udc->lock); status = udc->driver->setup(&udc->gadget, &pkt.r); spin_lock(&udc->lock); } else status = -ENODEV; if (status < 0) { stall: VDBG("req %02x.%02x protocol STALL; stat %d\n", pkt.r.bRequestType, pkt.r.bRequest, status); csr |= AT91_UDP_FORCESTALL; __raw_writel(csr, creg); udc->req_pending = 0; } return; succeed: /* immediate successful (IN) STATUS after zero length DATA */ PACKET("ep0 in/status\n"); write_in: csr |= AT91_UDP_TXPKTRDY; __raw_writel(csr, creg); udc->req_pending = 0; } static void handle_ep0(struct at91_udc *udc) { struct at91_ep *ep0 = &udc->ep[0]; u32 __iomem *creg = ep0->creg; u32 csr = __raw_readl(creg); struct at91_request *req; if (unlikely(csr & AT91_UDP_STALLSENT)) { 
nuke(ep0, -EPROTO); udc->req_pending = 0; csr |= CLR_FX; csr &= ~(SET_FX | AT91_UDP_STALLSENT | AT91_UDP_FORCESTALL); __raw_writel(csr, creg); VDBG("ep0 stalled\n"); csr = __raw_readl(creg); } if (csr & AT91_UDP_RXSETUP) { nuke(ep0, 0); udc->req_pending = 0; handle_setup(udc, ep0, csr); return; } if (list_empty(&ep0->queue)) req = NULL; else req = list_entry(ep0->queue.next, struct at91_request, queue); /* host ACKed an IN packet that we sent */ if (csr & AT91_UDP_TXCOMP) { csr |= CLR_FX; csr &= ~(SET_FX | AT91_UDP_TXCOMP); /* write more IN DATA? */ if (req && ep0->is_in) { if (handle_ep(ep0)) udc->req_pending = 0; /* * Ack after: * - last IN DATA packet (including GET_STATUS) * - IN/STATUS for OUT DATA * - IN/STATUS for any zero-length DATA stage * except for the IN DATA case, the host should send * an OUT status later, which we'll ack. */ } else { udc->req_pending = 0; __raw_writel(csr, creg); /* * SET_ADDRESS takes effect only after the STATUS * (to the original address) gets acked. */ if (udc->wait_for_addr_ack) { u32 tmp; at91_udp_write(udc, AT91_UDP_FADDR, AT91_UDP_FEN | udc->addr); tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT); tmp &= ~AT91_UDP_FADDEN; if (udc->addr) tmp |= AT91_UDP_FADDEN; at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp); udc->wait_for_addr_ack = 0; VDBG("address %d\n", udc->addr); } } } /* OUT packet arrived ... */ else if (csr & AT91_UDP_RX_DATA_BK0) { csr |= CLR_FX; csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0); /* OUT DATA stage */ if (!ep0->is_in) { if (req) { if (handle_ep(ep0)) { /* send IN/STATUS */ PACKET("ep0 in/status\n"); csr = __raw_readl(creg); csr &= ~SET_FX; csr |= CLR_FX | AT91_UDP_TXPKTRDY; __raw_writel(csr, creg); udc->req_pending = 0; } } else if (udc->req_pending) { /* * AT91 hardware has a hard time with this * "deferred response" mode for control-OUT * transfers. (For control-IN it's fine.) * * The normal solution leaves OUT data in the * fifo until the gadget driver is ready. 
* We couldn't do that here without disabling * the IRQ that tells about SETUP packets, * e.g. when the host gets impatient... * * Working around it by copying into a buffer * would almost be a non-deferred response, * except that it wouldn't permit reliable * stalling of the request. Instead, demand * that gadget drivers not use this mode. */ DBG("no control-OUT deferred responses!\n"); __raw_writel(csr | AT91_UDP_FORCESTALL, creg); udc->req_pending = 0; } /* STATUS stage for control-IN; ack. */ } else { PACKET("ep0 out/status ACK\n"); __raw_writel(csr, creg); /* "early" status stage */ if (req) done(ep0, req, 0); } } } static irqreturn_t at91_udc_irq (int irq, void *_udc) { struct at91_udc *udc = _udc; u32 rescans = 5; int disable_clock = 0; unsigned long flags; spin_lock_irqsave(&udc->lock, flags); if (!udc->clocked) { clk_on(udc); disable_clock = 1; } while (rescans--) { u32 status; status = at91_udp_read(udc, AT91_UDP_ISR) & at91_udp_read(udc, AT91_UDP_IMR); if (!status) break; /* USB reset irq: not maskable */ if (status & AT91_UDP_ENDBUSRES) { at91_udp_write(udc, AT91_UDP_IDR, ~MINIMUS_INTERRUPTUS); at91_udp_write(udc, AT91_UDP_IER, MINIMUS_INTERRUPTUS); /* Atmel code clears this irq twice */ at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_ENDBUSRES); at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_ENDBUSRES); VDBG("end bus reset\n"); udc->addr = 0; stop_activity(udc); /* enable ep0 */ at91_udp_write(udc, AT91_UDP_CSR(0), AT91_UDP_EPEDS | AT91_UDP_EPTYPE_CTRL); udc->gadget.speed = USB_SPEED_FULL; udc->suspended = 0; at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_EP(0)); /* * NOTE: this driver keeps clocks off unless the * USB host is present. That saves power, but for * boards that don't support VBUS detection, both * clocks need to be active most of the time. 
*/ /* host initiated suspend (3+ms bus idle) */ } else if (status & AT91_UDP_RXSUSP) { at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXSUSP); at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_RXRSM); at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXSUSP); /* VDBG("bus suspend\n"); */ if (udc->suspended) continue; udc->suspended = 1; /* * NOTE: when suspending a VBUS-powered device, the * gadget driver should switch into slow clock mode * and then into standby to avoid drawing more than * 500uA power (2500uA for some high-power configs). */ if (udc->driver && udc->driver->suspend) { spin_unlock(&udc->lock); udc->driver->suspend(&udc->gadget); spin_lock(&udc->lock); } /* host initiated resume */ } else if (status & AT91_UDP_RXRSM) { at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXRSM); at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_RXSUSP); at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXRSM); /* VDBG("bus resume\n"); */ if (!udc->suspended) continue; udc->suspended = 0; /* * NOTE: for a VBUS-powered device, the gadget driver * would normally want to switch out of slow clock * mode into normal mode. 
*/ if (udc->driver && udc->driver->resume) { spin_unlock(&udc->lock); udc->driver->resume(&udc->gadget); spin_lock(&udc->lock); } /* endpoint IRQs are cleared by handling them */ } else { int i; unsigned mask = 1; struct at91_ep *ep = &udc->ep[1]; if (status & mask) handle_ep0(udc); for (i = 1; i < NUM_ENDPOINTS; i++) { mask <<= 1; if (status & mask) handle_ep(ep); ep++; } } } if (disable_clock) clk_off(udc); spin_unlock_irqrestore(&udc->lock, flags); return IRQ_HANDLED; } /*-------------------------------------------------------------------------*/ static void nop_release(struct device *dev) { /* nothing to free */ } static struct at91_udc controller = { .gadget = { .ops = &at91_udc_ops, .ep0 = &controller.ep[0].ep, .name = driver_name, .dev = { .init_name = "gadget", .release = nop_release, } }, .ep[0] = { .ep = { .name = ep0name, .ops = &at91_ep_ops, }, .udc = &controller, .maxpacket = 8, .int_mask = 1 << 0, }, .ep[1] = { .ep = { .name = "ep1", .ops = &at91_ep_ops, }, .udc = &controller, .is_pingpong = 1, .maxpacket = 64, .int_mask = 1 << 1, }, .ep[2] = { .ep = { .name = "ep2", .ops = &at91_ep_ops, }, .udc = &controller, .is_pingpong = 1, .maxpacket = 64, .int_mask = 1 << 2, }, .ep[3] = { .ep = { /* could actually do bulk too */ .name = "ep3-int", .ops = &at91_ep_ops, }, .udc = &controller, .maxpacket = 8, .int_mask = 1 << 3, }, .ep[4] = { .ep = { .name = "ep4", .ops = &at91_ep_ops, }, .udc = &controller, .is_pingpong = 1, .maxpacket = 256, .int_mask = 1 << 4, }, .ep[5] = { .ep = { .name = "ep5", .ops = &at91_ep_ops, }, .udc = &controller, .is_pingpong = 1, .maxpacket = 256, .int_mask = 1 << 5, }, /* ep6 and ep7 are also reserved (custom silicon might use them) */ }; static void at91_vbus_update(struct at91_udc *udc, unsigned value) { value ^= udc->board.vbus_active_low; if (value != udc->vbus) at91_vbus_session(&udc->gadget, value); } static irqreturn_t at91_vbus_irq(int irq, void *_udc) { struct at91_udc *udc = _udc; /* vbus needs at least brief debouncing */ 
udelay(10); at91_vbus_update(udc, gpio_get_value(udc->board.vbus_pin)); return IRQ_HANDLED; } static void at91_vbus_timer_work(struct work_struct *work) { struct at91_udc *udc = container_of(work, struct at91_udc, vbus_timer_work); at91_vbus_update(udc, gpio_get_value_cansleep(udc->board.vbus_pin)); if (!timer_pending(&udc->vbus_timer)) mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT); } static void at91_vbus_timer(unsigned long data) { struct at91_udc *udc = (struct at91_udc *)data; /* * If we are polling vbus it is likely that the gpio is on an * bus such as i2c or spi which may sleep, so schedule some work * to read the vbus gpio */ if (!work_pending(&udc->vbus_timer_work)) schedule_work(&udc->vbus_timer_work); } static int at91_start(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)) { struct at91_udc *udc = &controller; int retval; unsigned long flags; if (!driver || driver->max_speed < USB_SPEED_FULL || !bind || !driver->setup) { DBG("bad parameter.\n"); return -EINVAL; } if (udc->driver) { DBG("UDC already has a gadget driver\n"); return -EBUSY; } udc->driver = driver; udc->gadget.dev.driver = &driver->driver; dev_set_drvdata(&udc->gadget.dev, &driver->driver); udc->enabled = 1; udc->selfpowered = 1; retval = bind(&udc->gadget); if (retval) { DBG("bind() returned %d\n", retval); udc->driver = NULL; udc->gadget.dev.driver = NULL; dev_set_drvdata(&udc->gadget.dev, NULL); udc->enabled = 0; udc->selfpowered = 0; return retval; } spin_lock_irqsave(&udc->lock, flags); pullup(udc, 1); spin_unlock_irqrestore(&udc->lock, flags); DBG("bound to %s\n", driver->driver.name); return 0; } static int at91_stop(struct usb_gadget_driver *driver) { struct at91_udc *udc = &controller; unsigned long flags; if (!driver || driver != udc->driver || !driver->unbind) return -EINVAL; spin_lock_irqsave(&udc->lock, flags); udc->enabled = 0; at91_udp_write(udc, AT91_UDP_IDR, ~0); pullup(udc, 0); spin_unlock_irqrestore(&udc->lock, flags); 
driver->unbind(&udc->gadget); udc->gadget.dev.driver = NULL; dev_set_drvdata(&udc->gadget.dev, NULL); udc->driver = NULL; DBG("unbound from %s\n", driver->driver.name); return 0; } /*-------------------------------------------------------------------------*/ static void at91udc_shutdown(struct platform_device *dev) { struct at91_udc *udc = platform_get_drvdata(dev); unsigned long flags; /* force disconnect on reboot */ spin_lock_irqsave(&udc->lock, flags); pullup(platform_get_drvdata(dev), 0); spin_unlock_irqrestore(&udc->lock, flags); } static void __devinit at91udc_of_init(struct at91_udc *udc, struct device_node *np) { struct at91_udc_data *board = &udc->board; u32 val; enum of_gpio_flags flags; if (of_property_read_u32(np, "atmel,vbus-polled", &val) == 0) board->vbus_polled = 1; board->vbus_pin = of_get_named_gpio_flags(np, "atmel,vbus-gpio", 0, &flags); board->vbus_active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0; board->pullup_pin = of_get_named_gpio_flags(np, "atmel,pullup-gpio", 0, &flags); board->pullup_active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0; } static int __devinit at91udc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct at91_udc *udc; int retval; struct resource *res; if (!dev->platform_data) { /* small (so we copy it) but critical! 
*/ DBG("missing platform_data\n"); return -ENODEV; } if (pdev->num_resources != 2) { DBG("invalid num_resources\n"); return -ENODEV; } if ((pdev->resource[0].flags != IORESOURCE_MEM) || (pdev->resource[1].flags != IORESOURCE_IRQ)) { DBG("invalid resource type\n"); return -ENODEV; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; if (!request_mem_region(res->start, resource_size(res), driver_name)) { DBG("someone's using UDC memory\n"); return -EBUSY; } /* init software state */ udc = &controller; udc->gadget.dev.parent = dev; if (pdev->dev.of_node) at91udc_of_init(udc, pdev->dev.of_node); else memcpy(&udc->board, dev->platform_data, sizeof(struct at91_udc_data)); udc->pdev = pdev; udc->enabled = 0; spin_lock_init(&udc->lock); /* rm9200 needs manual D+ pullup; off by default */ if (cpu_is_at91rm9200()) { if (gpio_is_valid(udc->board.pullup_pin)) { DBG("no D+ pullup?\n"); retval = -ENODEV; goto fail0; } retval = gpio_request(udc->board.pullup_pin, "udc_pullup"); if (retval) { DBG("D+ pullup is busy\n"); goto fail0; } gpio_direction_output(udc->board.pullup_pin, udc->board.pullup_active_low); } /* newer chips have more FIFO memory than rm9200 */ if (cpu_is_at91sam9260() || cpu_is_at91sam9g20()) { udc->ep[0].maxpacket = 64; udc->ep[3].maxpacket = 64; udc->ep[4].maxpacket = 512; udc->ep[5].maxpacket = 512; } else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) { udc->ep[3].maxpacket = 64; } else if (cpu_is_at91sam9263()) { udc->ep[0].maxpacket = 64; udc->ep[3].maxpacket = 64; } udc->udp_baseaddr = ioremap(res->start, resource_size(res)); if (!udc->udp_baseaddr) { retval = -ENOMEM; goto fail0a; } udc_reinit(udc); /* get interface and function clocks */ udc->iclk = clk_get(dev, "udc_clk"); udc->fclk = clk_get(dev, "udpck"); if (IS_ERR(udc->iclk) || IS_ERR(udc->fclk)) { DBG("clocks missing\n"); retval = -ENODEV; /* NOTE: we "know" here that refcounts on these are NOPs */ goto fail0b; } retval = device_register(&udc->gadget.dev); if (retval 
< 0) { put_device(&udc->gadget.dev); goto fail0b; } /* don't do anything until we have both gadget driver and VBUS */ clk_enable(udc->iclk); at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff); /* Clear all pending interrupts - UDP may be used by bootloader. */ at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff); clk_disable(udc->iclk); /* request UDC and maybe VBUS irqs */ udc->udp_irq = platform_get_irq(pdev, 0); retval = request_irq(udc->udp_irq, at91_udc_irq, 0, driver_name, udc); if (retval < 0) { DBG("request irq %d failed\n", udc->udp_irq); goto fail1; } if (gpio_is_valid(udc->board.vbus_pin)) { retval = gpio_request(udc->board.vbus_pin, "udc_vbus"); if (retval < 0) { DBG("request vbus pin failed\n"); goto fail2; } gpio_direction_input(udc->board.vbus_pin); /* * Get the initial state of VBUS - we cannot expect * a pending interrupt. */ udc->vbus = gpio_get_value_cansleep(udc->board.vbus_pin) ^ udc->board.vbus_active_low; if (udc->board.vbus_polled) { INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work); setup_timer(&udc->vbus_timer, at91_vbus_timer, (unsigned long)udc); mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT); } else { if (request_irq(gpio_to_irq(udc->board.vbus_pin), at91_vbus_irq, 0, driver_name, udc)) { DBG("request vbus irq %d failed\n", udc->board.vbus_pin); retval = -EBUSY; goto fail3; } } } else { DBG("no VBUS detection, assuming always-on\n"); udc->vbus = 1; } retval = usb_add_gadget_udc(dev, &udc->gadget); if (retval) goto fail4; dev_set_drvdata(dev, udc); device_init_wakeup(dev, 1); create_debug_file(udc); INFO("%s version %s\n", driver_name, DRIVER_VERSION); return 0; fail4: if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled) free_irq(gpio_to_irq(udc->board.vbus_pin), udc); fail3: if (gpio_is_valid(udc->board.vbus_pin)) gpio_free(udc->board.vbus_pin); fail2: free_irq(udc->udp_irq, udc); fail1: device_unregister(&udc->gadget.dev); fail0b: iounmap(udc->udp_baseaddr); 
fail0a: if (cpu_is_at91rm9200()) gpio_free(udc->board.pullup_pin); fail0: release_mem_region(res->start, resource_size(res)); DBG("%s probe failed, %d\n", driver_name, retval); return retval; } static int __exit at91udc_remove(struct platform_device *pdev) { struct at91_udc *udc = platform_get_drvdata(pdev); struct resource *res; unsigned long flags; DBG("remove\n"); usb_del_gadget_udc(&udc->gadget); if (udc->driver) return -EBUSY; spin_lock_irqsave(&udc->lock, flags); pullup(udc, 0); spin_unlock_irqrestore(&udc->lock, flags); device_init_wakeup(&pdev->dev, 0); remove_debug_file(udc); if (gpio_is_valid(udc->board.vbus_pin)) { free_irq(gpio_to_irq(udc->board.vbus_pin), udc); gpio_free(udc->board.vbus_pin); } free_irq(udc->udp_irq, udc); device_unregister(&udc->gadget.dev); iounmap(udc->udp_baseaddr); if (cpu_is_at91rm9200()) gpio_free(udc->board.pullup_pin); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); clk_put(udc->iclk); clk_put(udc->fclk); return 0; } #ifdef CONFIG_PM static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg) { struct at91_udc *udc = platform_get_drvdata(pdev); int wake = udc->driver && device_may_wakeup(&pdev->dev); unsigned long flags; /* Unless we can act normally to the host (letting it wake us up * whenever it has work for us) force disconnect. Wakeup requires * PLLB for USB events (signaling for reset, wakeup, or incoming * tokens) and VBUS irqs (on systems which support them). 
*/ if ((!udc->suspended && udc->addr) || !wake || at91_suspend_entering_slow_clock()) { spin_lock_irqsave(&udc->lock, flags); pullup(udc, 0); wake = 0; spin_unlock_irqrestore(&udc->lock, flags); } else enable_irq_wake(udc->udp_irq); udc->active_suspend = wake; if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled && wake) enable_irq_wake(udc->board.vbus_pin); return 0; } static int at91udc_resume(struct platform_device *pdev) { struct at91_udc *udc = platform_get_drvdata(pdev); unsigned long flags; if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled && udc->active_suspend) disable_irq_wake(udc->board.vbus_pin); /* maybe reconnect to host; if so, clocks on */ if (udc->active_suspend) disable_irq_wake(udc->udp_irq); else { spin_lock_irqsave(&udc->lock, flags); pullup(udc, 1); spin_unlock_irqrestore(&udc->lock, flags); } return 0; } #else #define at91udc_suspend NULL #define at91udc_resume NULL #endif #if defined(CONFIG_OF) static const struct of_device_id at91_udc_dt_ids[] = { { .compatible = "atmel,at91rm9200-udc" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, at91_udc_dt_ids); #endif static struct platform_driver at91_udc_driver = { .remove = __exit_p(at91udc_remove), .shutdown = at91udc_shutdown, .suspend = at91udc_suspend, .resume = at91udc_resume, .driver = { .name = (char *) driver_name, .owner = THIS_MODULE, .of_match_table = of_match_ptr(at91_udc_dt_ids), }, }; static int __init udc_init_module(void) { return platform_driver_probe(&at91_udc_driver, at91udc_probe); } module_init(udc_init_module); static void __exit udc_exit_module(void) { platform_driver_unregister(&at91_udc_driver); } module_exit(udc_exit_module); MODULE_DESCRIPTION("AT91 udc driver"); MODULE_AUTHOR("Thomas Rathbone, David Brownell"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:at91_udc");
gpl-2.0
gundal/nobleltehk
drivers/media/pci/mantis/mantis_input.c
3732
3931
/* Mantis PCI bridge driver Copyright (C) Manu Abraham (abraham.manu@gmail.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #if 0 /* Currently unused */ #include <media/rc-core.h> #include <linux/pci.h> #include "dmxdev.h" #include "dvbdev.h" #include "dvb_demux.h" #include "dvb_frontend.h" #include "dvb_net.h" #include "mantis_common.h" #include "mantis_reg.h" #include "mantis_uart.h" #define MODULE_NAME "mantis_core" #define RC_MAP_MANTIS "rc-mantis" static struct rc_map_table mantis_ir_table[] = { { 0x29, KEY_POWER }, { 0x28, KEY_FAVORITES }, { 0x30, KEY_TEXT }, { 0x17, KEY_INFO }, /* Preview */ { 0x23, KEY_EPG }, { 0x3b, KEY_F22 }, /* Record List */ { 0x3c, KEY_1 }, { 0x3e, KEY_2 }, { 0x39, KEY_3 }, { 0x36, KEY_4 }, { 0x22, KEY_5 }, { 0x20, KEY_6 }, { 0x32, KEY_7 }, { 0x26, KEY_8 }, { 0x24, KEY_9 }, { 0x2a, KEY_0 }, { 0x33, KEY_CANCEL }, { 0x2c, KEY_BACK }, { 0x15, KEY_CLEAR }, { 0x3f, KEY_TAB }, { 0x10, KEY_ENTER }, { 0x14, KEY_UP }, { 0x0d, KEY_RIGHT }, { 0x0e, KEY_DOWN }, { 0x11, KEY_LEFT }, { 0x21, KEY_VOLUMEUP }, { 0x35, KEY_VOLUMEDOWN }, { 0x3d, KEY_CHANNELDOWN }, { 0x3a, KEY_CHANNELUP }, { 0x2e, KEY_RECORD }, { 0x2b, KEY_PLAY }, { 0x13, KEY_PAUSE }, { 0x25, KEY_STOP }, { 0x1f, KEY_REWIND }, { 0x2d, KEY_FASTFORWARD }, { 0x1e, KEY_PREVIOUS }, /* Replay |< */ { 0x1d, KEY_NEXT }, /* Skip >| */ { 0x0b, KEY_CAMERA }, /* Capture */ { 0x0f, KEY_LANGUAGE }, 
/* SAP */ { 0x18, KEY_MODE }, /* PIP */ { 0x12, KEY_ZOOM }, /* Full screen */ { 0x1c, KEY_SUBTITLE }, { 0x2f, KEY_MUTE }, { 0x16, KEY_F20 }, /* L/R */ { 0x38, KEY_F21 }, /* Hibernate */ { 0x37, KEY_SWITCHVIDEOMODE }, /* A/V */ { 0x31, KEY_AGAIN }, /* Recall */ { 0x1a, KEY_KPPLUS }, /* Zoom+ */ { 0x19, KEY_KPMINUS }, /* Zoom- */ { 0x27, KEY_RED }, { 0x0C, KEY_GREEN }, { 0x01, KEY_YELLOW }, { 0x00, KEY_BLUE }, }; static struct rc_map_list ir_mantis_map = { .map = { .scan = mantis_ir_table, .size = ARRAY_SIZE(mantis_ir_table), .rc_type = RC_TYPE_UNKNOWN, .name = RC_MAP_MANTIS, } }; int mantis_input_init(struct mantis_pci *mantis) { struct rc_dev *dev; int err; err = rc_map_register(&ir_mantis_map); if (err) goto out; dev = rc_allocate_device(); if (!dev) { dprintk(MANTIS_ERROR, 1, "Remote device allocation failed"); err = -ENOMEM; goto out_map; } sprintf(mantis->input_name, "Mantis %s IR receiver", mantis->hwconfig->model_name); sprintf(mantis->input_phys, "pci-%s/ir0", pci_name(mantis->pdev)); dev->input_name = mantis->input_name; dev->input_phys = mantis->input_phys; dev->input_id.bustype = BUS_PCI; dev->input_id.vendor = mantis->vendor_id; dev->input_id.product = mantis->device_id; dev->input_id.version = 1; dev->driver_name = MODULE_NAME; dev->map_name = RC_MAP_MANTIS; dev->dev.parent = &mantis->pdev->dev; err = rc_register_device(dev); if (err) { dprintk(MANTIS_ERROR, 1, "IR device registration failed, ret = %d", err); goto out_dev; } mantis->rc = dev; return 0; out_dev: rc_free_device(dev); out_map: rc_map_unregister(&ir_mantis_map); out: return err; } int mantis_init_exit(struct mantis_pci *mantis) { rc_unregister_device(mantis->rc); rc_map_unregister(&ir_mantis_map); return 0; } #endif
gpl-2.0
LonoCloud/coreos-linux
kernel/power/autosleep.c
3732
2649
/* * kernel/power/autosleep.c * * Opportunistic sleep support. * * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl> */ #include <linux/device.h> #include <linux/mutex.h> #include <linux/pm_wakeup.h> #include "power.h" static suspend_state_t autosleep_state; static struct workqueue_struct *autosleep_wq; /* * Note: it is only safe to mutex_lock(&autosleep_lock) if a wakeup_source * is active, otherwise a deadlock with try_to_suspend() is possible. * Alternatively mutex_lock_interruptible() can be used. This will then fail * if an auto_sleep cycle tries to freeze processes. */ static DEFINE_MUTEX(autosleep_lock); static struct wakeup_source *autosleep_ws; static void try_to_suspend(struct work_struct *work) { unsigned int initial_count, final_count; if (!pm_get_wakeup_count(&initial_count, true)) goto out; mutex_lock(&autosleep_lock); if (!pm_save_wakeup_count(initial_count) || system_state != SYSTEM_RUNNING) { mutex_unlock(&autosleep_lock); goto out; } if (autosleep_state == PM_SUSPEND_ON) { mutex_unlock(&autosleep_lock); return; } if (autosleep_state >= PM_SUSPEND_MAX) hibernate(); else pm_suspend(autosleep_state); mutex_unlock(&autosleep_lock); if (!pm_get_wakeup_count(&final_count, false)) goto out; /* * If the wakeup occured for an unknown reason, wait to prevent the * system from trying to suspend and waking up in a tight loop. 
*/ if (final_count == initial_count) schedule_timeout_uninterruptible(HZ / 2); out: queue_up_suspend_work(); } static DECLARE_WORK(suspend_work, try_to_suspend); void queue_up_suspend_work(void) { if (autosleep_state > PM_SUSPEND_ON) queue_work(autosleep_wq, &suspend_work); } suspend_state_t pm_autosleep_state(void) { return autosleep_state; } int pm_autosleep_lock(void) { return mutex_lock_interruptible(&autosleep_lock); } void pm_autosleep_unlock(void) { mutex_unlock(&autosleep_lock); } int pm_autosleep_set_state(suspend_state_t state) { #ifndef CONFIG_HIBERNATION if (state >= PM_SUSPEND_MAX) return -EINVAL; #endif __pm_stay_awake(autosleep_ws); mutex_lock(&autosleep_lock); autosleep_state = state; __pm_relax(autosleep_ws); if (state > PM_SUSPEND_ON) { pm_wakep_autosleep_enabled(true); queue_up_suspend_work(); } else { pm_wakep_autosleep_enabled(false); } mutex_unlock(&autosleep_lock); return 0; } int __init pm_autosleep_init(void) { autosleep_ws = wakeup_source_register("autosleep"); if (!autosleep_ws) return -ENOMEM; autosleep_wq = alloc_ordered_workqueue("autosleep", 0); if (autosleep_wq) return 0; wakeup_source_unregister(autosleep_ws); return -ENOMEM; }
gpl-2.0
Tasssadar/android_kernel_asus_grouper
arch/xtensa/platforms/iss/console.c
3988
7023
/* * arch/xtensa/platforms/iss/console.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001-2005 Tensilica Inc. * Authors Christian Zankel, Joe Taylor */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/console.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/major.h> #include <linux/param.h> #include <linux/seq_file.h> #include <linux/serial.h> #include <linux/serialP.h> #include <asm/uaccess.h> #include <asm/irq.h> #include <platform/simcall.h> #include <linux/tty.h> #include <linux/tty_flip.h> #ifdef SERIAL_INLINE #define _INLINE_ inline #endif #define SERIAL_MAX_NUM_LINES 1 #define SERIAL_TIMER_VALUE (20 * HZ) static struct tty_driver *serial_driver; static struct timer_list serial_timer; static DEFINE_SPINLOCK(timer_lock); int errno; static int __simc (int a, int b, int c, int d, int e, int f) __attribute__((__noinline__)); static int __simc (int a, int b, int c, int d, int e, int f) { int ret; __asm__ __volatile__ ("simcall\n" "mov %0, a2\n" "mov %1, a3\n" : "=a" (ret), "=a" (errno) : : "a2", "a3"); return ret; } static char *serial_version = "0.1"; static char *serial_name = "ISS serial driver"; /* * This routine is called whenever a serial port is opened. It * enables interrupts for a serial port, linking in its async structure into * the IRQ chain. It also performs the serial-specific * initialization for the tty structure. 
*/ static void rs_poll(unsigned long); static int rs_open(struct tty_struct *tty, struct file * filp) { int line = tty->index; if ((line < 0) || (line >= SERIAL_MAX_NUM_LINES)) return -ENODEV; spin_lock(&timer_lock); if (tty->count == 1) { init_timer(&serial_timer); serial_timer.data = (unsigned long) tty; serial_timer.function = rs_poll; mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); } spin_unlock(&timer_lock); return 0; } /* * ------------------------------------------------------------ * iss_serial_close() * * This routine is called when the serial port gets closed. First, we * wait for the last remaining data to be sent. Then, we unlink its * async structure from the interrupt chain if necessary, and we free * that IRQ if nothing is left in the chain. * ------------------------------------------------------------ */ static void rs_close(struct tty_struct *tty, struct file * filp) { spin_lock(&timer_lock); if (tty->count == 1) del_timer_sync(&serial_timer); spin_unlock(&timer_lock); } static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count) { /* see drivers/char/serialX.c to reference original version */ __simc (SYS_write, 1, (unsigned long)buf, count, 0, 0); return count; } static void rs_poll(unsigned long priv) { struct tty_struct* tty = (struct tty_struct*) priv; struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; int i = 0; unsigned char c; spin_lock(&timer_lock); while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){ __simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0); tty_insert_flip_char(tty, c, TTY_NORMAL); i++; } if (i) tty_flip_buffer_push(tty); mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); spin_unlock(&timer_lock); } static int rs_put_char(struct tty_struct *tty, unsigned char ch) { char buf[2]; buf[0] = ch; buf[1] = '\0'; /* Is this NULL necessary? 
*/ __simc (SYS_write, 1, (unsigned long) buf, 1, 0, 0); return 1; } static void rs_flush_chars(struct tty_struct *tty) { } static int rs_write_room(struct tty_struct *tty) { /* Let's say iss can always accept 2K characters.. */ return 2 * 1024; } static int rs_chars_in_buffer(struct tty_struct *tty) { /* the iss doesn't buffer characters */ return 0; } static void rs_hangup(struct tty_struct *tty) { /* Stub, once again.. */ } static void rs_wait_until_sent(struct tty_struct *tty, int timeout) { /* Stub, once again.. */ } static int rs_proc_show(struct seq_file *m, void *v) { seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version); return 0; } static int rs_proc_open(struct inode *inode, struct file *file) { return single_open(file, rs_proc_show, NULL); } static const struct file_operations rs_proc_fops = { .owner = THIS_MODULE, .open = rs_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct tty_operations serial_ops = { .open = rs_open, .close = rs_close, .write = rs_write, .put_char = rs_put_char, .flush_chars = rs_flush_chars, .write_room = rs_write_room, .chars_in_buffer = rs_chars_in_buffer, .hangup = rs_hangup, .wait_until_sent = rs_wait_until_sent, .proc_fops = &rs_proc_fops, }; int __init rs_init(void) { serial_driver = alloc_tty_driver(1); printk ("%s %s\n", serial_name, serial_version); /* Initialize the tty_driver structure */ serial_driver->owner = THIS_MODULE; serial_driver->driver_name = "iss_serial"; serial_driver->name = "ttyS"; serial_driver->major = TTY_MAJOR; serial_driver->minor_start = 64; serial_driver->type = TTY_DRIVER_TYPE_SERIAL; serial_driver->subtype = SERIAL_TYPE_NORMAL; serial_driver->init_termios = tty_std_termios; serial_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; serial_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(serial_driver, &serial_ops); if (tty_register_driver(serial_driver)) panic("Couldn't register serial driver\n"); return 0; } static __exit 
void rs_exit(void) { int error; if ((error = tty_unregister_driver(serial_driver))) printk("ISS_SERIAL: failed to unregister serial driver (%d)\n", error); put_tty_driver(serial_driver); } /* We use `late_initcall' instead of just `__initcall' as a workaround for * the fact that (1) simcons_tty_init can't be called before tty_init, * (2) tty_init is called via `module_init', (3) if statically linked, * module_init == device_init, and (4) there's no ordering of init lists. * We can do this easily because simcons is always statically linked, but * other tty drivers that depend on tty_init and which must use * `module_init' to declare their init routines are likely to be broken. */ late_initcall(rs_init); #ifdef CONFIG_SERIAL_CONSOLE static void iss_console_write(struct console *co, const char *s, unsigned count) { int len = strlen(s); if (s != 0 && *s != 0) __simc (SYS_write, 1, (unsigned long)s, count < len ? count : len,0,0); } static struct tty_driver* iss_console_device(struct console *c, int *index) { *index = c->index; return serial_driver; } static struct console sercons = { .name = "ttyS", .write = iss_console_write, .device = iss_console_device, .flags = CON_PRINTBUFFER, .index = -1 }; static int __init iss_console_init(void) { register_console(&sercons); return 0; } console_initcall(iss_console_init); #endif /* CONFIG_SERIAL_CONSOLE */
gpl-2.0
zbwu/htc-kernel-ace
drivers/watchdog/wdrtas.c
4244
16951
/*
 * FIXME: add wdrtas_get_status and wdrtas_get_boot_status as soon as
 * RTAS calls are available
 */
/*
 * RTAS watchdog driver
 *
 * (C) Copyright IBM Corp. 2005
 * device driver to exploit watchdog RTAS functions
 *
 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>

#include <asm/rtas.h>

/* magic stored in wdrtas_expect_close when the user wrote 'V' (magic close) */
#define WDRTAS_MAGIC_CHAR		42
#define WDRTAS_SUPPORTED_MASK		(WDIOF_SETTIMEOUT | \
					 WDIOF_MAGICCLOSE)

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
MODULE_DESCRIPTION("RTAS watchdog driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS_MISCDEV(TEMP_MINOR);

static int wdrtas_nowayout = WATCHDOG_NOWAYOUT;
/* single-open guard for the /dev/watchdog misc device */
static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0);
/* set to WDRTAS_MAGIC_CHAR when a 'V' was written, i.e. close may stop */
static char wdrtas_expect_close;

/* current keepalive interval, in seconds */
static int wdrtas_interval;

#define WDRTAS_THERMAL_SENSOR		3
static int wdrtas_token_get_sensor_state;
#define WDRTAS_SURVEILLANCE_IND		9000
static int wdrtas_token_set_indicator;
#define WDRTAS_SP_SPI			28
static int wdrtas_token_get_sp;
static int wdrtas_token_event_scan;
#define WDRTAS_DEFAULT_INTERVAL		300

#define WDRTAS_LOGBUFFER_LEN		128
static char wdrtas_logbuffer[WDRTAS_LOGBUFFER_LEN];

/*** watchdog access functions */

/**
 * wdrtas_set_interval - sets the watchdog interval
 * @interval: new interval
 *
 * returns 0 on success, <0 on failures
 *
 * wdrtas_set_interval sets the watchdog keepalive interval by calling the
 * RTAS function set-indicator (surveillance). The unit of interval is
 * seconds.
 */
static int wdrtas_set_interval(int interval)
{
	long result;
	/* rate-limit the error message to the first 10 failures */
	static int print_msg = 10;

	/* rtas uses minutes; round up to the next full minute */
	interval = (interval + 59) / 60;

	result = rtas_call(wdrtas_token_set_indicator, 3, 1, NULL,
			   WDRTAS_SURVEILLANCE_IND, 0, interval);
	if (result < 0 && print_msg) {
		printk(KERN_ERR "wdrtas: setting the watchdog to %i "
		       "timeout failed: %li\n", interval, result);
		print_msg--;
	}

	return result;
}

#define WDRTAS_SP_SPI_LEN 4

/**
 * wdrtas_get_interval - returns the current watchdog interval
 * @fallback_value: value (in seconds) to use, if the RTAS call fails
 *
 * returns the interval
 *
 * wdrtas_get_interval returns the current watchdog keepalive interval
 * as reported by the RTAS function ibm,get-system-parameter. The unit
 * of the return value is seconds.
 */
static int wdrtas_get_interval(int fallback_value)
{
	long result;
	char value[WDRTAS_SP_SPI_LEN];

	/* rtas_data_buf is a shared firmware buffer; serialize access */
	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, WDRTAS_SP_SPI_LEN);
	result = rtas_call(wdrtas_token_get_sp, 3, 1, NULL,
			   WDRTAS_SP_SPI, __pa(rtas_data_buf),
			   WDRTAS_SP_SPI_LEN);
	memcpy(value, rtas_data_buf, WDRTAS_SP_SPI_LEN);
	spin_unlock(&rtas_data_buf_lock);

	/*
	 * Expected reply layout: length bytes 0x00 0x02, value in byte 2,
	 * byte 3 zero. Anything else (or a failed call) -> fallback.
	 */
	if (value[0] != 0 || value[1] != 2 || value[3] != 0 || result < 0) {
		printk(KERN_WARNING "wdrtas: could not get sp_spi watchdog "
		       "timeout (%li). Continuing\n", result);
		return fallback_value;
	}

	/* rtas uses minutes */
	return ((int)value[2]) * 60;
}

/**
 * wdrtas_timer_start - starts watchdog
 *
 * wdrtas_timer_start starts the watchdog by calling the RTAS function
 * set-interval (surveillance)
 */
static void wdrtas_timer_start(void)
{
	wdrtas_set_interval(wdrtas_interval);
}

/**
 * wdrtas_timer_stop - stops watchdog
 *
 * wdrtas_timer_stop stops the watchdog timer by calling the RTAS function
 * set-interval (surveillance) with an interval of 0
 */
static void wdrtas_timer_stop(void)
{
	wdrtas_set_interval(0);
}

/**
 * wdrtas_log_scanned_event - logs an event we received during keepalive
 *
 * wdrtas_log_scanned_event prints a message to the log buffer dumping
 * the results of the last event-scan call, 16 bytes per line
 */
static void wdrtas_log_scanned_event(void)
{
	int i;

	for (i = 0; i < WDRTAS_LOGBUFFER_LEN; i += 16)
		printk(KERN_INFO "wdrtas: dumping event (line %i/%i), data = "
		       "%02x %02x %02x %02x  %02x %02x %02x %02x  "
		       "%02x %02x %02x %02x  %02x %02x %02x %02x\n",
		       (i / 16) + 1, (WDRTAS_LOGBUFFER_LEN / 16),
		       wdrtas_logbuffer[i + 0], wdrtas_logbuffer[i + 1],
		       wdrtas_logbuffer[i + 2], wdrtas_logbuffer[i + 3],
		       wdrtas_logbuffer[i + 4], wdrtas_logbuffer[i + 5],
		       wdrtas_logbuffer[i + 6], wdrtas_logbuffer[i + 7],
		       wdrtas_logbuffer[i + 8], wdrtas_logbuffer[i + 9],
		       wdrtas_logbuffer[i + 10], wdrtas_logbuffer[i + 11],
		       wdrtas_logbuffer[i + 12], wdrtas_logbuffer[i + 13],
		       wdrtas_logbuffer[i + 14], wdrtas_logbuffer[i + 15]);
}

/**
 * wdrtas_timer_keepalive - resets watchdog timer to keep system alive
 *
 * wdrtas_timer_keepalive restarts the watchdog timer by calling the
 * RTAS function event-scan and repeats these calls as long as there are
 * events available. All events will be dumped.
 */
static void wdrtas_timer_keepalive(void)
{
	long result;

	do {
		result = rtas_call(wdrtas_token_event_scan, 4, 1, NULL,
				   RTAS_EVENT_SCAN_ALL_EVENTS, 0,
				   (void *)__pa(wdrtas_logbuffer),
				   WDRTAS_LOGBUFFER_LEN);
		if (result < 0)
			printk(KERN_ERR "wdrtas: event-scan failed: %li\n",
			       result);
		/* result == 0 means "event returned" -> dump and scan again */
		if (result == 0)
			wdrtas_log_scanned_event();
	} while (result == 0);
}

/**
 * wdrtas_get_temperature - returns current temperature
 *
 * returns temperature or <0 on failures
 *
 * wdrtas_get_temperature returns the current temperature in Fahrenheit. It
 * uses the RTAS call get-sensor-state, token 3 to do so
 */
static int wdrtas_get_temperature(void)
{
	int result;
	int temperature = 0;

	result = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature);

	if (result < 0)
		printk(KERN_WARNING "wdrtas: reading the thermal sensor "
		       "failed: %i\n", result);
	else
		temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */

	return temperature;
}

/**
 * wdrtas_get_status - returns the status of the watchdog
 *
 * returns a bitmask of defines WDIOF_... as defined in
 * include/linux/watchdog.h
 */
static int wdrtas_get_status(void)
{
	return 0; /* TODO */
}

/**
 * wdrtas_get_boot_status - returns the reason for the last boot
 *
 * returns a bitmask of defines WDIOF_... as defined in
 * include/linux/watchdog.h, indicating why the watchdog rebooted the system
 */
static int wdrtas_get_boot_status(void)
{
	return 0; /* TODO */
}

/*** watchdog API and operations stuff */

/**
 * wdrtas_write - called when watchdog device is written to
 * @file: file structure
 * @buf: user buffer with data
 * @len: amount to data written
 * @ppos: position in file
 *
 * returns the number of successfully processed characters, which is always
 * the number of bytes passed to this function
 *
 * wdrtas_write processes all the data given to it and looks for the magic
 * character 'V'. This character allows the watchdog device to be closed
 * properly.
 */
static ssize_t wdrtas_write(struct file *file, const char __user *buf,
			    size_t len, loff_t *ppos)
{
	int i;
	char c;

	if (!len)
		goto out;

	if (!wdrtas_nowayout) {
		wdrtas_expect_close = 0;
		/* look for 'V' */
		for (i = 0; i < len; i++) {
			if (get_user(c, buf + i))
				return -EFAULT;

			/* allow to close device */
			if (c == 'V')
				wdrtas_expect_close = WDRTAS_MAGIC_CHAR;
		}
	}

	/* any write counts as a ping */
	wdrtas_timer_keepalive();

out:
	return len;
}

/**
 * wdrtas_ioctl - ioctl function for the watchdog device
 * @file: file structure
 * @cmd: command for ioctl
 * @arg: argument pointer
 *
 * returns 0 on success, <0 on failure
 *
 * wdrtas_ioctl implements the watchdog API ioctls
 */
static long wdrtas_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	int __user *argp = (void __user *)arg;
	int i;
	static const struct watchdog_info wdinfo = {
		.options = WDRTAS_SUPPORTED_MASK,
		.firmware_version = 0,
		.identity = "wdrtas",
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &wdinfo, sizeof(wdinfo)))
			return -EFAULT;
		return 0;

	case WDIOC_GETSTATUS:
		i = wdrtas_get_status();
		return put_user(i, argp);

	case WDIOC_GETBOOTSTATUS:
		i = wdrtas_get_boot_status();
		return put_user(i, argp);

	case WDIOC_GETTEMP:
		/* no temperature support without the get-sensor-state token */
		if (wdrtas_token_get_sensor_state == RTAS_UNKNOWN_SERVICE)
			return -EOPNOTSUPP;

		i = wdrtas_get_temperature();
		return put_user(i, argp);

	case WDIOC_SETOPTIONS:
		if (get_user(i, argp))
			return -EFAULT;
		if (i & WDIOS_DISABLECARD)
			wdrtas_timer_stop();
		if (i & WDIOS_ENABLECARD) {
			wdrtas_timer_keepalive();
			wdrtas_timer_start();
		}
		/* not implemented. Done by H8
		if (i & WDIOS_TEMPPANIC) {
		} */
		return 0;

	case WDIOC_KEEPALIVE:
		wdrtas_timer_keepalive();
		return 0;

	case WDIOC_SETTIMEOUT:
		if (get_user(i, argp))
			return -EFAULT;

		if (wdrtas_set_interval(i))
			return -EINVAL;

		wdrtas_timer_keepalive();

		/*
		 * Re-read the interval firmware actually applied (it rounds
		 * to minutes); fall back to the requested value when the
		 * query service is unavailable.
		 */
		if (wdrtas_token_get_sp == RTAS_UNKNOWN_SERVICE)
			wdrtas_interval = i;
		else
			wdrtas_interval = wdrtas_get_interval(i);
		/* fallthrough - SETTIMEOUT reports the effective timeout */

	case WDIOC_GETTIMEOUT:
		return put_user(wdrtas_interval, argp);

	default:
		return -ENOTTY;
	}
}

/**
 * wdrtas_open - open function of watchdog device
 * @inode: inode structure
 * @file: file structure
 *
 * returns 0 on success, -EBUSY if the file has been opened already, <0 on
 * other failures
 *
 * function called when watchdog device is opened
 */
static int wdrtas_open(struct inode *inode, struct file *file)
{
	/* only open once */
	if (atomic_inc_return(&wdrtas_miscdev_open) > 1) {
		atomic_dec(&wdrtas_miscdev_open);
		return -EBUSY;
	}

	wdrtas_timer_start();
	wdrtas_timer_keepalive();

	return nonseekable_open(inode, file);
}

/**
 * wdrtas_close - close function of watchdog device
 * @inode: inode structure
 * @file: file structure
 *
 * returns 0 on success
 *
 * close function. Always succeeds
 */
static int wdrtas_close(struct inode *inode, struct file *file)
{
	/* only stop watchdog, if this was announced using 'V' before */
	if (wdrtas_expect_close == WDRTAS_MAGIC_CHAR)
		wdrtas_timer_stop();
	else {
		printk(KERN_WARNING "wdrtas: got unexpected close. Watchdog "
		       "not stopped.\n");
		wdrtas_timer_keepalive();
	}

	wdrtas_expect_close = 0;
	atomic_dec(&wdrtas_miscdev_open);
	return 0;
}

/**
 * wdrtas_temp_read - gives back the temperature in fahrenheit
 * @file: file structure
 * @buf: user buffer
 * @count: number of bytes to be read
 * @ppos: position in file
 *
 * returns always 1 or -EFAULT in case of user space copy failures, <0 on
 * other failures
 *
 * wdrtas_temp_read gives the temperature to the users by copying this
 * value as one byte into the user space buffer. The unit is Fahrenheit...
 */
static ssize_t wdrtas_temp_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	int temperature = 0;

	temperature = wdrtas_get_temperature();
	if (temperature < 0)
		return temperature;

	/* only the low byte is exposed to userspace */
	if (copy_to_user(buf, &temperature, 1))
		return -EFAULT;

	return 1;
}

/**
 * wdrtas_temp_open - open function of temperature device
 * @inode: inode structure
 * @file: file structure
 *
 * returns 0 on success, <0 on failure
 *
 * function called when temperature device is opened
 */
static int wdrtas_temp_open(struct inode *inode, struct file *file)
{
	return nonseekable_open(inode, file);
}

/**
 * wdrtas_temp_close - close function of temperature device
 * @inode: inode structure
 * @file: file structure
 *
 * returns 0 on success
 *
 * close function. Always succeeds
 */
static int wdrtas_temp_close(struct inode *inode, struct file *file)
{
	return 0;
}

/**
 * wdrtas_reboot - reboot notifier function
 * @this: notifier block structure
 * @code: reboot code
 * @ptr: unused
 *
 * returns NOTIFY_DONE
 *
 * wdrtas_reboot stops the watchdog in case of a reboot
 */
static int wdrtas_reboot(struct notifier_block *this,
			 unsigned long code, void *ptr)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		wdrtas_timer_stop();

	return NOTIFY_DONE;
}

/*** initialization stuff */

static const struct file_operations wdrtas_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= wdrtas_write,
	.unlocked_ioctl	= wdrtas_ioctl,
	.open		= wdrtas_open,
	.release	= wdrtas_close,
};

static struct miscdevice wdrtas_miscdev = {
	.minor =	WATCHDOG_MINOR,
	.name =		"watchdog",
	.fops =		&wdrtas_fops,
};

static const struct file_operations wdrtas_temp_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= wdrtas_temp_read,
	.open		= wdrtas_temp_open,
	.release	= wdrtas_temp_close,
};

static struct miscdevice wdrtas_tempdev = {
	.minor =	TEMP_MINOR,
	.name =		"temperature",
	.fops =		&wdrtas_temp_fops,
};

static struct notifier_block wdrtas_notifier = {
	.notifier_call =	wdrtas_reboot,
};

/**
 * wdrtas_get_tokens - reads in RTAS tokens
 *
 * returns 0 on success, <0 on failure
 *
 * wdrtas_get_tokens reads in the tokens for the RTAS calls used in
 * this watchdog driver. It tolerates, if "get-sensor-state" and
 * "ibm,get-system-parameter" are not available.
 */
static int wdrtas_get_tokens(void)
{
	wdrtas_token_get_sensor_state = rtas_token("get-sensor-state");
	if (wdrtas_token_get_sensor_state == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_WARNING "wdrtas: couldn't get token for "
		       "get-sensor-state. Trying to continue without "
		       "temperature support.\n");
	}

	wdrtas_token_get_sp = rtas_token("ibm,get-system-parameter");
	if (wdrtas_token_get_sp == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_WARNING "wdrtas: couldn't get token for "
		       "ibm,get-system-parameter. Trying to continue with "
		       "a default timeout value of %i seconds.\n",
		       WDRTAS_DEFAULT_INTERVAL);
	}

	/* set-indicator and event-scan are mandatory for this driver */
	wdrtas_token_set_indicator = rtas_token("set-indicator");
	if (wdrtas_token_set_indicator == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_ERR "wdrtas: couldn't get token for "
		       "set-indicator. Terminating watchdog code.\n");
		return -EIO;
	}

	wdrtas_token_event_scan = rtas_token("event-scan");
	if (wdrtas_token_event_scan == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_ERR "wdrtas: couldn't get token for event-scan. "
		       "Terminating watchdog code.\n");
		return -EIO;
	}

	return 0;
}

/**
 * wdrtas_unregister_devs - unregisters the misc dev handlers
 *
 * wdrtas_register_devs unregisters the watchdog and temperature watchdog
 * misc devs
 */
static void wdrtas_unregister_devs(void)
{
	misc_deregister(&wdrtas_miscdev);
	if (wdrtas_token_get_sensor_state != RTAS_UNKNOWN_SERVICE)
		misc_deregister(&wdrtas_tempdev);
}

/**
 * wdrtas_register_devs - registers the misc dev handlers
 *
 * returns 0 on success, <0 on failure
 *
 * wdrtas_register_devs registers the watchdog and temperature watchdog
 * misc devs
 */
static int wdrtas_register_devs(void)
{
	int result;

	result = misc_register(&wdrtas_miscdev);
	if (result) {
		printk(KERN_ERR "wdrtas: couldn't register watchdog misc "
		       "device. Terminating watchdog code.\n");
		return result;
	}

	if (wdrtas_token_get_sensor_state != RTAS_UNKNOWN_SERVICE) {
		result = misc_register(&wdrtas_tempdev);
		if (result) {
			/* temperature device is optional: degrade, don't fail */
			printk(KERN_WARNING "wdrtas: couldn't register "
			       "watchdog temperature misc device. Continuing "
			       "without temperature support.\n");
			wdrtas_token_get_sensor_state = RTAS_UNKNOWN_SERVICE;
		}
	}

	return 0;
}

/**
 * wdrtas_init - init function of the watchdog driver
 *
 * returns 0 on success, <0 on failure
 *
 * registers the file handlers and the reboot notifier
 */
static int __init wdrtas_init(void)
{
	if (wdrtas_get_tokens())
		return -ENODEV;

	if (wdrtas_register_devs())
		return -ENODEV;

	if (register_reboot_notifier(&wdrtas_notifier)) {
		printk(KERN_ERR "wdrtas: could not register reboot notifier. "
		       "Terminating watchdog code.\n");
		wdrtas_unregister_devs();
		return -ENODEV;
	}

	if (wdrtas_token_get_sp == RTAS_UNKNOWN_SERVICE)
		wdrtas_interval = WDRTAS_DEFAULT_INTERVAL;
	else
		wdrtas_interval = wdrtas_get_interval(WDRTAS_DEFAULT_INTERVAL);

	return 0;
}

/**
 * wdrtas_exit - exit function of the watchdog driver
 *
 * unregisters the file handlers and the reboot notifier
 */
static void __exit wdrtas_exit(void)
{
	if (!wdrtas_nowayout)
		wdrtas_timer_stop();

	wdrtas_unregister_devs();

	unregister_reboot_notifier(&wdrtas_notifier);
}

module_init(wdrtas_init);
module_exit(wdrtas_exit);
gpl-2.0
CyanideL/android_kernel_htc_msm8974
arch/mips/ath79/mach-pb44.c
4500
2992
/*
 * Atheros PB44 reference board support
 *
 * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/i2c/pcf857x.h>

#include "machtypes.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-spi.h"
#include "dev-usb.h"

/* SoC GPIO lines used to bit-bang the I2C bus */
#define PB44_GPIO_I2C_SCL	0
#define PB44_GPIO_I2C_SDA	1

/*
 * GPIOs at or above PB44_GPIO_EXP_BASE live on the PCF8575 I2C expander,
 * not on the SoC itself.
 */
#define PB44_GPIO_EXP_BASE	16
#define PB44_GPIO_SW_RESET	(PB44_GPIO_EXP_BASE + 6)
#define PB44_GPIO_SW_JUMP	(PB44_GPIO_EXP_BASE + 8)
#define PB44_GPIO_LED_JUMP1	(PB44_GPIO_EXP_BASE + 9)
#define PB44_GPIO_LED_JUMP2	(PB44_GPIO_EXP_BASE + 10)

#define PB44_KEYS_POLL_INTERVAL		20	/* msecs */
#define PB44_KEYS_DEBOUNCE_INTERVAL	(3 * PB44_KEYS_POLL_INTERVAL)

/* bit-banged I2C adapter on the two SoC GPIOs above */
static struct i2c_gpio_platform_data pb44_i2c_gpio_data = {
	.sda_pin	= PB44_GPIO_I2C_SDA,
	.scl_pin	= PB44_GPIO_I2C_SCL,
};

static struct platform_device pb44_i2c_gpio_device = {
	.name		= "i2c-gpio",
	.id		= 0,
	.dev = {
		.platform_data	= &pb44_i2c_gpio_data,
	}
};

static struct pcf857x_platform_data pb44_pcf857x_data = {
	.gpio_base	= PB44_GPIO_EXP_BASE,
};

/* PCF8575 16-bit GPIO expander at I2C address 0x20 */
static struct i2c_board_info pb44_i2c_board_info[] __initdata = {
	{
		I2C_BOARD_INFO("pcf8575", 0x20),
		.platform_data = &pb44_pcf857x_data,
	},
};

/* both LEDs sit behind the expander and are active low */
static struct gpio_led pb44_leds_gpio[] __initdata = {
	{
		.name		= "pb44:amber:jump1",
		.gpio		= PB44_GPIO_LED_JUMP1,
		.active_low	= 1,
	}, {
		.name		= "pb44:green:jump2",
		.gpio		= PB44_GPIO_LED_JUMP2,
		.active_low	= 1,
	},
};

/* polled buttons (the expander cannot raise interrupts per key) */
static struct gpio_keys_button pb44_gpio_keys[] __initdata = {
	{
		.desc		= "soft_reset",
		.type		= EV_KEY,
		.code		= KEY_RESTART,
		.debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL,
		.gpio		= PB44_GPIO_SW_RESET,
		.active_low	= 1,
	}, {
		.desc		= "jumpstart",
		.type		= EV_KEY,
		.code		= KEY_WPS_BUTTON,
		.debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL,
		.gpio		= PB44_GPIO_SW_JUMP,
		.active_low	= 1,
	}
};

/* SPI flash (m25p64) on chip select 0 */
static struct spi_board_info pb44_spi_info[] = {
	{
		.bus_num	= 0,
		.chip_select	= 0,
		.max_speed_hz	= 25000000,
		.modalias	= "m25p64",
	},
};

static struct ath79_spi_platform_data pb44_spi_data = {
	.bus_num		= 0,
	.num_chipselect		= 1,
};

/*
 * Board init: the I2C adapter and expander must be registered before the
 * LED/keys devices that reference expander GPIOs can be probed.
 */
static void __init pb44_init(void)
{
	i2c_register_board_info(0, pb44_i2c_board_info,
				ARRAY_SIZE(pb44_i2c_board_info));
	platform_device_register(&pb44_i2c_gpio_device);

	ath79_register_leds_gpio(-1, ARRAY_SIZE(pb44_leds_gpio),
				 pb44_leds_gpio);
	ath79_register_gpio_keys_polled(-1, PB44_KEYS_POLL_INTERVAL,
					ARRAY_SIZE(pb44_gpio_keys),
					pb44_gpio_keys);
	ath79_register_spi(&pb44_spi_data, pb44_spi_info,
			   ARRAY_SIZE(pb44_spi_info));
	ath79_register_usb();
}

MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board",
	     pb44_init);
gpl-2.0
Sudokamikaze/Darkspell-taoshan
arch/arm/mach-omap2/board-generic.c
4756
4020
/*
 * Copyright (C) 2005 Nokia Corporation
 * Author: Paul Mundt <paul.mundt@nokia.com>
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Modified from the original mach-omap/omap2/board-generic.c did by Paul
 * to support the OMAP2+ device tree boards with an unique board file.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
#include <linux/i2c/twl.h>

#include <mach/hardware.h>
#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>

#include <plat/board.h>
#include "common.h"
#include "common-board-devices.h"

/*
 * Stub out the interrupt-controller init hooks that are not built for
 * the configured SoC generation, so irq_match below always links.
 */
#if !(defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
#define omap_intc_of_init	NULL
#endif
#ifndef CONFIG_ARCH_OMAP4
#define gic_of_init		NULL
#endif

/* DT compatible strings -> interrupt controller init routine */
static struct of_device_id irq_match[] __initdata = {
	{ .compatible = "ti,omap2-intc", .data = omap_intc_of_init, },
	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
	{ }
};

static void __init omap_init_irq(void)
{
	of_irq_init(irq_match);
}

/* buses whose children are populated as platform devices from the DT */
static struct of_device_id omap_dt_match_table[] __initdata = {
	{ .compatible = "simple-bus", },
	{ .compatible = "ti,omap-infra", },
	{ }
};

static void __init omap_generic_init(void)
{
	omap_sdrc_init(NULL, NULL);

	of_platform_populate(NULL, omap_dt_match_table, NULL, NULL);
}

#ifdef CONFIG_SOC_OMAP2420
static const char *omap242x_boards_compat[] __initdata = {
	"ti,omap2420",
	NULL,
};

DT_MACHINE_START(OMAP242X_DT, "Generic OMAP2420 (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= omap242x_map_io,
	.init_early	= omap2420_init_early,
	.init_irq	= omap_init_irq,
	.handle_irq	= omap2_intc_handle_irq,
	.init_machine	= omap_generic_init,
	.timer		= &omap2_timer,
	.dt_compat	= omap242x_boards_compat,
	.restart	= omap_prcm_restart,
MACHINE_END
#endif

#ifdef CONFIG_SOC_OMAP2430
static const char *omap243x_boards_compat[] __initdata = {
	"ti,omap2430",
	NULL,
};

DT_MACHINE_START(OMAP243X_DT, "Generic OMAP2430 (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= omap243x_map_io,
	.init_early	= omap2430_init_early,
	.init_irq	= omap_init_irq,
	.handle_irq	= omap2_intc_handle_irq,
	.init_machine	= omap_generic_init,
	.timer		= &omap2_timer,
	.dt_compat	= omap243x_boards_compat,
	.restart	= omap_prcm_restart,
MACHINE_END
#endif

#ifdef CONFIG_ARCH_OMAP3
/* minimal TWL4030 PMIC data; only the IRQ range is provided here */
static struct twl4030_platform_data beagle_twldata = {
	.irq_base	= TWL4030_IRQ_BASE,
	.irq_end	= TWL4030_IRQ_END,
};

static void __init omap3_i2c_init(void)
{
	omap3_pmic_init("twl4030", &beagle_twldata);
}

static void __init omap3_init(void)
{
	/* PMIC first, then the generic DT population */
	omap3_i2c_init();
	omap_generic_init();
}

static const char *omap3_boards_compat[] __initdata = {
	"ti,omap3",
	NULL,
};

DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= omap3430_init_early,
	.init_irq	= omap_init_irq,
	.handle_irq	= omap3_intc_handle_irq,
	.init_machine	= omap3_init,
	.timer		= &omap3_timer,
	.dt_compat	= omap3_boards_compat,
	.restart	= omap_prcm_restart,
MACHINE_END
#endif

#ifdef CONFIG_ARCH_OMAP4
/* minimal TWL6030 PMIC data; only the IRQ range is provided here */
static struct twl4030_platform_data sdp4430_twldata = {
	.irq_base	= TWL6030_IRQ_BASE,
	.irq_end	= TWL6030_IRQ_END,
};

static void __init omap4_i2c_init(void)
{
	omap4_pmic_init("twl6030", &sdp4430_twldata, NULL, 0);
}

static void __init omap4_init(void)
{
	/* PMIC first, then the generic DT population */
	omap4_i2c_init();
	omap_generic_init();
}

static const char *omap4_boards_compat[] __initdata = {
	"ti,omap4",
	NULL,
};

DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
	.reserve	= omap_reserve,
	.map_io		= omap4_map_io,
	.init_early	= omap4430_init_early,
	.init_irq	= omap_init_irq,
	.handle_irq	= gic_handle_irq,
	.init_machine	= omap4_init,
	.timer		= &omap4_timer,
	.dt_compat	= omap4_boards_compat,
	.restart	= omap_prcm_restart,
MACHINE_END
#endif
gpl-2.0
cmvienneau/android_kernel_htc_m4
drivers/net/wireless/rtlwifi/rtl8192de/rf.c
5012
19287
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "reg.h" #include "def.h" #include "phy.h" #include "rf.h" #include "dm.h" #include "hw.h" void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u8 rfpath; switch (bandwidth) { case HT_CHANNEL_WIDTH_20: for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { rtlphy->rfreg_chnlval[rfpath] = ((rtlphy->rfreg_chnlval [rfpath] & 0xfffff3ff) | 0x0400); rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) | BIT(11), 0x01); RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "20M RF 0x18 = 0x%x\n", rtlphy->rfreg_chnlval[rfpath]); } break; case HT_CHANNEL_WIDTH_20_40: for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { rtlphy->rfreg_chnlval[rfpath] = ((rtlphy->rfreg_chnlval[rfpath] & 0xfffff3ff)); rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) | BIT(11), 0x00); RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "40M RF 0x18 = 0x%x\n", rtlphy->rfreg_chnlval[rfpath]); } break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "unknown bandwidth: %#X\n", bandwidth); break; } } void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 tx_agc[2] = {0, 0}, tmpval; bool turbo_scanoff = false; u8 idx1, idx2; u8 *ptr; if (rtlefuse->eeprom_regulatory != 0) turbo_scanoff = true; if (mac->act_scanning) { tx_agc[RF90_PATH_A] = 0x3f3f3f3f; tx_agc[RF90_PATH_B] = 0x3f3f3f3f; if (turbo_scanoff) { for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { tx_agc[idx1] = ppowerlevel[idx1] | (ppowerlevel[idx1] << 8) | (ppowerlevel[idx1] << 16) | (ppowerlevel[idx1] << 24); } } } else { for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { tx_agc[idx1] 
= ppowerlevel[idx1] | (ppowerlevel[idx1] << 8) | (ppowerlevel[idx1] << 16) | (ppowerlevel[idx1] << 24); } if (rtlefuse->eeprom_regulatory == 0) { tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][6]) + (rtlphy->mcs_txpwrlevel_origoffset[0][7] << 8); tx_agc[RF90_PATH_A] += tmpval; tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) + (rtlphy->mcs_txpwrlevel_origoffset[0][15] << 24); tx_agc[RF90_PATH_B] += tmpval; } } for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { ptr = (u8 *) (&(tx_agc[idx1])); for (idx2 = 0; idx2 < 4; idx2++) { if (*ptr > RF6052_MAX_TX_PWR) *ptr = RF6052_MAX_TX_PWR; ptr++; } } tmpval = tx_agc[RF90_PATH_A] & 0xff; rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, BMASKBYTE1, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_A_CCK1_MCS32); tmpval = tx_agc[RF90_PATH_A] >> 8; rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] >> 24; rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, BMASKBYTE0, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff; rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK1_55_MCS32); } static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel, u32 *ofdmbase, u32 *mcsbase) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 powerbase0, powerbase1; u8 legacy_pwrdiff, ht20_pwrdiff; u8 i, powerlevel[2]; for (i = 0; i < 2; i++) { powerlevel[i] = ppowerlevel[i]; legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1]; powerbase0 = powerlevel[i] + legacy_pwrdiff; powerbase0 = (powerbase0 << 24) | 
(powerbase0 << 16) | (powerbase0 << 8) | powerbase0; *(ofdmbase + i) = powerbase0; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [OFDM power base index rf(%c) = 0x%x]\n", i == 0 ? 'A' : 'B', *(ofdmbase + i)); } for (i = 0; i < 2; i++) { if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1]; powerlevel[i] += ht20_pwrdiff; } powerbase1 = powerlevel[i]; powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) | (powerbase1 << 8) | powerbase1; *(mcsbase + i) = powerbase1; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [MCS power base index rf(%c) = 0x%x]\n", i == 0 ? 'A' : 'B', *(mcsbase + i)); } } static u8 _rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex) { u8 group; u8 channel_info[59] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 149, 151, 153, 155, 157, 159, 161, 163, 165 }; if (channel_info[chnlindex] <= 3) /* Chanel 1-3 */ group = 0; else if (channel_info[chnlindex] <= 9) /* Channel 4-9 */ group = 1; else if (channel_info[chnlindex] <= 14) /* Channel 10-14 */ group = 2; else if (channel_info[chnlindex] <= 64) group = 6; else if (channel_info[chnlindex] <= 140) group = 7; else group = 8; return group; } static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, u32 *powerbase0, u32 *powerbase1, u32 *p_outwriteval) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 i, chnlgroup = 0, pwr_diff_limit[4]; u32 writeval = 0, customer_limit, rf; for (rf = 0; rf < 2; rf++) { switch (rtlefuse->eeprom_regulatory) { case 0: chnlgroup = 0; writeval = rtlphy->mcs_txpwrlevel_origoffset [chnlgroup][index + (rf ? 8 : 0)] + ((index < 2) ? 
powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "RTK better performance, writeval(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeval); break; case 1: if (rtlphy->pwrgroup_cnt == 1) chnlgroup = 0; if (rtlphy->pwrgroup_cnt >= MAX_PG_GROUP) { chnlgroup = _rtl92d_phy_get_chnlgroup_bypg( channel - 1); if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) chnlgroup++; else chnlgroup += 4; writeval = rtlphy->mcs_txpwrlevel_origoffset [chnlgroup][index + (rf ? 8 : 0)] + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeval); } break; case 2: writeval = ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Better regulatory, writeval(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeval); break; case 3: chnlgroup = 0; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 40MHz rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', rtlefuse->pwrgroup_ht40[rf] [channel - 1]); } else { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 20MHz rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', rtlefuse->pwrgroup_ht20[rf] [channel - 1]); } for (i = 0; i < 4; i++) { pwr_diff_limit[i] = (u8)((rtlphy->mcs_txpwrlevel_origoffset [chnlgroup][index + (rf ? 8 : 0)] & (0x7f << (i * 8))) >> (i * 8)); if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { if (pwr_diff_limit[i] > rtlefuse->pwrgroup_ht40[rf] [channel - 1]) pwr_diff_limit[i] = rtlefuse->pwrgroup_ht40 [rf][channel - 1]; } else { if (pwr_diff_limit[i] > rtlefuse->pwrgroup_ht20[rf][ channel - 1]) pwr_diff_limit[i] = rtlefuse->pwrgroup_ht20[rf] [channel - 1]; } } customer_limit = (pwr_diff_limit[3] << 24) | (pwr_diff_limit[2] << 16) | (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer's limit rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', customer_limit); writeval = customer_limit + ((index < 2) ? 
powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer, writeval rf(%c)= 0x%x\n", rf == 0 ? 'A' : 'B', writeval); break; default: chnlgroup = 0; writeval = rtlphy->mcs_txpwrlevel_origoffset [chnlgroup][index + (rf ? 8 : 0)] + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "RTK better performance, writeval rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeval); break; } *(p_outwriteval + rf) = writeval; } } static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw, u8 index, u32 *pvalue) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); static u16 regoffset_a[6] = { RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24, RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04, RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12 }; static u16 regoffset_b[6] = { RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24, RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04, RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 }; u8 i, rf, pwr_val[4]; u32 writeval; u16 regoffset; for (rf = 0; rf < 2; rf++) { writeval = pvalue[rf]; for (i = 0; i < 4; i++) { pwr_val[i] = (u8) ((writeval & (0x7f << (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) pwr_val[i] = RF6052_MAX_TX_PWR; } writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) | (pwr_val[1] << 8) | pwr_val[0]; if (rf == 0) regoffset = regoffset_a[index]; else regoffset = regoffset_b[index]; rtl_set_bbreg(hw, regoffset, BMASKDWORD, writeval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Set 0x%x = %08x\n", regoffset, writeval); if (((get_rf_type(rtlphy) == RF_2T2R) && (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_B_MCS15_MCS12)) || ((get_rf_type(rtlphy) != RF_2T2R) && (regoffset == RTXAGC_A_MCS07_MCS04 || regoffset == RTXAGC_B_MCS07_MCS04))) { writeval = pwr_val[3]; if (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_A_MCS07_MCS04) regoffset = 0xc90; if (regoffset == RTXAGC_B_MCS15_MCS12 || regoffset == RTXAGC_B_MCS07_MCS04) regoffset = 0xc98; for (i = 0; i < 3; i++) { if (i != 2) 
writeval = (writeval > 8) ? (writeval - 8) : 0; else writeval = (writeval > 6) ? (writeval - 6) : 0; rtl_write_byte(rtlpriv, (u32) (regoffset + i), (u8) writeval); } } } } void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel) { u32 writeval[2], powerbase0[2], powerbase1[2]; u8 index; _rtl92d_phy_get_power_base(hw, ppowerlevel, channel, &powerbase0[0], &powerbase1[0]); for (index = 0; index < 6; index++) { _rtl92d_get_txpower_writeval_by_regulatory(hw, channel, index, &powerbase0[0], &powerbase1[0], &writeval[0]); _rtl92d_write_ofdm_power_reg(hw, index, &writeval[0]); } } bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = &(rtlpriv->rtlhal); u8 u1btmp; u8 direct = bmac0 ? BIT(3) | BIT(2) : BIT(3); u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0; u8 mac_on_bit = bmac0 ? MAC1_ON : MAC0_ON; bool bresult = true; /* true: need to enable BB/RF power */ rtlhal->during_mac0init_radiob = false; rtlhal->during_mac1init_radioa = false; RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "===>\n"); /* MAC0 Need PHY1 load radio_b.txt . Driver use DBI to write. */ u1btmp = rtl_read_byte(rtlpriv, mac_reg); if (!(u1btmp & mac_on_bit)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable BB & RF\n"); /* Enable BB and RF power */ rtl92de_write_dword_dbi(hw, REG_SYS_ISO_CTRL, rtl92de_read_dword_dbi(hw, REG_SYS_ISO_CTRL, direct) | BIT(29) | BIT(16) | BIT(17), direct); } else { /* We think if MAC1 is ON,then radio_a.txt * and radio_b.txt has been load. */ bresult = false; } RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<===\n"); return bresult; } void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = &(rtlpriv->rtlhal); u8 u1btmp; u8 direct = bmac0 ? BIT(3) | BIT(2) : BIT(3); u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0; u8 mac_on_bit = bmac0 ? 
MAC1_ON : MAC0_ON; rtlhal->during_mac0init_radiob = false; rtlhal->during_mac1init_radioa = false; RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "====>\n"); /* check MAC0 enable or not again now, if * enabled, not power down radio A. */ u1btmp = rtl_read_byte(rtlpriv, mac_reg); if (!(u1btmp & mac_on_bit)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "power down\n"); /* power down RF radio A according to YuNan's advice. */ rtl92de_write_dword_dbi(hw, RFPGA0_XA_LSSIPARAMETER, 0x00000000, direct); } RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<====\n"); } bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); bool rtstatus = true; struct rtl_hal *rtlhal = &(rtlpriv->rtlhal); u32 u4_regvalue = 0; u8 rfpath; struct bb_reg_def *pphyreg; bool mac1_initradioa_first = false, mac0_initradiob_first = false; bool need_pwrdown_radioa = false, need_pwrdown_radiob = false; bool true_bpath = false; if (rtlphy->rf_type == RF_1T1R) rtlphy->num_total_rfpath = 1; else rtlphy->num_total_rfpath = 2; /* Single phy mode: use radio_a radio_b config path_A path_B */ /* seperately by MAC0, and MAC1 needn't configure RF; */ /* Dual PHY mode:MAC0 use radio_a config 1st phy path_A, */ /* MAC1 use radio_b config 2nd PHY path_A. */ /* DMDP,MAC0 on G band,MAC1 on A band. */ if (rtlhal->macphymode == DUALMAC_DUALPHY) { if (rtlhal->current_bandtype == BAND_ON_2_4G && rtlhal->interfaceindex == 0) { /* MAC0 needs PHY1 load radio_b.txt. * Driver use DBI to write. */ if (rtl92d_phy_enable_anotherphy(hw, true)) { rtlphy->num_total_rfpath = 2; mac0_initradiob_first = true; } else { /* We think if MAC1 is ON,then radio_a.txt and * radio_b.txt has been load. */ return rtstatus; } } else if (rtlhal->current_bandtype == BAND_ON_5G && rtlhal->interfaceindex == 1) { /* MAC1 needs PHY0 load radio_a.txt. * Driver use DBI to write. 
*/ if (rtl92d_phy_enable_anotherphy(hw, false)) { rtlphy->num_total_rfpath = 2; mac1_initradioa_first = true; } else { /* We think if MAC0 is ON,then radio_a.txt and * radio_b.txt has been load. */ return rtstatus; } } else if (rtlhal->interfaceindex == 1) { /* MAC0 enabled, only init radia B. */ true_bpath = true; } } for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { /* Mac1 use PHY0 write */ if (mac1_initradioa_first) { if (rfpath == RF90_PATH_A) { rtlhal->during_mac1init_radioa = true; need_pwrdown_radioa = true; } else if (rfpath == RF90_PATH_B) { rtlhal->during_mac1init_radioa = false; mac1_initradioa_first = false; rfpath = RF90_PATH_A; true_bpath = true; rtlphy->num_total_rfpath = 1; } } else if (mac0_initradiob_first) { /* Mac0 use PHY1 write */ if (rfpath == RF90_PATH_A) rtlhal->during_mac0init_radiob = false; if (rfpath == RF90_PATH_B) { rtlhal->during_mac0init_radiob = true; mac0_initradiob_first = false; need_pwrdown_radiob = true; rfpath = RF90_PATH_A; true_bpath = true; rtlphy->num_total_rfpath = 1; } } pphyreg = &rtlphy->phyreg_def[rfpath]; switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV); break; case RF90_PATH_B: case RF90_PATH_D: u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16); break; } rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); udelay(1); rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1); udelay(1); /* Set bit number of Address and Data for RF register */ /* Set 1 to 4 bits for 8255 */ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDRESSLENGTH, 0x0); udelay(1); /* Set 0 to 12 bits for 8255 */ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0); udelay(1); switch (rfpath) { case RF90_PATH_A: if (true_bpath) rtstatus = rtl92d_phy_config_rf_with_headerfile( hw, radiob_txt, (enum radio_path)rfpath); else rtstatus = rtl92d_phy_config_rf_with_headerfile( hw, radioa_txt, (enum radio_path)rfpath); break; case 
RF90_PATH_B: rtstatus = rtl92d_phy_config_rf_with_headerfile(hw, radiob_txt, (enum radio_path) rfpath); break; case RF90_PATH_C: break; case RF90_PATH_D: break; } switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, u4_regvalue); break; case RF90_PATH_B: case RF90_PATH_D: rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16, u4_regvalue); break; } if (!rtstatus) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio[%d] Fail!!", rfpath); goto phy_rf_cfg_fail; } } /* check MAC0 enable or not again, if enabled, * not power down radio A. */ /* check MAC1 enable or not again, if enabled, * not power down radio B. */ if (need_pwrdown_radioa) rtl92d_phy_powerdown_anotherphy(hw, false); else if (need_pwrdown_radiob) rtl92d_phy_powerdown_anotherphy(hw, true); RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n"); return rtstatus; phy_rf_cfg_fail: return rtstatus; }
gpl-2.0
sfoolish/linux_3.2.0-39.62_ubuntu12.04
arch/um/drivers/daemon_user.c
5012
4350
/* * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and * James Leu (jleu@mindspring.net). * Copyright (C) 2001 by various other people who didn't put their name here. * Licensed under the GPL. */ #include <stdint.h> #include <unistd.h> #include <errno.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/time.h> #include <sys/un.h> #include "daemon.h" #include "net_user.h" #include "os.h" #include "um_malloc.h" enum request_type { REQ_NEW_CONTROL }; #define SWITCH_MAGIC 0xfeedface struct request_v3 { uint32_t magic; uint32_t version; enum request_type type; struct sockaddr_un sock; }; static struct sockaddr_un *new_addr(void *name, int len) { struct sockaddr_un *sun; sun = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL); if (sun == NULL) { printk(UM_KERN_ERR "new_addr: allocation of sockaddr_un " "failed\n"); return NULL; } sun->sun_family = AF_UNIX; memcpy(sun->sun_path, name, len); return sun; } static int connect_to_switch(struct daemon_data *pri) { struct sockaddr_un *ctl_addr = pri->ctl_addr; struct sockaddr_un *local_addr = pri->local_addr; struct sockaddr_un *sun; struct request_v3 req; int fd, n, err; pri->control = socket(AF_UNIX, SOCK_STREAM, 0); if (pri->control < 0) { err = -errno; printk(UM_KERN_ERR "daemon_open : control socket failed, " "errno = %d\n", -err); return err; } if (connect(pri->control, (struct sockaddr *) ctl_addr, sizeof(*ctl_addr)) < 0) { err = -errno; printk(UM_KERN_ERR "daemon_open : control connect failed, " "errno = %d\n", -err); goto out; } fd = socket(AF_UNIX, SOCK_DGRAM, 0); if (fd < 0) { err = -errno; printk(UM_KERN_ERR "daemon_open : data socket failed, " "errno = %d\n", -err); goto out; } if (bind(fd, (struct sockaddr *) local_addr, sizeof(*local_addr)) < 0) { err = -errno; printk(UM_KERN_ERR "daemon_open : data bind failed, " "errno = %d\n", -err); goto out_close; } sun = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL); 
if (sun == NULL) { printk(UM_KERN_ERR "new_addr: allocation of sockaddr_un " "failed\n"); err = -ENOMEM; goto out_close; } req.magic = SWITCH_MAGIC; req.version = SWITCH_VERSION; req.type = REQ_NEW_CONTROL; req.sock = *local_addr; n = write(pri->control, &req, sizeof(req)); if (n != sizeof(req)) { printk(UM_KERN_ERR "daemon_open : control setup request " "failed, err = %d\n", -errno); err = -ENOTCONN; goto out_free; } n = read(pri->control, sun, sizeof(*sun)); if (n != sizeof(*sun)) { printk(UM_KERN_ERR "daemon_open : read of data socket failed, " "err = %d\n", -errno); err = -ENOTCONN; goto out_free; } pri->data_addr = sun; return fd; out_free: kfree(sun); out_close: close(fd); out: close(pri->control); return err; } static int daemon_user_init(void *data, void *dev) { struct daemon_data *pri = data; struct timeval tv; struct { char zero; int pid; int usecs; } name; if (!strcmp(pri->sock_type, "unix")) pri->ctl_addr = new_addr(pri->ctl_sock, strlen(pri->ctl_sock) + 1); name.zero = 0; name.pid = os_getpid(); gettimeofday(&tv, NULL); name.usecs = tv.tv_usec; pri->local_addr = new_addr(&name, sizeof(name)); pri->dev = dev; pri->fd = connect_to_switch(pri); if (pri->fd < 0) { kfree(pri->local_addr); pri->local_addr = NULL; return pri->fd; } return 0; } static int daemon_open(void *data) { struct daemon_data *pri = data; return pri->fd; } static void daemon_remove(void *data) { struct daemon_data *pri = data; close(pri->fd); pri->fd = -1; close(pri->control); pri->control = -1; kfree(pri->data_addr); pri->data_addr = NULL; kfree(pri->ctl_addr); pri->ctl_addr = NULL; kfree(pri->local_addr); pri->local_addr = NULL; } int daemon_user_write(int fd, void *buf, int len, struct daemon_data *pri) { struct sockaddr_un *data_addr = pri->data_addr; return net_sendto(fd, buf, len, data_addr, sizeof(*data_addr)); } const struct net_user_info daemon_user_info = { .init = daemon_user_init, .open = daemon_open, .close = NULL, .remove = daemon_remove, .add_address = NULL, 
.delete_address = NULL, .mtu = ETH_MAX_PACKET, .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER, };
gpl-2.0
Red--Code/Code-Red-honami
sound/synth/emux/emux_seq.c
7572
8956
/* * Midi Sequencer interface routines. * * Copyright (C) 1999 Steve Ratcliffe * Copyright (c) 1999-2000 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "emux_voice.h" #include <linux/slab.h> #include <linux/module.h> /* Prototypes for static functions */ static void free_port(void *private); static void snd_emux_init_port(struct snd_emux_port *p); static int snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info); static int snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info); /* * MIDI emulation operators */ static struct snd_midi_op emux_ops = { snd_emux_note_on, snd_emux_note_off, snd_emux_key_press, snd_emux_terminate_note, snd_emux_control, snd_emux_nrpn, snd_emux_sysex, }; /* * number of MIDI channels */ #define MIDI_CHANNELS 16 /* * type flags for MIDI sequencer port */ #define DEFAULT_MIDI_TYPE (SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC |\ SNDRV_SEQ_PORT_TYPE_MIDI_GM |\ SNDRV_SEQ_PORT_TYPE_MIDI_GS |\ SNDRV_SEQ_PORT_TYPE_MIDI_XG |\ SNDRV_SEQ_PORT_TYPE_HARDWARE |\ SNDRV_SEQ_PORT_TYPE_SYNTHESIZER) /* * Initialise the EMUX Synth by creating a client and registering * a series of ports. * Each of the ports will contain the 16 midi channels. Applications * can connect to these ports to play midi data. 
*/ int snd_emux_init_seq(struct snd_emux *emu, struct snd_card *card, int index) { int i; struct snd_seq_port_callback pinfo; char tmpname[64]; emu->client = snd_seq_create_kernel_client(card, index, "%s WaveTable", emu->name); if (emu->client < 0) { snd_printk(KERN_ERR "can't create client\n"); return -ENODEV; } if (emu->num_ports < 0) { snd_printk(KERN_WARNING "seqports must be greater than zero\n"); emu->num_ports = 1; } else if (emu->num_ports >= SNDRV_EMUX_MAX_PORTS) { snd_printk(KERN_WARNING "too many ports." "limited max. ports %d\n", SNDRV_EMUX_MAX_PORTS); emu->num_ports = SNDRV_EMUX_MAX_PORTS; } memset(&pinfo, 0, sizeof(pinfo)); pinfo.owner = THIS_MODULE; pinfo.use = snd_emux_use; pinfo.unuse = snd_emux_unuse; pinfo.event_input = snd_emux_event_input; for (i = 0; i < emu->num_ports; i++) { struct snd_emux_port *p; sprintf(tmpname, "%s Port %d", emu->name, i); p = snd_emux_create_port(emu, tmpname, MIDI_CHANNELS, 0, &pinfo); if (p == NULL) { snd_printk(KERN_ERR "can't create port\n"); return -ENOMEM; } p->port_mode = SNDRV_EMUX_PORT_MODE_MIDI; snd_emux_init_port(p); emu->ports[i] = p->chset.port; emu->portptrs[i] = p; } return 0; } /* * Detach from the ports that were set up for this synthesizer and * destroy the kernel client. 
*/ void snd_emux_detach_seq(struct snd_emux *emu) { if (emu->voices) snd_emux_terminate_all(emu); mutex_lock(&emu->register_mutex); if (emu->client >= 0) { snd_seq_delete_kernel_client(emu->client); emu->client = -1; } mutex_unlock(&emu->register_mutex); } /* * create a sequencer port and channel_set */ struct snd_emux_port * snd_emux_create_port(struct snd_emux *emu, char *name, int max_channels, int oss_port, struct snd_seq_port_callback *callback) { struct snd_emux_port *p; int i, type, cap; /* Allocate structures for this channel */ if ((p = kzalloc(sizeof(*p), GFP_KERNEL)) == NULL) { snd_printk(KERN_ERR "no memory\n"); return NULL; } p->chset.channels = kcalloc(max_channels, sizeof(struct snd_midi_channel), GFP_KERNEL); if (p->chset.channels == NULL) { snd_printk(KERN_ERR "no memory\n"); kfree(p); return NULL; } for (i = 0; i < max_channels; i++) p->chset.channels[i].number = i; p->chset.private_data = p; p->chset.max_channels = max_channels; p->emu = emu; p->chset.client = emu->client; #ifdef SNDRV_EMUX_USE_RAW_EFFECT snd_emux_create_effect(p); #endif callback->private_free = free_port; callback->private_data = p; cap = SNDRV_SEQ_PORT_CAP_WRITE; if (oss_port) { type = SNDRV_SEQ_PORT_TYPE_SPECIFIC; } else { type = DEFAULT_MIDI_TYPE; cap |= SNDRV_SEQ_PORT_CAP_SUBS_WRITE; } p->chset.port = snd_seq_event_port_attach(emu->client, callback, cap, type, max_channels, emu->max_voices, name); return p; } /* * release memory block for port */ static void free_port(void *private_data) { struct snd_emux_port *p; p = private_data; if (p) { #ifdef SNDRV_EMUX_USE_RAW_EFFECT snd_emux_delete_effect(p); #endif kfree(p->chset.channels); kfree(p); } } #define DEFAULT_DRUM_FLAGS (1<<9) /* * initialize the port specific parameters */ static void snd_emux_init_port(struct snd_emux_port *p) { p->drum_flags = DEFAULT_DRUM_FLAGS; p->volume_atten = 0; snd_emux_reset_port(p); } /* * reset port */ void snd_emux_reset_port(struct snd_emux_port *port) { int i; /* stop all sounds */ 
snd_emux_sounds_off_all(port); snd_midi_channel_set_clear(&port->chset); #ifdef SNDRV_EMUX_USE_RAW_EFFECT snd_emux_clear_effect(port); #endif /* set port specific control parameters */ port->ctrls[EMUX_MD_DEF_BANK] = 0; port->ctrls[EMUX_MD_DEF_DRUM] = 0; port->ctrls[EMUX_MD_REALTIME_PAN] = 1; for (i = 0; i < port->chset.max_channels; i++) { struct snd_midi_channel *chan = port->chset.channels + i; chan->drum_channel = ((port->drum_flags >> i) & 1) ? 1 : 0; } } /* * input sequencer event */ int snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data, int atomic, int hop) { struct snd_emux_port *port; port = private_data; if (snd_BUG_ON(!port || !ev)) return -EINVAL; snd_midi_process_event(&emux_ops, ev, &port->chset); return 0; } /* * increment usage count */ int snd_emux_inc_count(struct snd_emux *emu) { emu->used++; if (!try_module_get(emu->ops.owner)) goto __error; if (!try_module_get(emu->card->module)) { module_put(emu->ops.owner); __error: emu->used--; return 0; } return 1; } /* * decrease usage count */ void snd_emux_dec_count(struct snd_emux *emu) { module_put(emu->card->module); emu->used--; if (emu->used <= 0) snd_emux_terminate_all(emu); module_put(emu->ops.owner); } /* * Routine that is called upon a first use of a particular port */ static int snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info) { struct snd_emux_port *p; struct snd_emux *emu; p = private_data; if (snd_BUG_ON(!p)) return -EINVAL; emu = p->emu; if (snd_BUG_ON(!emu)) return -EINVAL; mutex_lock(&emu->register_mutex); snd_emux_init_port(p); snd_emux_inc_count(emu); mutex_unlock(&emu->register_mutex); return 0; } /* * Routine that is called upon the last unuse() of a particular port. 
*/ static int snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info) { struct snd_emux_port *p; struct snd_emux *emu; p = private_data; if (snd_BUG_ON(!p)) return -EINVAL; emu = p->emu; if (snd_BUG_ON(!emu)) return -EINVAL; mutex_lock(&emu->register_mutex); snd_emux_sounds_off_all(p); snd_emux_dec_count(emu); mutex_unlock(&emu->register_mutex); return 0; } /* * attach virtual rawmidi devices */ int snd_emux_init_virmidi(struct snd_emux *emu, struct snd_card *card) { int i; emu->vmidi = NULL; if (emu->midi_ports <= 0) return 0; emu->vmidi = kcalloc(emu->midi_ports, sizeof(struct snd_rawmidi *), GFP_KERNEL); if (emu->vmidi == NULL) return -ENOMEM; for (i = 0; i < emu->midi_ports; i++) { struct snd_rawmidi *rmidi; struct snd_virmidi_dev *rdev; if (snd_virmidi_new(card, emu->midi_devidx + i, &rmidi) < 0) goto __error; rdev = rmidi->private_data; sprintf(rmidi->name, "%s Synth MIDI", emu->name); rdev->seq_mode = SNDRV_VIRMIDI_SEQ_ATTACH; rdev->client = emu->client; rdev->port = emu->ports[i]; if (snd_device_register(card, rmidi) < 0) { snd_device_free(card, rmidi); goto __error; } emu->vmidi[i] = rmidi; /* snd_printk(KERN_DEBUG "virmidi %d ok\n", i); */ } return 0; __error: /* snd_printk(KERN_DEBUG "error init..\n"); */ snd_emux_delete_virmidi(emu); return -ENOMEM; } int snd_emux_delete_virmidi(struct snd_emux *emu) { int i; if (emu->vmidi == NULL) return 0; for (i = 0; i < emu->midi_ports; i++) { if (emu->vmidi[i]) snd_device_free(emu->card, emu->vmidi[i]); } kfree(emu->vmidi); emu->vmidi = NULL; return 0; }
gpl-2.0
joeyli/linux-modsign
sound/synth/emux/emux_seq.c
7572
8956
/* * Midi Sequencer interface routines. * * Copyright (C) 1999 Steve Ratcliffe * Copyright (c) 1999-2000 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "emux_voice.h" #include <linux/slab.h> #include <linux/module.h> /* Prototypes for static functions */ static void free_port(void *private); static void snd_emux_init_port(struct snd_emux_port *p); static int snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info); static int snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info); /* * MIDI emulation operators */ static struct snd_midi_op emux_ops = { snd_emux_note_on, snd_emux_note_off, snd_emux_key_press, snd_emux_terminate_note, snd_emux_control, snd_emux_nrpn, snd_emux_sysex, }; /* * number of MIDI channels */ #define MIDI_CHANNELS 16 /* * type flags for MIDI sequencer port */ #define DEFAULT_MIDI_TYPE (SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC |\ SNDRV_SEQ_PORT_TYPE_MIDI_GM |\ SNDRV_SEQ_PORT_TYPE_MIDI_GS |\ SNDRV_SEQ_PORT_TYPE_MIDI_XG |\ SNDRV_SEQ_PORT_TYPE_HARDWARE |\ SNDRV_SEQ_PORT_TYPE_SYNTHESIZER) /* * Initialise the EMUX Synth by creating a client and registering * a series of ports. * Each of the ports will contain the 16 midi channels. Applications * can connect to these ports to play midi data. 
*/ int snd_emux_init_seq(struct snd_emux *emu, struct snd_card *card, int index) { int i; struct snd_seq_port_callback pinfo; char tmpname[64]; emu->client = snd_seq_create_kernel_client(card, index, "%s WaveTable", emu->name); if (emu->client < 0) { snd_printk(KERN_ERR "can't create client\n"); return -ENODEV; } if (emu->num_ports < 0) { snd_printk(KERN_WARNING "seqports must be greater than zero\n"); emu->num_ports = 1; } else if (emu->num_ports >= SNDRV_EMUX_MAX_PORTS) { snd_printk(KERN_WARNING "too many ports." "limited max. ports %d\n", SNDRV_EMUX_MAX_PORTS); emu->num_ports = SNDRV_EMUX_MAX_PORTS; } memset(&pinfo, 0, sizeof(pinfo)); pinfo.owner = THIS_MODULE; pinfo.use = snd_emux_use; pinfo.unuse = snd_emux_unuse; pinfo.event_input = snd_emux_event_input; for (i = 0; i < emu->num_ports; i++) { struct snd_emux_port *p; sprintf(tmpname, "%s Port %d", emu->name, i); p = snd_emux_create_port(emu, tmpname, MIDI_CHANNELS, 0, &pinfo); if (p == NULL) { snd_printk(KERN_ERR "can't create port\n"); return -ENOMEM; } p->port_mode = SNDRV_EMUX_PORT_MODE_MIDI; snd_emux_init_port(p); emu->ports[i] = p->chset.port; emu->portptrs[i] = p; } return 0; } /* * Detach from the ports that were set up for this synthesizer and * destroy the kernel client. 
*/ void snd_emux_detach_seq(struct snd_emux *emu) { if (emu->voices) snd_emux_terminate_all(emu); mutex_lock(&emu->register_mutex); if (emu->client >= 0) { snd_seq_delete_kernel_client(emu->client); emu->client = -1; } mutex_unlock(&emu->register_mutex); } /* * create a sequencer port and channel_set */ struct snd_emux_port * snd_emux_create_port(struct snd_emux *emu, char *name, int max_channels, int oss_port, struct snd_seq_port_callback *callback) { struct snd_emux_port *p; int i, type, cap; /* Allocate structures for this channel */ if ((p = kzalloc(sizeof(*p), GFP_KERNEL)) == NULL) { snd_printk(KERN_ERR "no memory\n"); return NULL; } p->chset.channels = kcalloc(max_channels, sizeof(struct snd_midi_channel), GFP_KERNEL); if (p->chset.channels == NULL) { snd_printk(KERN_ERR "no memory\n"); kfree(p); return NULL; } for (i = 0; i < max_channels; i++) p->chset.channels[i].number = i; p->chset.private_data = p; p->chset.max_channels = max_channels; p->emu = emu; p->chset.client = emu->client; #ifdef SNDRV_EMUX_USE_RAW_EFFECT snd_emux_create_effect(p); #endif callback->private_free = free_port; callback->private_data = p; cap = SNDRV_SEQ_PORT_CAP_WRITE; if (oss_port) { type = SNDRV_SEQ_PORT_TYPE_SPECIFIC; } else { type = DEFAULT_MIDI_TYPE; cap |= SNDRV_SEQ_PORT_CAP_SUBS_WRITE; } p->chset.port = snd_seq_event_port_attach(emu->client, callback, cap, type, max_channels, emu->max_voices, name); return p; } /* * release memory block for port */ static void free_port(void *private_data) { struct snd_emux_port *p; p = private_data; if (p) { #ifdef SNDRV_EMUX_USE_RAW_EFFECT snd_emux_delete_effect(p); #endif kfree(p->chset.channels); kfree(p); } } #define DEFAULT_DRUM_FLAGS (1<<9) /* * initialize the port specific parameters */ static void snd_emux_init_port(struct snd_emux_port *p) { p->drum_flags = DEFAULT_DRUM_FLAGS; p->volume_atten = 0; snd_emux_reset_port(p); } /* * reset port */ void snd_emux_reset_port(struct snd_emux_port *port) { int i; /* stop all sounds */ 
snd_emux_sounds_off_all(port); snd_midi_channel_set_clear(&port->chset); #ifdef SNDRV_EMUX_USE_RAW_EFFECT snd_emux_clear_effect(port); #endif /* set port specific control parameters */ port->ctrls[EMUX_MD_DEF_BANK] = 0; port->ctrls[EMUX_MD_DEF_DRUM] = 0; port->ctrls[EMUX_MD_REALTIME_PAN] = 1; for (i = 0; i < port->chset.max_channels; i++) { struct snd_midi_channel *chan = port->chset.channels + i; chan->drum_channel = ((port->drum_flags >> i) & 1) ? 1 : 0; } } /* * input sequencer event */ int snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data, int atomic, int hop) { struct snd_emux_port *port; port = private_data; if (snd_BUG_ON(!port || !ev)) return -EINVAL; snd_midi_process_event(&emux_ops, ev, &port->chset); return 0; } /* * increment usage count */ int snd_emux_inc_count(struct snd_emux *emu) { emu->used++; if (!try_module_get(emu->ops.owner)) goto __error; if (!try_module_get(emu->card->module)) { module_put(emu->ops.owner); __error: emu->used--; return 0; } return 1; } /* * decrease usage count */ void snd_emux_dec_count(struct snd_emux *emu) { module_put(emu->card->module); emu->used--; if (emu->used <= 0) snd_emux_terminate_all(emu); module_put(emu->ops.owner); } /* * Routine that is called upon a first use of a particular port */ static int snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info) { struct snd_emux_port *p; struct snd_emux *emu; p = private_data; if (snd_BUG_ON(!p)) return -EINVAL; emu = p->emu; if (snd_BUG_ON(!emu)) return -EINVAL; mutex_lock(&emu->register_mutex); snd_emux_init_port(p); snd_emux_inc_count(emu); mutex_unlock(&emu->register_mutex); return 0; } /* * Routine that is called upon the last unuse() of a particular port. 
*/ static int snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info) { struct snd_emux_port *p; struct snd_emux *emu; p = private_data; if (snd_BUG_ON(!p)) return -EINVAL; emu = p->emu; if (snd_BUG_ON(!emu)) return -EINVAL; mutex_lock(&emu->register_mutex); snd_emux_sounds_off_all(p); snd_emux_dec_count(emu); mutex_unlock(&emu->register_mutex); return 0; } /* * attach virtual rawmidi devices */ int snd_emux_init_virmidi(struct snd_emux *emu, struct snd_card *card) { int i; emu->vmidi = NULL; if (emu->midi_ports <= 0) return 0; emu->vmidi = kcalloc(emu->midi_ports, sizeof(struct snd_rawmidi *), GFP_KERNEL); if (emu->vmidi == NULL) return -ENOMEM; for (i = 0; i < emu->midi_ports; i++) { struct snd_rawmidi *rmidi; struct snd_virmidi_dev *rdev; if (snd_virmidi_new(card, emu->midi_devidx + i, &rmidi) < 0) goto __error; rdev = rmidi->private_data; sprintf(rmidi->name, "%s Synth MIDI", emu->name); rdev->seq_mode = SNDRV_VIRMIDI_SEQ_ATTACH; rdev->client = emu->client; rdev->port = emu->ports[i]; if (snd_device_register(card, rmidi) < 0) { snd_device_free(card, rmidi); goto __error; } emu->vmidi[i] = rmidi; /* snd_printk(KERN_DEBUG "virmidi %d ok\n", i); */ } return 0; __error: /* snd_printk(KERN_DEBUG "error init..\n"); */ snd_emux_delete_virmidi(emu); return -ENOMEM; } int snd_emux_delete_virmidi(struct snd_emux *emu) { int i; if (emu->vmidi == NULL) return 0; for (i = 0; i < emu->midi_ports; i++) { if (emu->vmidi[i]) snd_device_free(emu->card, emu->vmidi[i]); } kfree(emu->vmidi); emu->vmidi = NULL; return 0; }
gpl-2.0
RadonX-MM/kernel
drivers/isdn/mISDN/dsp_audio.c
8852
11083
/* * Audio support data for mISDN_dsp. * * Copyright 2002/2003 by Andreas Eversberg (jolly@eversberg.eu) * Rewritten by Peter * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/delay.h> #include <linux/mISDNif.h> #include <linux/mISDNdsp.h> #include <linux/export.h> #include "core.h" #include "dsp.h" /* ulaw[unsigned char] -> signed 16-bit */ s32 dsp_audio_ulaw_to_s32[256]; /* alaw[unsigned char] -> signed 16-bit */ s32 dsp_audio_alaw_to_s32[256]; s32 *dsp_audio_law_to_s32; EXPORT_SYMBOL(dsp_audio_law_to_s32); /* signed 16-bit -> law */ u8 dsp_audio_s16_to_law[65536]; EXPORT_SYMBOL(dsp_audio_s16_to_law); /* alaw -> ulaw */ u8 dsp_audio_alaw_to_ulaw[256]; /* ulaw -> alaw */ static u8 dsp_audio_ulaw_to_alaw[256]; u8 dsp_silence; /***************************************************** * generate table for conversion of s16 to alaw/ulaw * *****************************************************/ #define AMI_MASK 0x55 static inline unsigned char linear2alaw(short int linear) { int mask; int seg; int pcm_val; static int seg_end[8] = { 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF }; pcm_val = linear; if (pcm_val >= 0) { /* Sign (7th) bit = 1 */ mask = AMI_MASK | 0x80; } else { /* Sign bit = 0 */ mask = AMI_MASK; pcm_val = -pcm_val; } /* Convert the scaled magnitude to segment number. */ for (seg = 0; seg < 8; seg++) { if (pcm_val <= seg_end[seg]) break; } /* Combine the sign, segment, and quantization bits. */ return ((seg << 4) | ((pcm_val >> ((seg) ? (seg + 3) : 4)) & 0x0F)) ^ mask; } static inline short int alaw2linear(unsigned char alaw) { int i; int seg; alaw ^= AMI_MASK; i = ((alaw & 0x0F) << 4) + 8 /* rounding error */; seg = (((int) alaw & 0x70) >> 4); if (seg) i = (i + 0x100) << (seg - 1); return (short int) ((alaw & 0x80) ? 
i : -i); } static inline short int ulaw2linear(unsigned char ulaw) { short mu, e, f, y; static short etab[] = {0, 132, 396, 924, 1980, 4092, 8316, 16764}; mu = 255 - ulaw; e = (mu & 0x70) / 16; f = mu & 0x0f; y = f * (1 << (e + 3)); y += etab[e]; if (mu & 0x80) y = -y; return y; } #define BIAS 0x84 /*!< define the add-in bias for 16 bit samples */ static unsigned char linear2ulaw(short sample) { static int exp_lut[256] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; int sign, exponent, mantissa; unsigned char ulawbyte; /* Get the sample into sign-magnitude. */ sign = (sample >> 8) & 0x80; /* set aside the sign */ if (sign != 0) sample = -sample; /* get magnitude */ /* Convert from 16 bit linear to ulaw. 
*/ sample = sample + BIAS; exponent = exp_lut[(sample >> 7) & 0xFF]; mantissa = (sample >> (exponent + 3)) & 0x0F; ulawbyte = ~(sign | (exponent << 4) | mantissa); return ulawbyte; } static int reverse_bits(int i) { int z, j; z = 0; for (j = 0; j < 8; j++) { if ((i & (1 << j)) != 0) z |= 1 << (7 - j); } return z; } void dsp_audio_generate_law_tables(void) { int i; for (i = 0; i < 256; i++) dsp_audio_alaw_to_s32[i] = alaw2linear(reverse_bits(i)); for (i = 0; i < 256; i++) dsp_audio_ulaw_to_s32[i] = ulaw2linear(reverse_bits(i)); for (i = 0; i < 256; i++) { dsp_audio_alaw_to_ulaw[i] = linear2ulaw(dsp_audio_alaw_to_s32[i]); dsp_audio_ulaw_to_alaw[i] = linear2alaw(dsp_audio_ulaw_to_s32[i]); } } void dsp_audio_generate_s2law_table(void) { int i; if (dsp_options & DSP_OPT_ULAW) { /* generating ulaw-table */ for (i = -32768; i < 32768; i++) { dsp_audio_s16_to_law[i & 0xffff] = reverse_bits(linear2ulaw(i)); } } else { /* generating alaw-table */ for (i = -32768; i < 32768; i++) { dsp_audio_s16_to_law[i & 0xffff] = reverse_bits(linear2alaw(i)); } } } /* * the seven bit sample is the number of every second alaw-sample ordered by * aplitude. 0x00 is negative, 0x7f is positive amplitude. 
 */
/* law <-> 7-bit sample lookup tables, filled in by
 * dsp_audio_generate_seven() */
u8 dsp_audio_seven2law[128];
u8 dsp_audio_law2seven[256];

/********************************************************************
 * generate table for conversion law from/to 7-bit alaw-like sample *
 ********************************************************************/

/*
 * Build dsp_audio_law2seven[] and dsp_audio_seven2law[].
 *
 * All 256 alaw codes are ranked by their linear amplitude; every
 * second code of that ranking becomes one of the 128 seven-bit
 * levels.  When DSP_OPT_ULAW is set, the law side of both tables is
 * ulaw instead of alaw (conversion goes through the
 * dsp_audio_ulaw_to_alaw / dsp_audio_alaw_to_ulaw tables).
 */
void dsp_audio_generate_seven(void)
{
	int i, j, k;
	u8 spl;
	u8 sorted_alaw[256];

	/* generate alaw table, sorted by the linear value */
	for (i = 0; i < 256; i++) {
		j = 0;
		for (k = 0; k < 256; k++) {
			if (dsp_audio_alaw_to_s32[k] < dsp_audio_alaw_to_s32[i])
				j++;
		}
		/* j is the rank of code i; assumes the 256 linear values
		 * are all distinct, otherwise entries would collide --
		 * NOTE(review): not checked here */
		sorted_alaw[j] = i;
	}

	/* generate tables */
	for (i = 0; i < 256; i++) {
		/* spl is the source: the law-sample (converted to alaw) */
		spl = i;
		if (dsp_options & DSP_OPT_ULAW)
			spl = dsp_audio_ulaw_to_alaw[i];
		/* find the 7-bit-sample */
		for (j = 0; j < 256; j++) {
			if (sorted_alaw[j] == spl)
				break;
		}
		/* write 7-bit audio value (rank halved: 128 levels) */
		dsp_audio_law2seven[i] = j >> 1;
	}
	for (i = 0; i < 128; i++) {
		/* pick every second code of the amplitude ranking */
		spl = sorted_alaw[i << 1];
		if (dsp_options & DSP_OPT_ULAW)
			spl = dsp_audio_alaw_to_ulaw[spl];
		dsp_audio_seven2law[i] = spl;
	}
}

/* mix 2*law -> law */
u8 dsp_audio_mix_law[65536];

/******************************************************
 * generate mix table to mix two law samples into one *
 ******************************************************/

/*
 * dsp_audio_mix_law[(a << 8) | b] becomes the law code of the
 * saturated sum of the linear values of law codes a and b.
 */
void dsp_audio_generate_mix_table(void)
{
	int i, j;
	s32 sample;

	i = 0;
	while (i < 256) {
		j = 0;
		while (j < 256) {
			sample = dsp_audio_law_to_s32[i];
			sample += dsp_audio_law_to_s32[j];
			/* clip the sum to the 16-bit signed range */
			if (sample > 32767)
				sample = 32767;
			if (sample < -32768)
				sample = -32768;
			dsp_audio_mix_law[(i << 8) | j] =
				dsp_audio_s16_to_law[sample & 0xffff];
			j++;
		}
		i++;
	}
}

/*************************************
 * generate different volume changes *
 *************************************/

/* per-law-code translation tables for eight attenuation and eight
 * gain steps, filled in by dsp_audio_generate_volume_changes() */
static u8 dsp_audio_reduce8[256];
static u8 dsp_audio_reduce7[256];
static u8 dsp_audio_reduce6[256];
static u8 dsp_audio_reduce5[256];
static u8 dsp_audio_reduce4[256];
static u8 dsp_audio_reduce3[256];
static u8 dsp_audio_reduce2[256];
static u8 dsp_audio_reduce1[256];
static u8 dsp_audio_increase1[256];
static u8 dsp_audio_increase2[256];
static u8 dsp_audio_increase3[256];
static u8 dsp_audio_increase4[256];
static u8 dsp_audio_increase5[256];
static u8 dsp_audio_increase6[256];
static u8 dsp_audio_increase7[256];
static u8 dsp_audio_increase8[256];

/* index 0..7 = reduce by 8..1 steps, index 8..15 = increase by
 * 1..8 steps; indexed via the 'shift' value in dsp_change_volume() */
static u8 *dsp_audio_volume_change[16] = {
	dsp_audio_reduce8,
	dsp_audio_reduce7,
	dsp_audio_reduce6,
	dsp_audio_reduce5,
	dsp_audio_reduce4,
	dsp_audio_reduce3,
	dsp_audio_reduce2,
	dsp_audio_reduce1,
	dsp_audio_increase1,
	dsp_audio_increase2,
	dsp_audio_increase3,
	dsp_audio_increase4,
	dsp_audio_increase5,
	dsp_audio_increase6,
	dsp_audio_increase7,
	dsp_audio_increase8,
};

/*
 * Fill the 16 volume translation tables.  Step N scales the linear
 * value by num[N-1]/denum[N-1] for gain (increaseN) and by the
 * inverse denum[N-1]/num[N-1] for attenuation (reduceN).  The
 * attenuation path needs no clipping because the magnitude only
 * shrinks; the gain path is clipped to the 16-bit signed range.
 */
void dsp_audio_generate_volume_changes(void)
{
	register s32 sample;
	int i;
	int num[] = { 110, 125, 150, 175, 200, 300, 400, 500 };
	int denum[] = { 100, 100, 100, 100, 100, 100, 100, 100 };

	i = 0;
	while (i < 256) {
		dsp_audio_reduce8[i] = dsp_audio_s16_to_law[
			(dsp_audio_law_to_s32[i] * denum[7] / num[7]) & 0xffff];
		dsp_audio_reduce7[i] = dsp_audio_s16_to_law[
			(dsp_audio_law_to_s32[i] * denum[6] / num[6]) & 0xffff];
		dsp_audio_reduce6[i] = dsp_audio_s16_to_law[
			(dsp_audio_law_to_s32[i] * denum[5] / num[5]) & 0xffff];
		dsp_audio_reduce5[i] = dsp_audio_s16_to_law[
			(dsp_audio_law_to_s32[i] * denum[4] / num[4]) & 0xffff];
		dsp_audio_reduce4[i] = dsp_audio_s16_to_law[
			(dsp_audio_law_to_s32[i] * denum[3] / num[3]) & 0xffff];
		dsp_audio_reduce3[i] = dsp_audio_s16_to_law[
			(dsp_audio_law_to_s32[i] * denum[2] / num[2]) & 0xffff];
		dsp_audio_reduce2[i] = dsp_audio_s16_to_law[
			(dsp_audio_law_to_s32[i] * denum[1] / num[1]) & 0xffff];
		dsp_audio_reduce1[i] = dsp_audio_s16_to_law[
			(dsp_audio_law_to_s32[i] * denum[0] / num[0]) & 0xffff];
		sample = dsp_audio_law_to_s32[i] * num[0] / denum[0];
		if (sample < -32768)
			sample = -32768;
		else if (sample > 32767)
			sample = 32767;
		dsp_audio_increase1[i] = dsp_audio_s16_to_law[sample & 0xffff];
		sample = dsp_audio_law_to_s32[i] * num[1] / denum[1];
		if (sample < -32768)
			sample = -32768;
		else if (sample > 32767)
			sample = 32767;
		dsp_audio_increase2[i] = dsp_audio_s16_to_law[sample & 0xffff];
		sample = dsp_audio_law_to_s32[i] * num[2] / denum[2];
		if (sample < -32768)
			sample = -32768;
		else if (sample > 32767)
			sample = 32767;
		dsp_audio_increase3[i] = dsp_audio_s16_to_law[sample & 0xffff];
		sample = dsp_audio_law_to_s32[i] * num[3] / denum[3];
		if (sample < -32768)
			sample = -32768;
		else if (sample > 32767)
			sample = 32767;
		dsp_audio_increase4[i] = dsp_audio_s16_to_law[sample & 0xffff];
		sample = dsp_audio_law_to_s32[i] * num[4] / denum[4];
		if (sample < -32768)
			sample = -32768;
		else if (sample > 32767)
			sample = 32767;
		dsp_audio_increase5[i] = dsp_audio_s16_to_law[sample & 0xffff];
		sample = dsp_audio_law_to_s32[i] * num[5] / denum[5];
		if (sample < -32768)
			sample = -32768;
		else if (sample > 32767)
			sample = 32767;
		dsp_audio_increase6[i] = dsp_audio_s16_to_law[sample & 0xffff];
		sample = dsp_audio_law_to_s32[i] * num[6] / denum[6];
		if (sample < -32768)
			sample = -32768;
		else if (sample > 32767)
			sample = 32767;
		dsp_audio_increase7[i] = dsp_audio_s16_to_law[sample & 0xffff];
		sample = dsp_audio_law_to_s32[i] * num[7] / denum[7];
		if (sample < -32768)
			sample = -32768;
		else if (sample > 32767)
			sample = 32767;
		dsp_audio_increase8[i] = dsp_audio_s16_to_law[sample & 0xffff];
		i++;
	}
}

/**************************************
 * change the volume of the given skb *
 **************************************/

/* this is a helper function for changing volume of skb. the range may be
 * -8 to 8, which is a shift to the power of 2. 0 == no volume, 3 == volume*8
 *
 * NOTE(review): despite the wording above, the tables generated in
 * dsp_audio_generate_volume_changes() scale by the num[]/denum[]
 * factors (1.1x .. 5.0x), not by powers of two -- confirm intent.
 */
void dsp_change_volume(struct sk_buff *skb, int volume)
{
	u8 *volume_change;
	int i, ii;
	u8 *p;
	int shift;

	if (volume == 0)
		return;

	/* get correct conversion table: map volume -8..-1 to index 0..7
	 * (reduce) and 1..8 to index 8..15 (increase), clamping */
	if (volume < 0) {
		shift = volume + 8;
		if (shift < 0)
			shift = 0;
	} else {
		shift = volume + 7;
		if (shift > 15)
			shift = 15;
	}
	volume_change = dsp_audio_volume_change[shift];

	i = 0;
	ii = skb->len;
	p = skb->data;

	/* change volume: translate every law byte in place */
	while (i < ii) {
		*p = volume_change[*p];
		p++;
		i++;
	}
}
gpl-2.0
GalaxyTab4/bliss_kernel_samsung_matisse
drivers/char/toshiba.c
12180
13568
/* toshiba.c -- Linux driver for accessing the SMM on Toshiba laptops
 *
 * Copyright (c) 1996-2001  Jonathan A. Buzzard (jonathan@buzzard.org.uk)
 *
 * Valuable assistance and patches from:
 *     Tom May <tom@you-bastards.com>
 *     Rob Napier <rnapier@employees.org>
 *
 * Fn status port numbers for machine ID's courtesy of
 *     0xfc02: Scott Eisert <scott.e@sky-eye.com>
 *     0xfc04: Steve VanDevender <stevev@efn.org>
 *     0xfc08: Garth Berry <garth@itsbruce.net>
 *     0xfc0a: Egbert Eich <eich@xfree86.org>
 *     0xfc10: Andrew Lofthouse <Andrew.Lofthouse@robins.af.mil>
 *     0xfc11: Spencer Olson <solson@novell.com>
 *     0xfc13: Claudius Frankewitz <kryp@gmx.de>
 *     0xfc15: Tom May <tom@you-bastards.com>
 *     0xfc17: Dave Konrad <konrad@xenia.it>
 *     0xfc1a: George Betzos <betzos@engr.colostate.edu>
 *     0xfc1b: Munemasa Wada <munemasa@jnovel.co.jp>
 *     0xfc1d: Arthur Liu <armie@slap.mine.nu>
 *     0xfc5a: Jacques L'helgoualc'h <lhh@free.fr>
 *     0xfcd1: Mr. Dave Konrad <konrad@xenia.it>
 *
 * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
 *
 *   This code is covered by the GNU GPL and you are free to make any
 *   changes you wish to it under the terms of the license. However the
 *   code has the potential to render your computer and/or someone else's
 *   unusable. Please proceed with care when modifying the code.
 *
 * Note: Unfortunately the laptop hardware can close the System Configuration
 *   Interface on it's own accord. It is therefore necessary for *all*
 *   programs using this driver to be aware that *any* SCI call can fail at
 *   *any* time. It is up to any program to be aware of this eventuality
 *   and take appropriate steps.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The information used to write this driver has been obtained by reverse
 * engineering the software supplied by Toshiba for their portable computers in
 * strict accordance with the European Council Directive 92/250/EEC on the legal
 * protection of computer programs, and it's implementation into English Law by
 * the Copyright (Computer Programs) Regulations 1992 (S.I. 1992 No.3233).
 *
 */

#define TOSH_VERSION "1.11 26/9/2001"
#define TOSH_DEBUG 0

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/toshiba.h>

#define TOSH_MINOR_DEV 181

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>");
MODULE_DESCRIPTION("Toshiba laptop SMM driver");
MODULE_SUPPORTED_DEVICE("toshiba");

/* serializes SMM / fan-emulation calls issued through the ioctl */
static DEFINE_MUTEX(tosh_mutex);
/* I/O port for Fn key status; 0 = autodetect in tosh_set_fn_port() */
static int tosh_fn;
module_param_named(fn, tosh_fn, int, 0);
MODULE_PARM_DESC(fn, "User specified Fn key detection port");

static int tosh_id;	/* machine identification number */
static int tosh_bios;	/* BIOS version, 0xMMmm */
static int tosh_date;	/* BIOS date in SCI date format */
static int tosh_sci;	/* SCI version */
static int tosh_fan;	/* 1 = fan must be emulated (Portage 610CT / Tecra 700) */

static long tosh_ioctl(struct file *, unsigned int, unsigned long);

static const struct file_operations tosh_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= tosh_ioctl,
	.llseek		= noop_llseek,
};

static struct miscdevice tosh_device = {
	TOSH_MINOR_DEV,
	"toshiba",
	&tosh_fops
};

/*
 * Read the Fn key status
 */
#ifdef CONFIG_PROC_FS
static int tosh_fn_status(void)
{
	unsigned char scan;
	unsigned long flags;

	if (tosh_fn!=0) {
		/* machine has a dedicated Fn status port */
		scan = inb(tosh_fn);
	} else {
		/* fall back to keyboard controller ports 0xe4/0xe5;
		   IRQs are masked around the indexed read */
		local_irq_save(flags);
		outb(0x8e, 0xe4);
		scan = inb(0xe5);
		local_irq_restore(flags);
	}

	return (int) scan;
}
#endif

/*
 * For the Portage 610CT and the Tecra 700CS/700CDT emulate the HCI fan function
 *
 * regs->eax selects the operation (0xfe00 = status, 0xff00 = set) and
 * regs->ecx the fan state; results are written back into *regs.
 * Always returns 0.
 */
static int tosh_emulate_fan(SMMRegisters *regs)
{
	unsigned long eax,ecx,flags;
	unsigned char al;

	eax = regs->eax & 0xff00;
	ecx = regs->ecx & 0xffff;

	/* Portage 610CT */

	if (tosh_id==0xfccb) {
		if (eax==0xfe00) {
			/* fan status */
			local_irq_save(flags);
			outb(0xbe, 0xe4);
			al = inb(0xe5);
			local_irq_restore(flags);
			regs->eax = 0x00;
			regs->ecx = (unsigned int) (al & 0x01);
		}
		if ((eax==0xff00) && (ecx==0x0000)) {
			/* fan off */
			local_irq_save(flags);
			outb(0xbe, 0xe4);
			al = inb(0xe5);
			outb(0xbe, 0xe4);
			outb (al | 0x01, 0xe5);
			local_irq_restore(flags);
			regs->eax = 0x00;
			regs->ecx = 0x00;
		}
		if ((eax==0xff00) && (ecx==0x0001)) {
			/* fan on */
			local_irq_save(flags);
			outb(0xbe, 0xe4);
			al = inb(0xe5);
			outb(0xbe, 0xe4);
			outb(al & 0xfe, 0xe5);
			local_irq_restore(flags);
			regs->eax = 0x00;
			regs->ecx = 0x01;
		}
	}

	/* Tecra 700CS/CDT */

	if (tosh_id==0xfccc) {
		if (eax==0xfe00) {
			/* fan status */
			local_irq_save(flags);
			outb(0xe0, 0xe4);
			al = inb(0xe5);
			local_irq_restore(flags);
			regs->eax = 0x00;
			regs->ecx = al & 0x01;
		}
		if ((eax==0xff00) && (ecx==0x0000)) {
			/* fan off */
			local_irq_save(flags);
			outb(0xe0, 0xe4);
			al = inb(0xe5);
			outw(0xe0 | ((al & 0xfe) << 8), 0xe4);
			local_irq_restore(flags);
			regs->eax = 0x00;
			regs->ecx = 0x00;
		}
		if ((eax==0xff00) && (ecx==0x0001)) {
			/* fan on */
			local_irq_save(flags);
			outb(0xe0, 0xe4);
			al = inb(0xe5);
			outw(0xe0 | ((al | 0x01) << 8), 0xe4);
			local_irq_restore(flags);
			regs->eax = 0x00;
			regs->ecx = 0x01;
		}
	}

	return 0;
}

/*
 * Put the laptop into System Management Mode
 *
 * Loads CPU registers from *regs, triggers SMM by reading port 0xb2,
 * stores the resulting registers back into *regs, and returns the
 * carry flag (1 = the SMM call failed).  x86-32 only.
 */
int tosh_smm(SMMRegisters *regs)
{
	int eax;

	asm ("# load the values into the registers\n\t" \
		"pushl %%eax\n\t" \
		"movl 0(%%eax),%%edx\n\t" \
		"push %%edx\n\t" \
		"movl 4(%%eax),%%ebx\n\t" \
		"movl 8(%%eax),%%ecx\n\t" \
		"movl 12(%%eax),%%edx\n\t" \
		"movl 16(%%eax),%%esi\n\t" \
		"movl 20(%%eax),%%edi\n\t" \
		"popl %%eax\n\t" \
		"# call the System Management mode\n\t" \
		"inb $0xb2,%%al\n\t"
		"# fill out the memory with the values in the registers\n\t" \
		"xchgl %%eax,(%%esp)\n\t"
		"movl %%ebx,4(%%eax)\n\t" \
		"movl %%ecx,8(%%eax)\n\t" \
		"movl %%edx,12(%%eax)\n\t" \
		"movl %%esi,16(%%eax)\n\t" \
		"movl %%edi,20(%%eax)\n\t" \
		"popl %%edx\n\t" \
		"movl %%edx,0(%%eax)\n\t" \
		"# setup the return value to the carry flag\n\t" \
		"lahf\n\t" \
		"shrl $8,%%eax\n\t" \
		"andl $1,%%eax\n" \
		: "=a" (eax)
		: "a" (regs)
		: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");

	return eax;
}
EXPORT_SYMBOL(tosh_smm);

/*
 * ioctl entry point: copies an SMMRegisters block from userspace,
 * performs the (possibly emulated) SMM call under tosh_mutex, and
 * copies the result back.  Returns -EINVAL for blocked/unknown
 * requests, -EFAULT on copy errors, 0 on success.
 */
static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	SMMRegisters regs;
	SMMRegisters __user *argp = (SMMRegisters __user *)arg;
	unsigned short ax,bx;
	int err;

	if (!argp)
		return -EINVAL;

	if (copy_from_user(&regs, argp, sizeof(SMMRegisters)))
		return -EFAULT;

	switch (cmd) {
		case TOSH_SMM:
			ax = regs.eax & 0xff00;
			bx = regs.ebx & 0xffff;
			/* block HCI calls to read/write memory & PCI devices */
			if (((ax==0xff00) || (ax==0xfe00)) && (bx>0x0069))
				return -EINVAL;

			/* do we need to emulate the fan ? */
			mutex_lock(&tosh_mutex);
			if (tosh_fan==1) {
				if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) {
					err = tosh_emulate_fan(&regs);
					mutex_unlock(&tosh_mutex);
					break;
				}
			}
			err = tosh_smm(&regs);
			mutex_unlock(&tosh_mutex);
			break;
		default:
			return -EINVAL;
	}

	if (copy_to_user(argp, &regs, sizeof(SMMRegisters)))
		return -EFAULT;

	return (err==0) ? 0:-EINVAL;
}

/*
 * Print the information for /proc/toshiba
 */
#ifdef CONFIG_PROC_FS
static int proc_toshiba_show(struct seq_file *m, void *v)
{
	int key;

	key = tosh_fn_status();

	/* Arguments
	     0) Linux driver version (this will change if format changes)
	     1) Machine ID
	     2) SCI version
	     3) BIOS version (major, minor)
	     4) BIOS date (in SCI date format)
	     5) Fn Key status
	*/
	seq_printf(m, "1.1 0x%04x %d.%d %d.%d 0x%04x 0x%02x\n",
		tosh_id,
		(tosh_sci & 0xff00)>>8,
		tosh_sci & 0xff,
		(tosh_bios & 0xff00)>>8,
		tosh_bios & 0xff,
		tosh_date,
		key);
	return 0;
}

static int proc_toshiba_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_toshiba_show, NULL);
}

static const struct file_operations proc_toshiba_fops = {
	.owner		= THIS_MODULE,
	.open		= proc_toshiba_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

/*
 * Determine which port to use for the Fn key status (by machine ID);
 * unknown machines get 0x00 (use the 0xe4/0xe5 fallback).
 */
static void tosh_set_fn_port(void)
{
	switch (tosh_id) {
		case 0xfc02: case 0xfc04: case 0xfc09: case 0xfc0a: case 0xfc10:
		case 0xfc11: case 0xfc13: case 0xfc15: case 0xfc1a: case 0xfc1b:
		case 0xfc5a:
			tosh_fn = 0x62;
			break;
		case 0xfc08: case 0xfc17: case 0xfc1d: case 0xfcd1: case 0xfce0:
		case 0xfce2:
			tosh_fn = 0x68;
			break;
		default:
			tosh_fn = 0x00;
			break;
	}

	return;
}

/*
 * Get the machine identification number of the current model
 *
 * Reads the ID from the mapped BIOS area; machines reporting the
 * SCTTable marker 0xfc2f require an extra SMM call plus pointer
 * chasing through the BIOS to find the real ID.
 */
static int tosh_get_machine_id(void __iomem *bios)
{
	int id;
	SMMRegisters regs;
	unsigned short bx,cx;
	unsigned long address;

	id = (0x100*(int) readb(bios+0xfffe))+((int) readb(bios+0xfffa));

	/* do we have a SCTTable machine identication number on our hands */
	if (id==0xfc2f) {

		/* start by getting a pointer into the BIOS */
		regs.eax = 0xc000;
		regs.ebx = 0x0000;
		regs.ecx = 0x0000;
		tosh_smm(&regs);
		bx = (unsigned short) (regs.ebx & 0xffff);

		/* At this point in the Toshiba routines under MS Windows
		   the bx register holds 0xe6f5. However my code is producing
		   a different value! For the time being I will just fudge the
		   value. This has been verified on a Satellite Pro 430CDT,
		   Tecra 750CDT, Tecra 780DVD and Satellite 310CDT. */
#if TOSH_DEBUG
		printk("toshiba: debugging ID ebx=0x%04x\n", regs.ebx);
#endif
		bx = 0xe6f5;

		/* now twiddle with our pointer a bit */
		address = bx;
		cx = readw(bios + address);
		address = 9+bx+cx;
		cx = readw(bios + address);
		address = 0xa+cx;
		cx = readw(bios + address);

		/* now construct our machine identification number */
		id = ((cx & 0xff)<<8)+((cx & 0xff00)>>8);
	}

	return id;
}

/*
 * Probe for the presence of a Toshiba laptop
 *
 *   returns and non-zero if unable to detect the presence of a Toshiba
 *   laptop, otherwise zero and determines the Machine ID, BIOS version and
 *   date, and SCI version.
 */
static int tosh_probe(void)
{
	int i,major,minor,day,year,month,flag;
	unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
	SMMRegisters regs;
	void __iomem *bios = ioremap_cache(0xf0000, 0x10000);

	if (!bios)
		return -ENOMEM;

	/* extra sanity check for the string "TOSHIBA" in the BIOS because
	   some machines that are not Toshiba's pass the next test */

	for (i=0;i<7;i++) {
		if (readb(bios+0xe010+i)!=signature[i]) {
			printk("toshiba: not a supported Toshiba laptop\n");
			iounmap(bios);
			return -ENODEV;
		}
	}

	/* call the Toshiba SCI support check routine */

	regs.eax = 0xf0f0;
	regs.ebx = 0x0000;
	regs.ecx = 0x0000;
	flag = tosh_smm(&regs);

	/* if this is not a Toshiba laptop carry flag is set and ah=0x86 */

	if ((flag==1) || ((regs.eax & 0xff00)==0x8600)) {
		printk("toshiba: not a supported Toshiba laptop\n");
		iounmap(bios);
		return -ENODEV;
	}

	/* if we get this far then we are running on a Toshiba (probably)! */

	tosh_sci = regs.edx & 0xffff;

	/* next get the machine ID of the current laptop */

	tosh_id = tosh_get_machine_id(bios);

	/* get the BIOS version */

	major = readb(bios+0xe009)-'0';
	minor = ((readb(bios+0xe00b)-'0')*10)+(readb(bios+0xe00c)-'0');
	tosh_bios = (major*0x100)+minor;

	/* get the BIOS date */

	day = ((readb(bios+0xfff5)-'0')*10)+(readb(bios+0xfff6)-'0');
	month = ((readb(bios+0xfff8)-'0')*10)+(readb(bios+0xfff9)-'0');
	year = ((readb(bios+0xfffb)-'0')*10)+(readb(bios+0xfffc)-'0');
	tosh_date = (((year-90) & 0x1f)<<10) | ((month & 0xf)<<6) |
		((day & 0x1f)<<1);

	/* in theory we should check the ports we are going to use for the
	   fn key detection (and the fan on the Portage 610/Tecra700), and
	   then request them to stop other drivers using them. However as
	   the keyboard driver grabs 0x60-0x6f and the pic driver grabs
	   0xa0-0xbf we can't. We just have to live dangerously and use the
	   ports anyway, oh boy! */

	/* do we need to emulate the fan? */

	if ((tosh_id==0xfccb) || (tosh_id==0xfccc))
		tosh_fan = 1;

	iounmap(bios);

	return 0;
}

/*
 * Module init: probe the hardware, pick the Fn port, register the
 * misc device and the /proc/toshiba entry.
 */
static int __init toshiba_init(void)
{
	int retval;

	/* are we running on a Toshiba laptop */

	if (tosh_probe())
		return -ENODEV;

	printk(KERN_INFO "Toshiba System Management Mode driver v" TOSH_VERSION "\n");

	/* set the port to use for Fn status if not specified as a parameter */
	if (tosh_fn==0x00)
		tosh_set_fn_port();

	/* register the device file */
	retval = misc_register(&tosh_device);
	if (retval < 0)
		return retval;

#ifdef CONFIG_PROC_FS
	{
		struct proc_dir_entry *pde;

		pde = proc_create("toshiba", 0, NULL, &proc_toshiba_fops);
		if (!pde) {
			/* undo the misc registration on failure */
			misc_deregister(&tosh_device);
			return -ENOMEM;
		}
	}
#endif

	return 0;
}

static void __exit toshiba_exit(void)
{
	remove_proc_entry("toshiba", NULL);
	misc_deregister(&tosh_device);
}

module_init(toshiba_init);
module_exit(toshiba_exit);
gpl-2.0
TEAM-Gummy/android_kernel_samsung_hlte
drivers/video/msm/mipi_truly.c
2197
6789
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include "msm_fb.h"
#include "mipi_dsi.h"
#include "mipi_truly.h"

static struct msm_panel_common_pdata *mipi_truly_pdata;
static struct dsi_buf truly_tx_buf;
static struct dsi_buf truly_rx_buf;

#define TRULY_CMD_DELAY		0
#define TRULY_SLEEP_OFF_DELAY	150
#define TRULY_DISPLAY_ON_DELAY	150
#define GPIO_TRULY_LCD_RESET	129

/* last backlight level applied; 17 (BL_LEVEL) means backlight off */
static int prev_bl = 17;

/* panel initialization command payloads (first byte = DCS/generic
 * command, remainder = parameters); values are panel-vendor supplied */
static char extend_cmd_enable[4] = {0xB9, 0xFF, 0x83, 0x69};
static char display_setting[16] = {
	0xB2, 0x00, 0x23, 0x62, 0x62, 0x70, 0x00, 0xFF, 0x00, 0x00,
	0x00, 0x00, 0x03, 0x03, 0x00, 0x01,
};
static char wave_cycle_setting[6] = {0xB4, 0x00, 0x1D, 0x5F, 0x0E, 0x06};
static char gip_setting[27] = {
	0xD5, 0x00, 0x04, 0x03, 0x00, 0x01, 0x05, 0x1C, 0x70, 0x01,
	0x03, 0x00, 0x00, 0x40, 0x06, 0x51, 0x07, 0x00, 0x00, 0x41,
	0x06, 0x50, 0x07, 0x07, 0x0F, 0x04, 0x00,
};
static char power_setting[20] = {
	0xB1, 0x01, 0x00, 0x34, 0x06, 0x00, 0x0F, 0x0F, 0x2A, 0x32,
	0x3F, 0x3F, 0x07, 0x3A, 0x01, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6,
};
static char vcom_setting[3] = {0xB6, 0x56, 0x56};
static char pannel_setting[2] = {0xCC, 0x02};
static char gamma_setting[35] = {
	0xE0, 0x00, 0x1D, 0x22, 0x38, 0x3D, 0x3F, 0x2E, 0x4A, 0x06,
	0x0D, 0x0F, 0x13, 0x15, 0x13, 0x16, 0x10, 0x19, 0x00, 0x1D,
	0x22, 0x38, 0x3D, 0x3F, 0x2E, 0x4A, 0x06, 0x0D, 0x0F, 0x13,
	0x15, 0x13, 0x16, 0x10, 0x19,
};
static char mipi_setting[14] = {
	0xBA, 0x00, 0xA0, 0xC6, 0x00, 0x0A, 0x00, 0x10, 0x30, 0x6F,
	0x02, 0x11, 0x18, 0x40,
};
static char exit_sleep[2] = {0x11, 0x00};
static char display_on[2] = {0x29, 0x00};
static char display_off[2] = {0x28, 0x00};
static char enter_sleep[2] = {0x10, 0x00};

static struct dsi_cmd_desc truly_display_off_cmds[] = {
	{DTYPE_DCS_WRITE, 1, 0, 0, 10, sizeof(display_off), display_off},
	{DTYPE_DCS_WRITE, 1, 0, 0, 120, sizeof(enter_sleep), enter_sleep}
};

/* full power-on sequence; ordering and delays are panel requirements */
static struct dsi_cmd_desc truly_display_on_cmds[] = {
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(extend_cmd_enable), extend_cmd_enable},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(display_setting), display_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(wave_cycle_setting), wave_cycle_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(gip_setting), gip_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(power_setting), power_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(vcom_setting), vcom_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(pannel_setting), pannel_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(gamma_setting), gamma_setting},
	{DTYPE_GEN_LWRITE, 1, 0, 0, TRULY_CMD_DELAY,
			sizeof(mipi_setting), mipi_setting},
	{DTYPE_DCS_WRITE, 1, 0, 0, TRULY_SLEEP_OFF_DELAY,
			sizeof(exit_sleep), exit_sleep},
	{DTYPE_DCS_WRITE, 1, 0, 0, TRULY_DISPLAY_ON_DELAY,
			sizeof(display_on), display_on},
};

/*
 * Panel on: queue the full initialization/display-on command list.
 * Returns 0 on success, -ENODEV/-EINVAL if the framebuffer device is
 * missing or invalid.
 */
static int mipi_truly_lcd_on(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;
	struct dcs_cmd_req cmdreq;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	msleep(20);
	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.cmds = truly_display_on_cmds;
	cmdreq.cmds_cnt = ARRAY_SIZE(truly_display_on_cmds);
	cmdreq.flags = CMD_REQ_COMMIT;
	cmdreq.rlen = 0;
	cmdreq.cb = NULL;
	mipi_dsi_cmdlist_put(&cmdreq);

	return 0;
}

/*
 * Panel off: queue display-off + enter-sleep.  Same return convention
 * as mipi_truly_lcd_on().
 */
static int mipi_truly_lcd_off(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;
	struct dcs_cmd_req cmdreq;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.cmds = truly_display_off_cmds;
	cmdreq.cmds_cnt = ARRAY_SIZE(truly_display_off_cmds);
	cmdreq.flags = CMD_REQ_COMMIT;
	cmdreq.rlen = 0;
	cmdreq.cb = NULL;
	mipi_dsi_cmdlist_put(&cmdreq);

	return 0;
}

#define BL_LEVEL	17

/*
 * Set backlight via the PMIC GPIO: the backlight IC steps its level
 * on each off/on pulse, so we emit 'step' pulses to walk from the
 * previous level to the requested one.
 * NOTE(review): assumes mipi_truly_pdata and its pmic_backlight hook
 * are non-NULL by the time the framebuffer calls this -- confirm
 * against the board file.
 */
static void mipi_truly_set_backlight(struct msm_fb_data_type *mfd)
{
	int step = 0, i = 0;
	int bl_level = mfd->bl_level;

	/* real backlight level, 1 - max, 16 - min, 17 - off */
	bl_level = BL_LEVEL - bl_level;

	if (bl_level > prev_bl) {
		step = bl_level - prev_bl;
		if (bl_level == BL_LEVEL)
			step--;
	} else if (bl_level < prev_bl) {
		/* level counter wraps around, so walk the long way up */
		step = bl_level + 16 - prev_bl;
	} else {
		pr_debug("%s: no change\n", __func__);
		return;
	}

	if (bl_level == BL_LEVEL) {
		/* turn off backlight */
		mipi_truly_pdata->pmic_backlight(0);
	} else {
		if (prev_bl == BL_LEVEL) {
			/* turn on backlight */
			mipi_truly_pdata->pmic_backlight(1);
			udelay(30);
		}
		/* adjust backlight level */
		for (i = 0; i < step; i++) {
			mipi_truly_pdata->pmic_backlight(0);
			udelay(1);
			mipi_truly_pdata->pmic_backlight(1);
			udelay(1);
		}
	}
	msleep(20);
	prev_bl = bl_level;

	return;
}

/*
 * Probe: device id 0 carries the board platform data; any other id is
 * an actual panel instance handed to the framebuffer core.
 */
static int __devinit mipi_truly_lcd_probe(struct platform_device *pdev)
{
	if (pdev->id == 0) {
		mipi_truly_pdata = pdev->dev.platform_data;
		return 0;
	}

	msm_fb_add_device(pdev);

	return 0;
}

static struct platform_driver this_driver = {
	.probe  = mipi_truly_lcd_probe,
	.driver = {
		.name   = "mipi_truly",
	},
};

static struct msm_fb_panel_data truly_panel_data = {
	.on		= mipi_truly_lcd_on,
	.off		= mipi_truly_lcd_off,
	.set_backlight	= mipi_truly_set_backlight,
};

static int ch_used[3];

/*
 * Register one panel instance on the given DSI channel.  Fails with
 * -ENODEV if the channel is out of range or already used.
 */
int mipi_truly_device_register(struct msm_panel_info *pinfo,
					u32 channel, u32 panel)
{
	struct platform_device *pdev = NULL;
	int ret;

	if ((channel >= 3) || ch_used[channel])
		return -ENODEV;

	ch_used[channel] = TRUE;

	pdev = platform_device_alloc("mipi_truly", (panel << 8)|channel);
	if (!pdev)
		return -ENOMEM;

	truly_panel_data.panel_info = *pinfo;

	ret = platform_device_add_data(pdev, &truly_panel_data,
		sizeof(truly_panel_data));
	if (ret) {
		pr_err("%s: platform_device_add_data failed!\n", __func__);
		goto err_device_put;
	}

	ret = platform_device_add(pdev);
	if (ret) {
		pr_err("%s: platform_device_register failed!\n", __func__);
		goto err_device_put;
	}

	return 0;

err_device_put:
	platform_device_put(pdev);
	return ret;
}

static int __init mipi_truly_lcd_init(void)
{
	mipi_dsi_buf_alloc(&truly_tx_buf, DSI_BUF_SIZE);
	mipi_dsi_buf_alloc(&truly_rx_buf, DSI_BUF_SIZE);

	return platform_driver_register(&this_driver);
}

module_init(mipi_truly_lcd_init);
gpl-2.0
adbaby/android_kernel_msm8974
drivers/video/msm/mipi_renesas.c
2197
54725
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "msm_fb.h" #include "mipi_dsi.h" #include "mipi_renesas.h" #include <mach/socinfo.h> #define RENESAS_CMD_DELAY 0 /* 50 */ #define RENESAS_SLEEP_OFF_DELAY 50 static struct msm_panel_common_pdata *mipi_renesas_pdata; static struct dsi_buf renesas_tx_buf; static struct dsi_buf renesas_rx_buf; static int mipi_renesas_lcd_init(void); static char config_sleep_out[2] = {0x11, 0x00}; static char config_CMD_MODE[2] = {0x40, 0x01}; static char config_WRTXHT[7] = {0x92, 0x16, 0x08, 0x08, 0x00, 0x01, 0xe0}; static char config_WRTXVT[7] = {0x8b, 0x02, 0x02, 0x02, 0x00, 0x03, 0x60}; static char config_PLL2NR[2] = {0xa0, 0x24}; static char config_PLL2NF1[2] = {0xa2, 0xd0}; static char config_PLL2NF2[2] = {0xa4, 0x00}; static char config_PLL2BWADJ1[2] = {0xa6, 0xd0}; static char config_PLL2BWADJ2[2] = {0xa8, 0x00}; static char config_PLL2CTL[2] = {0xaa, 0x00}; static char config_DBICBR[2] = {0x48, 0x03}; static char config_DBICTYPE[2] = {0x49, 0x00}; static char config_DBICSET1[2] = {0x4a, 0x1c}; static char config_DBICADD[2] = {0x4b, 0x00}; static char config_DBICCTL[2] = {0x4e, 0x01}; /* static char config_COLMOD_565[2] = {0x3a, 0x05}; */ /* static char config_COLMOD_666PACK[2] = {0x3a, 0x06}; */ static char config_COLMOD_888[2] = {0x3a, 0x07}; static char config_MADCTL[2] = {0x36, 0x00}; static char config_DBIOC[2] = {0x82, 0x40}; static char config_CASET[7] = {0x2a, 0x00, 0x00, 0x00, 0x00, 0x01, 0xdf }; static char config_PASET[7] = {0x2b, 0x00, 0x00, 
0x00, 0x00, 0x03, 0x5f }; static char config_TXON[2] = {0x81, 0x00}; static char config_BLSET_TM[2] = {0xff, 0x6c}; static char config_DSIRXCTL[2] = {0x41, 0x01}; static char config_TEON[2] = {0x35, 0x00}; static char config_TEOFF[1] = {0x34}; static char config_AGCPSCTL_TM[2] = {0x56, 0x08}; static char config_DBICADD70[2] = {0x4b, 0x70}; static char config_DBICSET_15[2] = {0x4a, 0x15}; static char config_DBICADD72[2] = {0x4b, 0x72}; static char config_Power_Ctrl_2a_cmd[3] = {0x4c, 0x40, 0x10}; static char config_Auto_Sequencer_Setting_a_cmd[3] = {0x4c, 0x00, 0x00}; static char Driver_Output_Ctrl_indx[3] = {0x4c, 0x00, 0x01}; static char Driver_Output_Ctrl_cmd[3] = {0x4c, 0x03, 0x10}; static char config_LCD_drive_AC_Ctrl_indx[3] = {0x4c, 0x00, 0x02}; static char config_LCD_drive_AC_Ctrl_cmd[3] = {0x4c, 0x01, 0x00}; static char config_Entry_Mode_indx[3] = {0x4c, 0x00, 0x03}; static char config_Entry_Mode_cmd[3] = {0x4c, 0x00, 0x00}; static char config_Display_Ctrl_1_indx[3] = {0x4c, 0x00, 0x07}; static char config_Display_Ctrl_1_cmd[3] = {0x4c, 0x00, 0x00}; static char config_Display_Ctrl_2_indx[3] = {0x4c, 0x00, 0x08}; static char config_Display_Ctrl_2_cmd[3] = {0x4c, 0x00, 0x04}; static char config_Display_Ctrl_3_indx[3] = {0x4c, 0x00, 0x09}; static char config_Display_Ctrl_3_cmd[3] = {0x4c, 0x00, 0x0c}; static char config_Display_IF_Ctrl_1_indx[3] = {0x4c, 0x00, 0x0c}; static char config_Display_IF_Ctrl_1_cmd[3] = {0x4c, 0x40, 0x10}; static char config_Display_IF_Ctrl_2_indx[3] = {0x4c, 0x00, 0x0e}; static char config_Display_IF_Ctrl_2_cmd[3] = {0x4c, 0x00, 0x00}; static char config_Panel_IF_Ctrl_1_indx[3] = {0x4c, 0x00, 0x20}; static char config_Panel_IF_Ctrl_1_cmd[3] = {0x4c, 0x01, 0x3f}; static char config_Panel_IF_Ctrl_3_indx[3] = {0x4c, 0x00, 0x22}; static char config_Panel_IF_Ctrl_3_cmd[3] = {0x4c, 0x76, 0x00}; static char config_Panel_IF_Ctrl_4_indx[3] = {0x4c, 0x00, 0x23}; static char config_Panel_IF_Ctrl_4_cmd[3] = {0x4c, 0x1c, 0x0a}; static char 
config_Panel_IF_Ctrl_5_indx[3] = {0x4c, 0x00, 0x24}; static char config_Panel_IF_Ctrl_5_cmd[3] = {0x4c, 0x1c, 0x2c}; static char config_Panel_IF_Ctrl_6_indx[3] = {0x4c, 0x00, 0x25}; static char config_Panel_IF_Ctrl_6_cmd[3] = {0x4c, 0x1c, 0x4e}; static char config_Panel_IF_Ctrl_8_indx[3] = {0x4c, 0x00, 0x27}; static char config_Panel_IF_Ctrl_8_cmd[3] = {0x4c, 0x00, 0x00}; static char config_Panel_IF_Ctrl_9_indx[3] = {0x4c, 0x00, 0x28}; static char config_Panel_IF_Ctrl_9_cmd[3] = {0x4c, 0x76, 0x0c}; static char config_gam_adjust_00_indx[3] = {0x4c, 0x03, 0x00}; static char config_gam_adjust_00_cmd[3] = {0x4c, 0x00, 0x00}; static char config_gam_adjust_01_indx[3] = {0x4c, 0x03, 0x01}; static char config_gam_adjust_01_cmd[3] = {0x4c, 0x05, 0x02}; static char config_gam_adjust_02_indx[3] = {0x4c, 0x03, 0x02}; static char config_gam_adjust_02_cmd[3] = {0x4c, 0x07, 0x05}; static char config_gam_adjust_03_indx[3] = {0x4c, 0x03, 0x03}; static char config_gam_adjust_03_cmd[3] = {0x4c, 0x00, 0x00}; static char config_gam_adjust_04_indx[3] = {0x4c, 0x03, 0x04}; static char config_gam_adjust_04_cmd[3] = {0x4c, 0x02, 0x00}; static char config_gam_adjust_05_indx[3] = {0x4c, 0x03, 0x05}; static char config_gam_adjust_05_cmd[3] = {0x4c, 0x07, 0x07}; static char config_gam_adjust_06_indx[3] = {0x4c, 0x03, 0x06}; static char config_gam_adjust_06_cmd[3] = {0x4c, 0x10, 0x10}; static char config_gam_adjust_07_indx[3] = {0x4c, 0x03, 0x07}; static char config_gam_adjust_07_cmd[3] = {0x4c, 0x02, 0x02}; static char config_gam_adjust_08_indx[3] = {0x4c, 0x03, 0x08}; static char config_gam_adjust_08_cmd[3] = {0x4c, 0x07, 0x04}; static char config_gam_adjust_09_indx[3] = {0x4c, 0x03, 0x09}; static char config_gam_adjust_09_cmd[3] = {0x4c, 0x07, 0x07}; static char config_gam_adjust_0A_indx[3] = {0x4c, 0x03, 0x0a}; static char config_gam_adjust_0A_cmd[3] = {0x4c, 0x00, 0x00}; static char config_gam_adjust_0B_indx[3] = {0x4c, 0x03, 0x0b}; static char config_gam_adjust_0B_cmd[3] = {0x4c, 0x00, 
0x00}; static char config_gam_adjust_0C_indx[3] = {0x4c, 0x03, 0x0c}; static char config_gam_adjust_0C_cmd[3] = {0x4c, 0x07, 0x07}; static char config_gam_adjust_0D_indx[3] = {0x4c, 0x03, 0x0d}; static char config_gam_adjust_0D_cmd[3] = {0x4c, 0x10, 0x10}; static char config_gam_adjust_10_indx[3] = {0x4c, 0x03, 0x10}; static char config_gam_adjust_10_cmd[3] = {0x4c, 0x01, 0x04}; static char config_gam_adjust_11_indx[3] = {0x4c, 0x03, 0x11}; static char config_gam_adjust_11_cmd[3] = {0x4c, 0x05, 0x03}; static char config_gam_adjust_12_indx[3] = {0x4c, 0x03, 0x12}; static char config_gam_adjust_12_cmd[3] = {0x4c, 0x03, 0x04}; static char config_gam_adjust_15_indx[3] = {0x4c, 0x03, 0x15}; static char config_gam_adjust_15_cmd[3] = {0x4c, 0x03, 0x04}; static char config_gam_adjust_16_indx[3] = {0x4c, 0x03, 0x16}; static char config_gam_adjust_16_cmd[3] = {0x4c, 0x03, 0x1c}; static char config_gam_adjust_17_indx[3] = {0x4c, 0x03, 0x17}; static char config_gam_adjust_17_cmd[3] = {0x4c, 0x02, 0x04}; static char config_gam_adjust_18_indx[3] = {0x4c, 0x03, 0x18}; static char config_gam_adjust_18_cmd[3] = {0x4c, 0x04, 0x02}; static char config_gam_adjust_19_indx[3] = {0x4c, 0x03, 0x19}; static char config_gam_adjust_19_cmd[3] = {0x4c, 0x03, 0x05}; static char config_gam_adjust_1C_indx[3] = {0x4c, 0x03, 0x1c}; static char config_gam_adjust_1C_cmd[3] = {0x4c, 0x07, 0x07}; static char config_gam_adjust_1D_indx[3] = {0x4c, 0x03, 0x1D}; static char config_gam_adjust_1D_cmd[3] = {0x4c, 0x02, 0x1f}; static char config_gam_adjust_20_indx[3] = {0x4c, 0x03, 0x20}; static char config_gam_adjust_20_cmd[3] = {0x4c, 0x05, 0x07}; static char config_gam_adjust_21_indx[3] = {0x4c, 0x03, 0x21}; static char config_gam_adjust_21_cmd[3] = {0x4c, 0x06, 0x04}; static char config_gam_adjust_22_indx[3] = {0x4c, 0x03, 0x22}; static char config_gam_adjust_22_cmd[3] = {0x4c, 0x04, 0x05}; static char config_gam_adjust_27_indx[3] = {0x4c, 0x03, 0x27}; static char config_gam_adjust_27_cmd[3] = {0x4c, 0x02, 
0x03}; static char config_gam_adjust_28_indx[3] = {0x4c, 0x03, 0x28}; static char config_gam_adjust_28_cmd[3] = {0x4c, 0x03, 0x00}; static char config_gam_adjust_29_indx[3] = {0x4c, 0x03, 0x29}; static char config_gam_adjust_29_cmd[3] = {0x4c, 0x00, 0x02}; static char config_Power_Ctrl_1_indx[3] = {0x4c, 0x01, 0x00}; static char config_Power_Ctrl_1b_cmd[3] = {0x4c, 0x36, 0x3c}; static char config_Power_Ctrl_2_indx[3] = {0x4c, 0x01, 0x01}; static char config_Power_Ctrl_2b_cmd[3] = {0x4c, 0x40, 0x03}; static char config_Power_Ctrl_3_indx[3] = {0x4c, 0x01, 0x02}; static char config_Power_Ctrl_3a_cmd[3] = {0x4c, 0x00, 0x01}; static char config_Power_Ctrl_4_indx[3] = {0x4c, 0x01, 0x03}; static char config_Power_Ctrl_4a_cmd[3] = {0x4c, 0x3c, 0x58}; static char config_Power_Ctrl_6_indx[3] = {0x4c, 0x01, 0x0c}; static char config_Power_Ctrl_6a_cmd[3] = {0x4c, 0x01, 0x35}; static char config_Auto_Sequencer_Setting_b_cmd[3] = {0x4c, 0x00, 0x02}; static char config_Panel_IF_Ctrl_10_indx[3] = {0x4c, 0x00, 0x29}; static char config_Panel_IF_Ctrl_10a_cmd[3] = {0x4c, 0x03, 0xbf}; static char config_Auto_Sequencer_Setting_indx[3] = {0x4c, 0x01, 0x06}; static char config_Auto_Sequencer_Setting_c_cmd[3] = {0x4c, 0x00, 0x03}; static char config_Power_Ctrl_2c_cmd[3] = {0x4c, 0x40, 0x10}; static char config_VIDEO[2] = {0x40, 0x00}; static char config_Panel_IF_Ctrl_10_indx_off[3] = {0x4C, 0x00, 0x29}; static char config_Panel_IF_Ctrl_10b_cmd_off[3] = {0x4C, 0x00, 0x02}; static char config_Power_Ctrl_1a_cmd[3] = {0x4C, 0x30, 0x00}; static struct dsi_cmd_desc renesas_sleep_off_cmds[] = { {DTYPE_DCS_WRITE, 1, 0, 0, RENESAS_SLEEP_OFF_DELAY, sizeof(config_sleep_out), config_sleep_out } }; static struct dsi_cmd_desc renesas_display_off_cmds[] = { /* Choosing Command Mode */ {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_CMD_MODE), config_CMD_MODE }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, 
RENESAS_CMD_DELAY, sizeof(config_Auto_Sequencer_Setting_indx),
		config_Auto_Sequencer_Setting_indx},
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICSET_15), config_DBICSET_15},
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICADD72), config_DBICADD72},
	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_Auto_Sequencer_Setting_b_cmd),
		config_Auto_Sequencer_Setting_b_cmd},
	/* doubled delay: give the auto-sequencer extra time to settle */
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY * 2,
		sizeof(config_DBICSET_15), config_DBICSET_15},
	/*
	 * After waiting >= 5 frames, turn OFF the RGB signals.  This is
	 * done by DSI/MDP (depends on Video/Command mode).
	 */
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICADD70), config_DBICADD70},
	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_Auto_Sequencer_Setting_indx),
		config_Auto_Sequencer_Setting_indx},
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICSET_15), config_DBICSET_15},
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICADD72), config_DBICADD72},
	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_Auto_Sequencer_Setting_a_cmd),
		config_Auto_Sequencer_Setting_a_cmd},
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICSET_15), config_DBICSET_15},
	/* shut down the panel interface */
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICADD70), config_DBICADD70},
	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_Panel_IF_Ctrl_10_indx_off),
		config_Panel_IF_Ctrl_10_indx_off},
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICSET_15), config_DBICSET_15},
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICADD72), config_DBICADD72},
	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_Panel_IF_Ctrl_10b_cmd_off),
		config_Panel_IF_Ctrl_10b_cmd_off},
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICSET_15), config_DBICSET_15},
	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
		sizeof(config_DBICADD70), config_DBICADD70},
	/* entry continues on the next original line */
	{DTYPE_DCS_LWRITE, 1, 0, 0,
RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_1_indx), config_Power_Ctrl_1_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_1a_cmd), config_Power_Ctrl_1a_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_TEOFF), config_TEOFF}, }; static struct dsi_cmd_desc renesas_display_on_cmds[] = { /* Choosing Command Mode */ {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_CMD_MODE), config_CMD_MODE }, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_WRTXHT), config_WRTXHT }, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_WRTXVT), config_WRTXVT }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_PLL2NR), config_PLL2NR }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_PLL2NF1), config_PLL2NF1 }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_PLL2NF2), config_PLL2NF2 }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_PLL2BWADJ1), config_PLL2BWADJ1}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_PLL2BWADJ2), config_PLL2BWADJ2}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_PLL2CTL), config_PLL2CTL}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICBR), config_DBICBR}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICTYPE), config_DBICTYPE}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET1), config_DBICSET1}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD), config_DBICADD}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICCTL), config_DBICCTL}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_COLMOD_888), config_COLMOD_888}, /* Choose 
config_COLMOD_565 or config_COLMOD_666PACK for other modes */ {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_MADCTL), config_MADCTL}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBIOC), config_DBIOC}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_CASET), config_CASET}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_PASET), config_PASET}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DSIRXCTL), config_DSIRXCTL}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_TEON), config_TEON}, {DTYPE_DCS_WRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_TXON), config_TXON}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_BLSET_TM), config_BLSET_TM}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_AGCPSCTL_TM), config_AGCPSCTL_TM}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_1_indx), config_Power_Ctrl_1_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_1a_cmd), config_Power_Ctrl_1a_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_2_indx), config_Power_Ctrl_2_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_2a_cmd), config_Power_Ctrl_2a_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), 
config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Auto_Sequencer_Setting_indx), config_Auto_Sequencer_Setting_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Auto_Sequencer_Setting_a_cmd), config_Auto_Sequencer_Setting_a_cmd }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(Driver_Output_Ctrl_indx), Driver_Output_Ctrl_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(Driver_Output_Ctrl_cmd), Driver_Output_Ctrl_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_LCD_drive_AC_Ctrl_indx), config_LCD_drive_AC_Ctrl_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_LCD_drive_AC_Ctrl_cmd), config_LCD_drive_AC_Ctrl_cmd }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, 
sizeof(config_Entry_Mode_indx), config_Entry_Mode_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Entry_Mode_cmd), config_Entry_Mode_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Display_Ctrl_1_indx), config_Display_Ctrl_1_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Display_Ctrl_1_cmd), config_Display_Ctrl_1_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Display_Ctrl_2_indx), config_Display_Ctrl_2_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Display_Ctrl_2_cmd), config_Display_Ctrl_2_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Display_Ctrl_3_indx), config_Display_Ctrl_3_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), 
config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Display_Ctrl_3_cmd), config_Display_Ctrl_3_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Display_IF_Ctrl_1_indx), config_Display_IF_Ctrl_1_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Display_IF_Ctrl_1_cmd), config_Display_IF_Ctrl_1_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Display_IF_Ctrl_2_indx), config_Display_IF_Ctrl_2_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Display_IF_Ctrl_2_cmd), config_Display_IF_Ctrl_2_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_1_indx), config_Panel_IF_Ctrl_1_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_1_cmd), config_Panel_IF_Ctrl_1_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, 
sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_3_indx), config_Panel_IF_Ctrl_3_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_3_cmd), config_Panel_IF_Ctrl_3_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_4_indx), config_Panel_IF_Ctrl_4_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_4_cmd), config_Panel_IF_Ctrl_4_cmd }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_5_indx), config_Panel_IF_Ctrl_5_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_5_cmd), config_Panel_IF_Ctrl_5_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, 
sizeof(config_Panel_IF_Ctrl_6_indx), config_Panel_IF_Ctrl_6_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_6_cmd), config_Panel_IF_Ctrl_6_cmd }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_8_indx), config_Panel_IF_Ctrl_8_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_8_cmd), config_Panel_IF_Ctrl_8_cmd }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_9_indx), config_Panel_IF_Ctrl_9_indx }, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_9_cmd), config_Panel_IF_Ctrl_9_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_00_indx), config_gam_adjust_00_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, 
RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_00_cmd), config_gam_adjust_00_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_01_indx), config_gam_adjust_01_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_01_cmd), config_gam_adjust_01_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_02_indx), config_gam_adjust_02_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_02_cmd), config_gam_adjust_02_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_03_indx), config_gam_adjust_03_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_03_cmd), config_gam_adjust_03_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, 
sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_04_indx), config_gam_adjust_04_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_04_cmd), config_gam_adjust_04_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_05_indx), config_gam_adjust_05_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_05_cmd), config_gam_adjust_05_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_06_indx), config_gam_adjust_06_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_06_cmd), config_gam_adjust_06_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_07_indx), 
config_gam_adjust_07_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_07_cmd), config_gam_adjust_07_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_08_indx), config_gam_adjust_08_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_08_cmd), config_gam_adjust_08_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_09_indx), config_gam_adjust_09_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_09_cmd), config_gam_adjust_09_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_0A_indx), config_gam_adjust_0A_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, 
{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_0A_cmd), config_gam_adjust_0A_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_0B_indx), config_gam_adjust_0B_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_0B_cmd), config_gam_adjust_0B_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_0C_indx), config_gam_adjust_0C_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_0C_cmd), config_gam_adjust_0C_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_0D_indx), config_gam_adjust_0D_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_0D_cmd), config_gam_adjust_0D_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, 
RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_10_indx), config_gam_adjust_10_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_10_cmd), config_gam_adjust_10_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_11_indx), config_gam_adjust_11_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_11_cmd), config_gam_adjust_11_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_12_indx), config_gam_adjust_12_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_12_cmd), config_gam_adjust_12_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_15_indx), config_gam_adjust_15_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, 
sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_15_cmd), config_gam_adjust_15_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_16_indx), config_gam_adjust_16_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_16_cmd), config_gam_adjust_16_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_17_indx), config_gam_adjust_17_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_17_cmd), config_gam_adjust_17_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_18_indx), config_gam_adjust_18_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_18_cmd), 
config_gam_adjust_18_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_19_indx), config_gam_adjust_19_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_19_cmd), config_gam_adjust_19_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_1C_indx), config_gam_adjust_1C_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_1C_cmd), config_gam_adjust_1C_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_1D_indx), config_gam_adjust_1D_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_1D_cmd), config_gam_adjust_1D_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, 
{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_20_indx), config_gam_adjust_20_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_20_cmd), config_gam_adjust_20_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_21_indx), config_gam_adjust_21_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_21_cmd), config_gam_adjust_21_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_22_indx), config_gam_adjust_22_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_22_cmd), config_gam_adjust_22_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_27_indx), config_gam_adjust_27_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 
0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_27_cmd), config_gam_adjust_27_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_28_indx), config_gam_adjust_28_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_28_cmd), config_gam_adjust_28_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_29_indx), config_gam_adjust_29_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_gam_adjust_29_cmd), config_gam_adjust_29_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_1_indx), config_Power_Ctrl_1_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_1b_cmd), config_Power_Ctrl_1b_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, 
sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_2_indx), config_Power_Ctrl_2_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_2b_cmd), config_Power_Ctrl_2b_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_3_indx), config_Power_Ctrl_3_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_3a_cmd), config_Power_Ctrl_3a_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_4_indx), config_Power_Ctrl_4_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_4a_cmd), config_Power_Ctrl_4a_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_6_indx), 
config_Power_Ctrl_6_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_6a_cmd), config_Power_Ctrl_6a_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Auto_Sequencer_Setting_indx), config_Auto_Sequencer_Setting_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Auto_Sequencer_Setting_b_cmd), config_Auto_Sequencer_Setting_b_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_10_indx), config_Panel_IF_Ctrl_10_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Panel_IF_Ctrl_10a_cmd), config_Panel_IF_Ctrl_10a_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Auto_Sequencer_Setting_indx), config_Auto_Sequencer_Setting_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, 
RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Auto_Sequencer_Setting_c_cmd), config_Auto_Sequencer_Setting_c_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD70), config_DBICADD70}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_2_indx), config_Power_Ctrl_2_indx}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICSET_15), config_DBICSET_15}, {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_DBICADD72), config_DBICADD72}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_Power_Ctrl_2c_cmd), config_Power_Ctrl_2c_cmd}, {DTYPE_DCS_WRITE1, 1, 0, 0, 0/* RENESAS_CMD_DELAY */, sizeof(config_DBICSET_15), config_DBICSET_15}, }; static char config_WRTXHT2[7] = {0x92, 0x15, 0x05, 0x0F, 0x00, 0x01, 0xe0}; static char config_WRTXVT2[7] = {0x8b, 0x14, 0x01, 0x14, 0x00, 0x03, 0x60}; static struct dsi_cmd_desc renesas_hvga_on_cmds[] = { {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_WRTXHT2), config_WRTXHT2}, {DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_WRTXVT2), config_WRTXVT2}, }; static struct dsi_cmd_desc renesas_video_on_cmds[] = { {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_VIDEO), config_VIDEO} }; static struct dsi_cmd_desc renesas_cmd_on_cmds[] = { {DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY, sizeof(config_CMD_MODE), config_CMD_MODE}, }; static int mipi_renesas_lcd_on(struct platform_device *pdev) { struct msm_fb_data_type *mfd; struct mipi_panel_info *mipi; struct dcs_cmd_req cmdreq; mfd = platform_get_drvdata(pdev); mipi = &mfd->panel_info.mipi; if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; memset(&cmdreq, 0, sizeof(cmdreq)); cmdreq.cmds = renesas_sleep_off_cmds; cmdreq.cmds_cnt = ARRAY_SIZE(renesas_sleep_off_cmds); cmdreq.flags = CMD_REQ_COMMIT; 
cmdreq.rlen = 0; cmdreq.cb = NULL; mipi_dsi_cmdlist_put(&cmdreq); mipi_set_tx_power_mode(1); cmdreq.cmds = renesas_display_on_cmds; cmdreq.cmds_cnt = ARRAY_SIZE(renesas_display_on_cmds); cmdreq.flags = CMD_REQ_COMMIT; cmdreq.rlen = 0; cmdreq.cb = NULL; mipi_dsi_cmdlist_put(&cmdreq); if (cpu_is_msm7x25a() || cpu_is_msm7x25aa() || cpu_is_msm7x25ab()) { cmdreq.cmds = renesas_hvga_on_cmds; cmdreq.cmds_cnt = ARRAY_SIZE(renesas_hvga_on_cmds); cmdreq.flags = CMD_REQ_COMMIT; cmdreq.rlen = 0; cmdreq.cb = NULL; mipi_dsi_cmdlist_put(&cmdreq); } if (mipi->mode == DSI_VIDEO_MODE) { cmdreq.cmds = renesas_video_on_cmds; cmdreq.cmds_cnt = ARRAY_SIZE(renesas_video_on_cmds); cmdreq.flags = CMD_REQ_COMMIT; cmdreq.rlen = 0; cmdreq.cb = NULL; mipi_dsi_cmdlist_put(&cmdreq); } else { cmdreq.cmds = renesas_cmd_on_cmds; cmdreq.cmds_cnt = ARRAY_SIZE(renesas_cmd_on_cmds); cmdreq.flags = CMD_REQ_COMMIT; cmdreq.rlen = 0; cmdreq.cb = NULL; mipi_dsi_cmdlist_put(&cmdreq); } mipi_set_tx_power_mode(0); return 0; } static int mipi_renesas_lcd_off(struct platform_device *pdev) { struct msm_fb_data_type *mfd; struct dcs_cmd_req cmdreq; mfd = platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; memset(&cmdreq, 0, sizeof(cmdreq)); cmdreq.cmds = renesas_display_off_cmds; cmdreq.cmds_cnt = ARRAY_SIZE(renesas_display_off_cmds); cmdreq.flags = CMD_REQ_COMMIT; cmdreq.rlen = 0; cmdreq.cb = NULL; mipi_dsi_cmdlist_put(&cmdreq); return 0; } static int __devinit mipi_renesas_lcd_probe(struct platform_device *pdev) { if (pdev->id == 0) { mipi_renesas_pdata = pdev->dev.platform_data; return 0; } msm_fb_add_device(pdev); return 0; } static void mipi_renesas_set_backlight(struct msm_fb_data_type *mfd) { int ret = -EPERM; int bl_level; bl_level = mfd->bl_level; if (mipi_renesas_pdata && mipi_renesas_pdata->pmic_backlight) ret = mipi_renesas_pdata->pmic_backlight(bl_level); else pr_err("%s(): Backlight level set failed", __func__); } static struct platform_driver this_driver = 
{ .probe = mipi_renesas_lcd_probe, .driver = { .name = "mipi_renesas", }, }; static struct msm_fb_panel_data renesas_panel_data = { .on = mipi_renesas_lcd_on, .off = mipi_renesas_lcd_off, .set_backlight = mipi_renesas_set_backlight, }; static int ch_used[3]; int mipi_renesas_device_register(struct msm_panel_info *pinfo, u32 channel, u32 panel) { struct platform_device *pdev = NULL; int ret; if ((channel >= 3) || ch_used[channel]) return -ENODEV; ch_used[channel] = TRUE; ret = mipi_renesas_lcd_init(); if (ret) { pr_err("mipi_renesas_lcd_init() failed with ret %u\n", ret); return ret; } pdev = platform_device_alloc("mipi_renesas", (panel << 8)|channel); if (!pdev) return -ENOMEM; renesas_panel_data.panel_info = *pinfo; ret = platform_device_add_data(pdev, &renesas_panel_data, sizeof(renesas_panel_data)); if (ret) { pr_err("%s: platform_device_add_data failed!\n", __func__); goto err_device_put; } ret = platform_device_add(pdev); if (ret) { pr_err("%s: platform_device_register failed!\n", __func__); goto err_device_put; } return 0; err_device_put: platform_device_put(pdev); return ret; } static int mipi_renesas_lcd_init(void) { mipi_dsi_buf_alloc(&renesas_tx_buf, DSI_BUF_SIZE); mipi_dsi_buf_alloc(&renesas_rx_buf, DSI_BUF_SIZE); return platform_driver_register(&this_driver); }
gpl-2.0
anomalchik/android_kernel_xiaomi
arch/sparc/kernel/leon_kernel.c
2453
13360
/*
 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>

#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/prom.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/setup.h>

#include "kernel.h"
#include "prom.h"
#include "irq.h"

struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */
struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */

int leondebug_irq_disable;
int leon_debug_irqout;
/* Placeholder the generic sparc32 code points master_l10_counter at. */
static int dummy_master_l10_counter;
unsigned long amba_system_id;
/* Serializes all accesses to the per-CPU IRQ mask registers. */
static DEFINE_SPINLOCK(leon_irq_lock);

unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
unsigned int sparc_leon_eirq;
#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
#define LEON_IACK (&leon3_irqctrl_regs->iclear)
/* Stored in the low bit of chip_data: ack the HW controller on EOI. */
#define LEON_DO_ACK_HW 1

/* Return the last ACKed IRQ by the Extended IRQ controller. It has already
 * been (automatically) ACKed when the CPU takes the trap.
 */
static inline unsigned int leon_eirq_get(int cpu)
{
	return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
}

/* Handle one or multiple IRQs from the extended interrupt controller */
static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int eirq;
	struct irq_bucket *p;
	int cpu = sparc_leon3_cpuid();

	eirq = leon_eirq_get(cpu);
	p = irq_map[eirq];
	if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
		generic_handle_irq(p->irq);
}

/* The extended IRQ controller has been found, this function registers it */
void leon_eirq_setup(unsigned int eirq)
{
	unsigned long mask, oldmask;
	unsigned int veirq;

	/* Valid extended IRQ numbers are 1..15. */
	if (eirq < 1 || eirq > 0xf) {
		printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq);
		return;
	}

	veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0);

	/*
	 * Unmask the Extended IRQ, the IRQs routed through the Ext-IRQ
	 * controller have a mask-bit of their own, so this is safe.
	 */
	irq_link(veirq);
	mask = 1 << eirq;
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask));
	sparc_leon_eirq = eirq;
}

/*
 * Translate a hardware IRQ number into its mask-register bit.
 * Returns 0 for invalid IRQ numbers (0, or out of range for the
 * configuration with/without an extended IRQ controller).
 */
unsigned long leon_get_irqmask(unsigned int irq)
{
	unsigned long mask;

	if (!irq || ((irq > 0xf) && !sparc_leon_eirq)
	    || ((irq > 0x1f) && sparc_leon_eirq)) {
		printk(KERN_ERR
		       "leon_get_irqmask: false irq number: %d\n", irq);
		mask = 0;
	} else {
		mask = LEON_HARD_INT(irq);
	}
	return mask;
}

#ifdef CONFIG_SMP
/* Pick one online CPU out of the affinity mask (boot CPU by default). */
static int irq_choose_cpu(const struct cpumask *affinity)
{
	cpumask_t mask;

	cpumask_and(&mask, cpu_online_mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
		return boot_cpu_id;
	else
		return cpumask_first(&mask);
}
#else
#define irq_choose_cpu(affinity) boot_cpu_id
#endif

/* Move an IRQ to another CPU by updating both CPUs' mask registers. */
static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest,
			     bool force)
{
	unsigned long mask, oldmask, flags;
	int oldcpu, newcpu;

	mask = (unsigned long)data->chip_data;
	oldcpu = irq_choose_cpu(data->affinity);
	newcpu = irq_choose_cpu(dest);

	if (oldcpu == newcpu)
		goto out;

	/* unmask on old CPU first before enabling on the selected CPU */
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask));
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
out:
	return IRQ_SET_MASK_OK;
}

/* Enable the IRQ's bit in the mask register of the CPU it is routed to. */
static void leon_unmask_irq(struct irq_data *data)
{
	unsigned long mask, oldmask, flags;
	int cpu;

	mask = (unsigned long)data->chip_data;
	cpu = irq_choose_cpu(data->affinity);
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}

/* Clear the IRQ's bit in the mask register of the CPU it is routed to. */
static void leon_mask_irq(struct irq_data *data)
{
	unsigned long mask, oldmask, flags;
	int cpu;

	mask = (unsigned long)data->chip_data;
	cpu = irq_choose_cpu(data->affinity);
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}

static unsigned int leon_startup_irq(struct irq_data *data)
{
	irq_link(data->irq);
	leon_unmask_irq(data);
	return 0;
}

static void leon_shutdown_irq(struct irq_data *data)
{
	leon_mask_irq(data);
	irq_unlink(data->irq);
}

/* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl */
static void leon_eoi_irq(struct irq_data *data)
{
	unsigned long mask = (unsigned long)data->chip_data;

	if (mask & LEON_DO_ACK_HW)
		LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW);
}

static struct irq_chip leon_irq = {
	.name			= "leon",
	.irq_startup		= leon_startup_irq,
	.irq_shutdown		= leon_shutdown_irq,
	.irq_mask		= leon_mask_irq,
	.irq_unmask		= leon_unmask_irq,
	.irq_eoi		= leon_eoi_irq,
	.irq_set_affinity	= leon_set_affinity,
};

/*
 * Build a LEON IRQ for the edge triggered LEON IRQ controller:
 *  Edge (normal) IRQ           - handle_simple_irq, ack=DONT-CARE, never ack
 *  Level IRQ (PCI|Level-GPIO)  - handle_fasteoi_irq, ack=1, ack after ISR
 *  Per-CPU Edge                - handle_percpu_irq, ack=0
 */
unsigned int leon_build_device_irq(unsigned int real_irq,
				    irq_flow_handler_t flow_handler,
				    const char *name, int do_ack)
{
	unsigned int irq;
	unsigned long mask;
	struct irq_desc *desc;

	irq = 0;
	mask = leon_get_irqmask(real_irq);
	if (mask == 0)
		goto out;

	irq = irq_alloc(real_irq, real_irq);
	if (irq == 0)
		goto out;

	if (do_ack)
		mask |= LEON_DO_ACK_HW;

	/* Only install handler/chip_data on a not-yet-configured desc. */
	desc = irq_to_desc(irq);
	if (!desc || !desc->handle_irq || desc->handle_irq == handle_bad_irq) {
		irq_set_chip_and_handler_name(irq, &leon_irq,
					      flow_handler, name);
		irq_set_chip_data(irq, (void *)mask);
	}

out:
	return irq;
}

static unsigned int _leon_build_device_irq(struct platform_device *op,
					   unsigned int real_irq)
{
	return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
}

/* Re-install flow handler and ack flag for an already-built virq. */
void leon_update_virq_handling(unsigned int virq,
			      irq_flow_handler_t flow_handler,
			      const char *name, int do_ack)
{
	unsigned long mask = (unsigned long)irq_get_chip_data(virq);

	mask &= ~LEON_DO_ACK_HW;
	if (do_ack)
		mask |= LEON_DO_ACK_HW;

	irq_set_chip_and_handler_name(virq, &leon_irq,
				      flow_handler, name);
	irq_set_chip_data(virq, (void *)mask);
}

/* Cycles elapsed in the current timer period (down-counting timer). */
static u32 leon_cycles_offset(void)
{
	u32 rld, val, off;
	rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld);
	val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
	/* NOTE(review): 'off' is a dead store; the expression is returned
	 * directly below. */
	off = rld - val;
	return rld - val;
}

#ifdef CONFIG_SMP

/* smp clockevent irq */
irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
{
	struct clock_event_device *ce;
	int cpu = smp_processor_id();

	leon_clear_profile_irq(cpu);

	/* Only the boot CPU drives the global timer tick. */
	if (cpu == boot_cpu_id)
		timer_interrupt(irq, NULL);

	ce = &per_cpu(sparc32_clockevent, cpu);

	irq_enter();
	if (ce->event_handler)
		ce->event_handler(ce);
	irq_exit();

	return IRQ_HANDLED;
}

#endif /* CONFIG_SMP */

/*
 * Probe the IRQMP interrupt controller and GPTIMER from the device tree,
 * program the timer for HZ ticks and install the timer interrupt handler.
 * BUGs out if either core cannot be found.
 */
void __init leon_init_timers(void)
{
	int irq, eirq;
	struct device_node *rootnp, *np, *nnp;
	struct property *pp;
	int len;
	int icsel;
	int ampopts;
	int err;
	u32 config;

	sparc_config.get_cycles_offset = leon_cycles_offset;
	sparc_config.cs_period = 1000000 / HZ;
	sparc_config.features |= FEAT_L10_CLOCKSOURCE;

#ifndef CONFIG_SMP
	sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif

	leondebug_irq_disable = 0;
	leon_debug_irqout = 0;
	master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
	dummy_master_l10_counter = 0;

	rootnp = of_find_node_by_path("/ambapp0");
	if (!rootnp)
		goto bad;

	/* Find System ID: GRLIB build ID and optional CHIP ID */
	pp = of_find_property(rootnp, "systemid", &len);
	if (pp)
		amba_system_id = *(unsigned long *)pp->value;

	/* Find IRQMP IRQ Controller Registers base adr otherwise bail out */
	np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
	if (!np) {
		np = of_find_node_by_name(rootnp, "01_00d");
		if (!np)
			goto bad;
	}
	pp = of_find_property(np, "reg", &len);
	if (!pp)
		goto bad;
	leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value;

	/* Find GPTIMER Timer Registers base address otherwise bail out. */
	nnp = rootnp;
	do {
		np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
		if (!np) {
			np = of_find_node_by_name(nnp, "01_011");
			if (!np)
				goto bad;
		}

		ampopts = 0;
		pp = of_find_property(np, "ampopts", &len);
		if (pp) {
			ampopts = *(int *)pp->value;
			if (ampopts == 0) {
				/* Skip this instance, resource already
				 * allocated by other OS */
				nnp = np;
				continue;
			}
		}

		/* Select Timer-Instance on Timer Core. Default is zero */
		leon3_gptimer_idx = ampopts & 0x7;

		pp = of_find_property(np, "reg", &len);
		if (pp)
			leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
						pp->value;
		pp = of_find_property(np, "interrupts", &len);
		if (pp)
			leon3_gptimer_irq = *(unsigned int *)pp->value;
	} while (0);

	if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
		goto bad;

	/* Stop the selected timer and load it with one tick period. */
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
			      (((1000000 / HZ) - 1)));
	LEON3_BYPASS_STORE_PA(
			&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);

	/*
	 * The IRQ controller may (if implemented) consist of multiple
	 * IRQ controllers, each mapped on a 4Kb boundary.
	 * Each CPU may be routed to different IRQCTRLs, however
	 * we assume that all CPUs (in SMP system) is routed to the
	 * same IRQ Controller, and for non-SMP only one IRQCTRL is
	 * accessed anyway.
	 * In AMP systems, Linux must run on CPU0 for the time being.
	 */
	icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]);
	icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf;
	leon3_irqctrl_regs += icsel;

	/* Mask all IRQs on boot-cpu IRQ controller */
	LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0);

	/* Probe extended IRQ controller */
	eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus)
		>> 16) & 0xf;
	if (eirq != 0)
		leon_eirq_setup(eirq);

#ifdef CONFIG_SMP
	{
		unsigned long flags;

		/*
		 * In SMP, sun4m adds a IPI handler to IRQ trap handler that
		 * LEON never must take, sun4d and LEON overwrites the branch
		 * with a NOP.
		 */
		local_irq_save(flags);
		patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
		local_ops->cache_all();
		local_irq_restore(flags);
	}
#endif

	config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
	if (config & (1 << LEON3_GPTIMER_SEPIRQ))
		leon3_gptimer_irq += leon3_gptimer_idx;
	else if ((config & LEON3_GPTIMER_TIMERS) > 1)
		pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");

#ifdef CONFIG_SMP
	/* Install per-cpu IRQ handler for broadcasted ticker */
	irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
				    "per-cpu", 0);
	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
			  IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
#else
	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
#endif
	if (err) {
		pr_err("Unable to attach timer IRQ%d\n", irq);
		prom_halt();
	}

	/* Finally start the timer: enable, auto-reload, load, IRQ enable. */
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
			      LEON3_GPTIMER_EN |
			      LEON3_GPTIMER_RL |
			      LEON3_GPTIMER_LD |
			      LEON3_GPTIMER_IRQEN);
	return;
bad:
	printk(KERN_ERR "No Timer/irqctrl found\n");
	BUG();
	return;
}

/* Nothing to clear; the GPTIMER IRQ is acked in hardware. */
static void leon_clear_clock_irq(void)
{
}

/* Profiling timer is not supported on LEON. */
static void leon_load_profile_irq(int cpu, unsigned int limit)
{
}

/*
 * Give unnamed CPU nodes a "cpuNN" name derived from their "mid"
 * property, so the rest of the sparc code can identify them.
 */
void __init leon_trans_init(struct device_node *dp)
{
	if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
		struct property *p;
		p = of_find_property(dp, "mid", (void *)0);
		if (p) {
			int mid;
			dp->name = prom_early_alloc(5 + 1);
			memcpy(&mid, p->value, p->length);
			sprintf((char *)dp->name, "cpu%.2d", mid);
		}
	}
}

#ifdef CONFIG_SMP
/* No per-CPU profile IRQ state to clear on LEON. */
void leon_clear_profile_irq(int cpu)
{
}

/* Enable an IRQ line in the mask register of the given CPU. */
void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
{
	unsigned long mask, flags, *addr;

	mask = leon_get_irqmask(irq_nr);

	spin_lock_irqsave(&leon_irq_lock, flags);
	addr = (unsigned long *)LEON_IMASK(cpu);
	LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}

#endif

/* Hook the LEON implementations into the generic sparc32 config. */
void __init leon_init_IRQ(void)
{
	sparc_config.init_timers      = leon_init_timers;
	sparc_config.build_device_irq = _leon_build_device_irq;
	sparc_config.clock_rate       = 1000000;
	sparc_config.clear_clock_irq  = leon_clear_clock_irq;
	sparc_config.load_profile_irq = leon_load_profile_irq;
}
gpl-2.0
samnazarko/vero2-linux
drivers/scsi/mac_esp.c
3221
15934
/* mac_esp.c: ESP front-end for Macintosh Quadra systems.
 *
 * Adapted from jazz_esp.c and the old mac_esp.c.
 *
 * The pseudo DMA algorithm is based on the one used in NetBSD.
 * See sys/arch/mac68k/obio/esp.c for some background information.
 *
 * Copyright (C) 2007-2008 Finn Thain
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/nubus.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/macints.h>
#include <asm/macintosh.h>
#include <asm/mac_via.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME "mac_esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "1.000"
#define DRV_MODULE_RELDATE "Sept 15, 2007"

#define MAC_ESP_IO_BASE 0x50F00000
#define MAC_ESP_REGS_QUADRA (MAC_ESP_IO_BASE + 0x10000)
#define MAC_ESP_REGS_QUADRA2 (MAC_ESP_IO_BASE + 0xF000)
#define MAC_ESP_REGS_QUADRA3 (MAC_ESP_IO_BASE + 0x18000)
#define MAC_ESP_REGS_SPACING 0x402
#define MAC_ESP_PDMA_REG 0xF9800024
#define MAC_ESP_PDMA_REG_SPACING 0x4
#define MAC_ESP_PDMA_IO_OFFSET 0x100

#define esp_read8(REG) mac_esp_read8(esp, REG)
#define esp_write8(VAL, REG) mac_esp_write8(esp, VAL, REG)

/* Per-controller private state (PDMA register/IO windows, error flag). */
struct mac_esp_priv {
	struct esp *esp;
	void __iomem *pdma_regs;
	void __iomem *pdma_io;
	int error;
};
/* Up to two ESP chips can share the single edge-triggered SCSI IRQ. */
static struct esp *esp_chips[2];

#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
			       platform_get_drvdata((struct platform_device *) \
						    (esp->dev)))

static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	nubus_writeb(val, esp->regs + reg * 16);
}

static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
{
	return nubus_readb(esp->regs + reg * 16);
}

/* For pseudo DMA and PIO we need the virtual address
 * so this address mapping is the identity mapping.
 */

static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
				     size_t sz, int dir)
{
	return (dma_addr_t)buf;
}

static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			  int num_sg, int dir)
{
	int i;

	for (i = 0; i < num_sg; i++)
		sg[i].dma_address = (u32)sg_virt(&sg[i]);
	return num_sg;
}

static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				 size_t sz, int dir)
{
	/* Nothing to do. */
}

static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			     int num_sg, int dir)
{
	/* Nothing to do. */
}

static void mac_esp_reset_dma(struct esp *esp)
{
	/* Nothing to do. */
}

static void mac_esp_dma_drain(struct esp *esp)
{
	/* Nothing to do. */
}

static void mac_esp_dma_invalidate(struct esp *esp)
{
	/* Nothing to do. */
}

static int mac_esp_dma_error(struct esp *esp)
{
	return MAC_ESP_GET_PRIV(esp)->error;
}

/* Poll until the chip FIFO drains; sets mep->error on timeout. */
static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
{
	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
	int i = 500000;

	do {
		if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES))
			return 0;

		if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
			return 1;

		udelay(2);
	} while (--i);

	printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
	       esp_read8(ESP_STATUS));
	mep->error = 1;
	return 1;
}

/* Poll for DREQ (via VIA2 or the PDMA status register); sets mep->error
 * on timeout. */
static inline int mac_esp_wait_for_dreq(struct esp *esp)
{
	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
	int i = 500000;

	do {
		if (mep->pdma_regs == NULL) {
			if (via2_scsi_drq_pending())
				return 0;
		} else {
			if (nubus_readl(mep->pdma_regs) & 0x200)
				return 0;
		}

		if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
			return 1;

		udelay(2);
	} while (--i);

	printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
	       esp_read8(ESP_STATUS));
	mep->error = 1;
	return 1;
}

/* Unrolled pseudo-DMA copy loop: 16 word moves per iteration of the
 * 32-byte loop, then word and byte remainders.  The __ex_table entries
 * abort the transfer (jump to label 40) on a bus error. */
#define MAC_ESP_PDMA_LOOP(operands) \
	asm volatile ( \
	     "	tstw %1			\n" \
	     "	jbeq 20f		\n" \
	     "1:	movew " operands "	\n" \
	     "2:	movew " operands "	\n" \
	     "3:	movew " operands "	\n" \
	     "4:	movew " operands "	\n" \
	     "5:	movew " operands "	\n" \
	     "6:	movew " operands "	\n" \
	     "7:	movew " operands "	\n" \
	     "8:	movew " operands "	\n" \
	     "9:	movew " operands "	\n" \
	     "10:	movew " operands "	\n" \
	     "11:	movew " operands "	\n" \
	     "12:	movew " operands "	\n" \
	     "13:	movew " operands "	\n" \
	     "14:	movew " operands "	\n" \
	     "15:	movew " operands "	\n" \
	     "16:	movew " operands "	\n" \
	     "	subqw #1,%1		\n" \
	     "	jbne 1b			\n" \
	     "20:	tstw %2			\n" \
	     "	jbeq 30f		\n" \
	     "21:	movew " operands "	\n" \
	     "	subqw #1,%2		\n" \
	     "	jbne 21b		\n" \
	     "30:	tstw %3			\n" \
	     "	jbeq 40f		\n" \
	     "31:	moveb " operands "	\n" \
	     "32:	nop			\n" \
	     "40:				\n" \
	     "					\n" \
	     "	.section __ex_table,\"a\"	\n" \
	     "	.align 4			\n" \
	     "	.long 1b,40b		\n" \
	     "	.long 2b,40b		\n" \
	     "	.long 3b,40b		\n" \
	     "	.long 4b,40b		\n" \
	     "	.long 5b,40b		\n" \
	     "	.long 6b,40b		\n" \
	     "	.long 7b,40b		\n" \
	     "	.long 8b,40b		\n" \
	     "	.long 9b,40b		\n" \
	     "	.long 10b,40b		\n" \
	     "	.long 11b,40b		\n" \
	     "	.long 12b,40b		\n" \
	     "	.long 13b,40b		\n" \
	     "	.long 14b,40b		\n" \
	     "	.long 15b,40b		\n" \
	     "	.long 16b,40b		\n" \
	     "	.long 21b,40b		\n" \
	     "	.long 31b,40b		\n" \
	     "	.long 32b,40b		\n" \
	     "	.previous			\n" \
	     : "+a" (addr), "+r" (count32), "+r" (count2) \
	     : "g" (count1), "a" (mep->pdma_io))

/* Drive one transfer with the pseudo-DMA copy loop, re-reading the
 * transfer counter after reads to resynchronize with the chip. */
static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);

	mep->error = 0;

	if (!write)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

	esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW);
	esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);

	do {
		unsigned int count32 = esp_count >> 5;
		unsigned int count2 = (esp_count & 0x1F) >> 1;
		unsigned int count1 = esp_count & 1;
		unsigned int start_addr = addr;

		if (mac_esp_wait_for_dreq(esp))
			break;

		if (write) {
			MAC_ESP_PDMA_LOOP("%4@,%0@+");

			esp_count -= addr - start_addr;
		} else {
			unsigned int n;

			MAC_ESP_PDMA_LOOP("%0@+,%4@");

			if (mac_esp_wait_for_empty_fifo(esp))
				break;

			n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW);
			addr = start_addr + esp_count - n;
			esp_count = n;
		}
	} while (esp_count);
}

/*
 * Programmed IO routines follow.
 */

/* Poll until the FIFO holds data; returns the byte count or 0 on timeout. */
static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
{
	int i = 500000;

	do {
		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

		if (fbytes)
			return fbytes;

		udelay(2);
	} while (--i);

	printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
	       esp_read8(ESP_STATUS));
	return 0;
}

/* Poll for a chip interrupt; sets mep->error on timeout. */
static inline int mac_esp_wait_for_intr(struct esp *esp)
{
	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
	int i = 500000;

	do {
		esp->sreg = esp_read8(ESP_STATUS);
		if (esp->sreg & ESP_STAT_INTR)
			return 0;

		udelay(2);
	} while (--i);

	printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
	mep->error = 1;
	return 1;
}

/* Copy reg1 bytes one at a time between memory and the FIFO. */
#define MAC_ESP_PIO_LOOP(operands, reg1) \
	asm volatile ( \
	     "1:	moveb " operands "	\n" \
	     "	subqw #1,%1		\n" \
	     "	jbne 1b			\n" \
	     : "+a" (addr), "+r" (reg1) \
	     : "a" (fifo))

/* Fill the 16-byte FIFO with an unrolled burst of byte moves. */
#define MAC_ESP_PIO_FILL(operands, reg1) \
	asm volatile ( \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	moveb " operands "	\n" \
	     "	subqw #8,%1		\n" \
	     "	subqw #8,%1		\n" \
	     : "+a" (addr), "+r" (reg1) \
	     : "a" (fifo))

#define MAC_ESP_FIFO_SIZE 16

/* Transfer esp_count bytes by programmed IO through the chip FIFO,
 * re-arming ESP_CMD_TI between FIFO-sized bursts. */
static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
				 u32 dma_count, int write, u8 cmd)
{
	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
	u8 *fifo = esp->regs + ESP_FDATA * 16;

	cmd &= ~ESP_CMD_DMA;
	mep->error = 0;

	if (write) {
		scsi_esp_cmd(esp, cmd);

		while (1) {
			unsigned int n;

			n = mac_esp_wait_for_fifo(esp);
			if (!n)
				break;

			if (n > esp_count)
				n = esp_count;
			esp_count -= n;

			MAC_ESP_PIO_LOOP("%2@,%0@+", n);

			if (!esp_count)
				break;

			if (mac_esp_wait_for_intr(esp))
				break;

			if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
			    ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
			    ESP_INTR_BSERV)
				break;

			scsi_esp_cmd(esp, ESP_CMD_TI);
		}
	} else {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_count >= MAC_ESP_FIFO_SIZE)
			MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
		else
			MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);

		scsi_esp_cmd(esp, cmd);

		while (esp_count) {
			unsigned int n;

			if (mac_esp_wait_for_intr(esp))
				break;

			if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
			    ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
			    ESP_INTR_BSERV)
				break;

			n = MAC_ESP_FIFO_SIZE -
			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
			if (n > esp_count)
				n = esp_count;

			if (n == MAC_ESP_FIFO_SIZE) {
				MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
			} else {
				esp_count -= n;
				MAC_ESP_PIO_LOOP("%0@+,%2@", n);
			}

			scsi_esp_cmd(esp, ESP_CMD_TI);
		}
	}
}

static int mac_esp_irq_pending(struct esp *esp)
{
	if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
		return 1;
	return 0;
}

/* The chip's 16-bit transfer counter limits a single transfer to 64K-1. */
static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
				    u32 dma_len)
{
	return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}

static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
{
	int got_intr;

	/*
	 * This is an edge triggered IRQ, so we have to be careful to
	 * avoid missing a transition when it is shared by two ESP devices.
	 */

	do {
		got_intr = 0;
		if (esp_chips[0] &&
		    (mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) {
			(void)scsi_esp_intr(irq, esp_chips[0]);
			got_intr = 1;
		}
		if (esp_chips[1] &&
		    (mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) {
			(void)scsi_esp_intr(irq, esp_chips[1]);
			got_intr = 1;
		}
	} while (got_intr);

	return IRQ_HANDLED;
}

static struct esp_driver_ops mac_esp_ops = {
	.esp_write8       = mac_esp_write8,
	.esp_read8        = mac_esp_read8,
	.map_single       = mac_esp_map_single,
	.map_sg           = mac_esp_map_sg,
	.unmap_single     = mac_esp_unmap_single,
	.unmap_sg         = mac_esp_unmap_sg,
	.irq_pending      = mac_esp_irq_pending,
	.dma_length_limit = mac_esp_dma_length_limit,
	.reset_dma        = mac_esp_reset_dma,
	.dma_drain        = mac_esp_dma_drain,
	.dma_invalidate   = mac_esp_dma_invalidate,
	.send_dma_cmd     = mac_esp_send_pdma_cmd,
	.dma_error        = mac_esp_dma_error,
};

/* Probe one of the (at most two) ESP controllers: set up register and
 * PDMA windows by machine type, fall back to PIO where PDMA is absent,
 * and share the single SCSI IRQ between both chips. */
static int esp_mac_probe(struct platform_device *dev)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;
	struct mac_esp_priv *mep;

	if (!MACH_IS_MAC)
		return -ENODEV;

	if (dev->id > 1)
		return -ENODEV;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = 8;
	host->use_clustering = DISABLE_CLUSTERING;
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = dev;

	esp->command_block = kzalloc(16, GFP_KERNEL);
	if (!esp->command_block)
		goto fail_unlink;
	esp->command_block_dma = (dma_addr_t)esp->command_block;

	esp->scsi_id = 7;
	host->this_id = esp->scsi_id;
	esp->scsi_id_mask = 1 << esp->scsi_id;

	mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL);
	if (!mep)
		goto fail_free_command_block;
	mep->esp = esp;
	platform_set_drvdata(dev, mep);

	switch (macintosh_config->scsi_type) {
	case MAC_SCSI_QUADRA:
		esp->cfreq     = 16500000;
		esp->regs      = (void __iomem *)MAC_ESP_REGS_QUADRA;
		mep->pdma_io   = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
		mep->pdma_regs = NULL;
		break;
	case MAC_SCSI_QUADRA2:
		esp->cfreq     = 25000000;
		esp->regs      = (void __iomem *)(MAC_ESP_REGS_QUADRA2 +
				 dev->id * MAC_ESP_REGS_SPACING);
		mep->pdma_io   = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
		mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG +
				 dev->id * MAC_ESP_PDMA_REG_SPACING);
		nubus_writel(0x1d1, mep->pdma_regs);
		break;
	case MAC_SCSI_QUADRA3:
		/* These quadras have a real DMA controller (the PSC) but we
		 * don't know how to drive it so we must use PIO instead.
		 */
		esp->cfreq     = 25000000;
		esp->regs      = (void __iomem *)MAC_ESP_REGS_QUADRA3;
		mep->pdma_io   = NULL;
		mep->pdma_regs = NULL;
		break;
	}

	esp->ops = &mac_esp_ops;
	if (mep->pdma_io == NULL) {
		printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
		esp_write8(0, ESP_TCLOW);
		esp_write8(0, ESP_TCMED);
		esp->flags = ESP_FLAG_DISABLE_SYNC;
		mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
	} else {
		printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
	}

	host->irq = IRQ_MAC_SCSI;
	esp_chips[dev->id] = esp;
	mb();
	/* Only the first probed chip installs the shared IRQ handler. */
	if (esp_chips[!dev->id] == NULL) {
		err = request_irq(host->irq, mac_scsi_esp_intr, 0,
				  "ESP", NULL);
		if (err < 0) {
			esp_chips[dev->id] = NULL;
			goto fail_free_priv;
		}
	}

	err = scsi_esp_register(esp, &dev->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	if (esp_chips[!dev->id] == NULL)
		free_irq(host->irq, esp);
fail_free_priv:
	kfree(mep);
fail_free_command_block:
	kfree(esp->command_block);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

/* Tear down one controller; release the IRQ once neither chip needs it. */
static int esp_mac_remove(struct platform_device *dev)
{
	struct mac_esp_priv *mep = platform_get_drvdata(dev);
	struct esp *esp = mep->esp;
	unsigned int irq = esp->host->irq;

	scsi_esp_unregister(esp);

	esp_chips[dev->id] = NULL;
	if (!(esp_chips[0] || esp_chips[1]))
		free_irq(irq, NULL);

	kfree(mep);

	kfree(esp->command_block);

	scsi_host_put(esp->host);

	return 0;
}

static struct platform_driver esp_mac_driver = {
	.probe    = esp_mac_probe,
	.remove   = esp_mac_remove,
	.driver   = {
		.name	= DRV_MODULE_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init mac_esp_init(void)
{
	return platform_driver_register(&esp_mac_driver);
}

static void __exit mac_esp_exit(void)
{
	platform_driver_unregister(&esp_mac_driver);
}

MODULE_DESCRIPTION("Mac ESP SCSI driver");
MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_MODULE_NAME);

module_init(mac_esp_init);
module_exit(mac_esp_exit);
gpl-2.0
Abhinav1997/android_kernel_sony_riogrande
drivers/i2c/busses/i2c-versatile.c
4245
3578
/* * i2c-versatile.c * * Copyright (C) 2006 ARM Ltd. * written by Russell King, Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #define I2C_CONTROL 0x00 #define I2C_CONTROLS 0x00 #define I2C_CONTROLC 0x04 #define SCL (1 << 0) #define SDA (1 << 1) struct i2c_versatile { struct i2c_adapter adap; struct i2c_algo_bit_data algo; void __iomem *base; }; static void i2c_versatile_setsda(void *data, int state) { struct i2c_versatile *i2c = data; writel(SDA, i2c->base + (state ? I2C_CONTROLS : I2C_CONTROLC)); } static void i2c_versatile_setscl(void *data, int state) { struct i2c_versatile *i2c = data; writel(SCL, i2c->base + (state ? 
I2C_CONTROLS : I2C_CONTROLC)); } static int i2c_versatile_getsda(void *data) { struct i2c_versatile *i2c = data; return !!(readl(i2c->base + I2C_CONTROL) & SDA); } static int i2c_versatile_getscl(void *data) { struct i2c_versatile *i2c = data; return !!(readl(i2c->base + I2C_CONTROL) & SCL); } static struct i2c_algo_bit_data i2c_versatile_algo = { .setsda = i2c_versatile_setsda, .setscl = i2c_versatile_setscl, .getsda = i2c_versatile_getsda, .getscl = i2c_versatile_getscl, .udelay = 30, .timeout = HZ, }; static int i2c_versatile_probe(struct platform_device *dev) { struct i2c_versatile *i2c; struct resource *r; int ret; r = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!r) { ret = -EINVAL; goto err_out; } if (!request_mem_region(r->start, resource_size(r), "versatile-i2c")) { ret = -EBUSY; goto err_out; } i2c = kzalloc(sizeof(struct i2c_versatile), GFP_KERNEL); if (!i2c) { ret = -ENOMEM; goto err_release; } i2c->base = ioremap(r->start, resource_size(r)); if (!i2c->base) { ret = -ENOMEM; goto err_free; } writel(SCL | SDA, i2c->base + I2C_CONTROLS); i2c->adap.owner = THIS_MODULE; strlcpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name)); i2c->adap.algo_data = &i2c->algo; i2c->adap.dev.parent = &dev->dev; i2c->algo = i2c_versatile_algo; i2c->algo.data = i2c; if (dev->id >= 0) { /* static bus numbering */ i2c->adap.nr = dev->id; ret = i2c_bit_add_numbered_bus(&i2c->adap); } else /* dynamic bus numbering */ ret = i2c_bit_add_bus(&i2c->adap); if (ret >= 0) { platform_set_drvdata(dev, i2c); return 0; } iounmap(i2c->base); err_free: kfree(i2c); err_release: release_mem_region(r->start, resource_size(r)); err_out: return ret; } static int i2c_versatile_remove(struct platform_device *dev) { struct i2c_versatile *i2c = platform_get_drvdata(dev); platform_set_drvdata(dev, NULL); i2c_del_adapter(&i2c->adap); return 0; } static struct platform_driver i2c_versatile_driver = { .probe = i2c_versatile_probe, .remove = i2c_versatile_remove, .driver = { .name = 
"versatile-i2c", .owner = THIS_MODULE, }, }; static int __init i2c_versatile_init(void) { return platform_driver_register(&i2c_versatile_driver); } static void __exit i2c_versatile_exit(void) { platform_driver_unregister(&i2c_versatile_driver); } subsys_initcall(i2c_versatile_init); module_exit(i2c_versatile_exit); MODULE_DESCRIPTION("ARM Versatile I2C bus driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:versatile-i2c");
gpl-2.0
DingSoung/linux-3.0.1
drivers/i2c/busses/i2c-versatile.c
4245
3578
/* * i2c-versatile.c * * Copyright (C) 2006 ARM Ltd. * written by Russell King, Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #define I2C_CONTROL 0x00 #define I2C_CONTROLS 0x00 #define I2C_CONTROLC 0x04 #define SCL (1 << 0) #define SDA (1 << 1) struct i2c_versatile { struct i2c_adapter adap; struct i2c_algo_bit_data algo; void __iomem *base; }; static void i2c_versatile_setsda(void *data, int state) { struct i2c_versatile *i2c = data; writel(SDA, i2c->base + (state ? I2C_CONTROLS : I2C_CONTROLC)); } static void i2c_versatile_setscl(void *data, int state) { struct i2c_versatile *i2c = data; writel(SCL, i2c->base + (state ? 
I2C_CONTROLS : I2C_CONTROLC)); } static int i2c_versatile_getsda(void *data) { struct i2c_versatile *i2c = data; return !!(readl(i2c->base + I2C_CONTROL) & SDA); } static int i2c_versatile_getscl(void *data) { struct i2c_versatile *i2c = data; return !!(readl(i2c->base + I2C_CONTROL) & SCL); } static struct i2c_algo_bit_data i2c_versatile_algo = { .setsda = i2c_versatile_setsda, .setscl = i2c_versatile_setscl, .getsda = i2c_versatile_getsda, .getscl = i2c_versatile_getscl, .udelay = 30, .timeout = HZ, }; static int i2c_versatile_probe(struct platform_device *dev) { struct i2c_versatile *i2c; struct resource *r; int ret; r = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!r) { ret = -EINVAL; goto err_out; } if (!request_mem_region(r->start, resource_size(r), "versatile-i2c")) { ret = -EBUSY; goto err_out; } i2c = kzalloc(sizeof(struct i2c_versatile), GFP_KERNEL); if (!i2c) { ret = -ENOMEM; goto err_release; } i2c->base = ioremap(r->start, resource_size(r)); if (!i2c->base) { ret = -ENOMEM; goto err_free; } writel(SCL | SDA, i2c->base + I2C_CONTROLS); i2c->adap.owner = THIS_MODULE; strlcpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name)); i2c->adap.algo_data = &i2c->algo; i2c->adap.dev.parent = &dev->dev; i2c->algo = i2c_versatile_algo; i2c->algo.data = i2c; if (dev->id >= 0) { /* static bus numbering */ i2c->adap.nr = dev->id; ret = i2c_bit_add_numbered_bus(&i2c->adap); } else /* dynamic bus numbering */ ret = i2c_bit_add_bus(&i2c->adap); if (ret >= 0) { platform_set_drvdata(dev, i2c); return 0; } iounmap(i2c->base); err_free: kfree(i2c); err_release: release_mem_region(r->start, resource_size(r)); err_out: return ret; } static int i2c_versatile_remove(struct platform_device *dev) { struct i2c_versatile *i2c = platform_get_drvdata(dev); platform_set_drvdata(dev, NULL); i2c_del_adapter(&i2c->adap); return 0; } static struct platform_driver i2c_versatile_driver = { .probe = i2c_versatile_probe, .remove = i2c_versatile_remove, .driver = { .name = 
"versatile-i2c", .owner = THIS_MODULE, }, }; static int __init i2c_versatile_init(void) { return platform_driver_register(&i2c_versatile_driver); } static void __exit i2c_versatile_exit(void) { platform_driver_unregister(&i2c_versatile_driver); } subsys_initcall(i2c_versatile_init); module_exit(i2c_versatile_exit); MODULE_DESCRIPTION("ARM Versatile I2C bus driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:versatile-i2c");
gpl-2.0
ch33kybutt/kernel_cmplus_tuna
arch/mips/loongson/common/pm.c
10389
3273
/* * loongson-specific suspend support * * Copyright (C) 2009 Lemote Inc. * Author: Wu Zhangjin <wuzhangjin@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/suspend.h> #include <linux/interrupt.h> #include <linux/pm.h> #include <asm/i8259.h> #include <asm/mipsregs.h> #include <loongson.h> static unsigned int __maybe_unused cached_master_mask; /* i8259A */ static unsigned int __maybe_unused cached_slave_mask; static unsigned int __maybe_unused cached_bonito_irq_mask; /* bonito */ void arch_suspend_disable_irqs(void) { /* disable all mips events */ local_irq_disable(); #ifdef CONFIG_I8259 /* disable all events of i8259A */ cached_slave_mask = inb(PIC_SLAVE_IMR); cached_master_mask = inb(PIC_MASTER_IMR); outb(0xff, PIC_SLAVE_IMR); inb(PIC_SLAVE_IMR); outb(0xff, PIC_MASTER_IMR); inb(PIC_MASTER_IMR); #endif /* disable all events of bonito */ cached_bonito_irq_mask = LOONGSON_INTEN; LOONGSON_INTENCLR = 0xffff; (void)LOONGSON_INTENCLR; } void arch_suspend_enable_irqs(void) { /* enable all mips events */ local_irq_enable(); #ifdef CONFIG_I8259 /* only enable the cached events of i8259A */ outb(cached_slave_mask, PIC_SLAVE_IMR); outb(cached_master_mask, PIC_MASTER_IMR); #endif /* enable all cached events of bonito */ LOONGSON_INTENSET = cached_bonito_irq_mask; (void)LOONGSON_INTENSET; } /* * Setup the board-specific events for waking up loongson from wait mode */ void __weak setup_wakeup_events(void) { } /* * Check wakeup events */ int __weak wakeup_loongson(void) { return 1; } /* * If the events are really what we want to wakeup the CPU, wake it up * otherwise put the CPU asleep again. 
*/ static void wait_for_wakeup_events(void) { while (!wakeup_loongson()) LOONGSON_CHIPCFG0 &= ~0x7; } /* * Stop all perf counters * * $24 is the control register of Loongson perf counter */ static inline void stop_perf_counters(void) { __write_64bit_c0_register($24, 0, 0); } static void loongson_suspend_enter(void) { static unsigned int cached_cpu_freq; /* setup wakeup events via enabling the IRQs */ setup_wakeup_events(); stop_perf_counters(); cached_cpu_freq = LOONGSON_CHIPCFG0; /* Put CPU into wait mode */ LOONGSON_CHIPCFG0 &= ~0x7; /* wait for the given events to wakeup cpu from wait mode */ wait_for_wakeup_events(); LOONGSON_CHIPCFG0 = cached_cpu_freq; mmiowb(); } void __weak mach_suspend(void) { } void __weak mach_resume(void) { } static int loongson_pm_enter(suspend_state_t state) { mach_suspend(); /* processor specific suspend */ loongson_suspend_enter(); mach_resume(); return 0; } static int loongson_pm_valid_state(suspend_state_t state) { switch (state) { case PM_SUSPEND_ON: case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: return 1; default: return 0; } } static const struct platform_suspend_ops loongson_pm_ops = { .valid = loongson_pm_valid_state, .enter = loongson_pm_enter, }; static int __init loongson_pm_init(void) { suspend_set_ops(&loongson_pm_ops); return 0; } arch_initcall(loongson_pm_init);
gpl-2.0
chris41g/android_kernel_samsung_epic4gtouch
drivers/firmware/dmi-sysfs.c
10645
17371
/* * dmi-sysfs.c * * This module exports the DMI tables read-only to userspace through the * sysfs file system. * * Data is currently found below * /sys/firmware/dmi/... * * DMI attributes are presented in attribute files with names * formatted using %d-%d, so that the first integer indicates the * structure type (0-255), and the second field is the instance of that * entry. * * Copyright 2011 Google, Inc. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kobject.h> #include <linux/dmi.h> #include <linux/capability.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/io.h> #define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but we consider the top entry type is only 8 bits */ struct dmi_sysfs_entry { struct dmi_header dh; struct kobject kobj; int instance; int position; struct list_head list; struct kobject *child; }; /* * Global list of dmi_sysfs_entry. Even though this should only be * manipulated at setup and teardown, the lazy nature of the kobject * system means we get lazy removes. */ static LIST_HEAD(entry_list); static DEFINE_SPINLOCK(entry_list_lock); /* dmi_sysfs_attribute - Top level attribute. used by all entries. */ struct dmi_sysfs_attribute { struct attribute attr; ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf); }; #define DMI_SYSFS_ATTR(_entry, _name) \ struct dmi_sysfs_attribute dmi_sysfs_attr_##_entry##_##_name = { \ .attr = {.name = __stringify(_name), .mode = 0400}, \ .show = dmi_sysfs_##_entry##_##_name, \ } /* * dmi_sysfs_mapped_attribute - Attribute where we require the entry be * mapped in. Use in conjunction with dmi_sysfs_specialize_attr_ops. 
*/ struct dmi_sysfs_mapped_attribute { struct attribute attr; ssize_t (*show)(struct dmi_sysfs_entry *entry, const struct dmi_header *dh, char *buf); }; #define DMI_SYSFS_MAPPED_ATTR(_entry, _name) \ struct dmi_sysfs_mapped_attribute dmi_sysfs_attr_##_entry##_##_name = { \ .attr = {.name = __stringify(_name), .mode = 0400}, \ .show = dmi_sysfs_##_entry##_##_name, \ } /************************************************* * Generic DMI entry support. *************************************************/ static void dmi_entry_free(struct kobject *kobj) { kfree(kobj); } static struct dmi_sysfs_entry *to_entry(struct kobject *kobj) { return container_of(kobj, struct dmi_sysfs_entry, kobj); } static struct dmi_sysfs_attribute *to_attr(struct attribute *attr) { return container_of(attr, struct dmi_sysfs_attribute, attr); } static ssize_t dmi_sysfs_attr_show(struct kobject *kobj, struct attribute *_attr, char *buf) { struct dmi_sysfs_entry *entry = to_entry(kobj); struct dmi_sysfs_attribute *attr = to_attr(_attr); /* DMI stuff is only ever admin visible */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; return attr->show(entry, buf); } static const struct sysfs_ops dmi_sysfs_attr_ops = { .show = dmi_sysfs_attr_show, }; typedef ssize_t (*dmi_callback)(struct dmi_sysfs_entry *, const struct dmi_header *dh, void *); struct find_dmi_data { struct dmi_sysfs_entry *entry; dmi_callback callback; void *private; int instance_countdown; ssize_t ret; }; static void find_dmi_entry_helper(const struct dmi_header *dh, void *_data) { struct find_dmi_data *data = _data; struct dmi_sysfs_entry *entry = data->entry; /* Is this the entry we want? */ if (dh->type != entry->dh.type) return; if (data->instance_countdown != 0) { /* try the next instance? */ data->instance_countdown--; return; } /* * Don't ever revisit the instance. 
Short circuit later * instances by letting the instance_countdown run negative */ data->instance_countdown--; /* Found the entry */ data->ret = data->callback(entry, dh, data->private); } /* State for passing the read parameters through dmi_find_entry() */ struct dmi_read_state { char *buf; loff_t pos; size_t count; }; static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry, dmi_callback callback, void *private) { struct find_dmi_data data = { .entry = entry, .callback = callback, .private = private, .instance_countdown = entry->instance, .ret = -EIO, /* To signal the entry disappeared */ }; int ret; ret = dmi_walk(find_dmi_entry_helper, &data); /* This shouldn't happen, but just in case. */ if (ret) return -EINVAL; return data.ret; } /* * Calculate and return the byte length of the dmi entry identified by * dh. This includes both the formatted portion as well as the * unformatted string space, including the two trailing nul characters. */ static size_t dmi_entry_length(const struct dmi_header *dh) { const char *p = (const char *)dh; p += dh->length; while (p[0] || p[1]) p++; return 2 + p - (const char *)dh; } /************************************************* * Support bits for specialized DMI entry support *************************************************/ struct dmi_entry_attr_show_data { struct attribute *attr; char *buf; }; static ssize_t dmi_entry_attr_show_helper(struct dmi_sysfs_entry *entry, const struct dmi_header *dh, void *_data) { struct dmi_entry_attr_show_data *data = _data; struct dmi_sysfs_mapped_attribute *attr; attr = container_of(data->attr, struct dmi_sysfs_mapped_attribute, attr); return attr->show(entry, dh, data->buf); } static ssize_t dmi_entry_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct dmi_entry_attr_show_data data = { .attr = attr, .buf = buf, }; /* Find the entry according to our parent and call the * normalized show method hanging off of the attribute */ return find_dmi_entry(to_entry(kobj->parent), 
dmi_entry_attr_show_helper, &data); } static const struct sysfs_ops dmi_sysfs_specialize_attr_ops = { .show = dmi_entry_attr_show, }; /************************************************* * Specialized DMI entry support. *************************************************/ /*** Type 15 - System Event Table ***/ #define DMI_SEL_ACCESS_METHOD_IO8 0x00 #define DMI_SEL_ACCESS_METHOD_IO2x8 0x01 #define DMI_SEL_ACCESS_METHOD_IO16 0x02 #define DMI_SEL_ACCESS_METHOD_PHYS32 0x03 #define DMI_SEL_ACCESS_METHOD_GPNV 0x04 struct dmi_system_event_log { struct dmi_header header; u16 area_length; u16 header_start_offset; u16 data_start_offset; u8 access_method; u8 status; u32 change_token; union { struct { u16 index_addr; u16 data_addr; } io; u32 phys_addr32; u16 gpnv_handle; u32 access_method_address; }; u8 header_format; u8 type_descriptors_supported_count; u8 per_log_type_descriptor_length; u8 supported_log_type_descriptos[0]; } __packed; #define DMI_SYSFS_SEL_FIELD(_field) \ static ssize_t dmi_sysfs_sel_##_field(struct dmi_sysfs_entry *entry, \ const struct dmi_header *dh, \ char *buf) \ { \ struct dmi_system_event_log sel; \ if (sizeof(sel) > dmi_entry_length(dh)) \ return -EIO; \ memcpy(&sel, dh, sizeof(sel)); \ return sprintf(buf, "%u\n", sel._field); \ } \ static DMI_SYSFS_MAPPED_ATTR(sel, _field) DMI_SYSFS_SEL_FIELD(area_length); DMI_SYSFS_SEL_FIELD(header_start_offset); DMI_SYSFS_SEL_FIELD(data_start_offset); DMI_SYSFS_SEL_FIELD(access_method); DMI_SYSFS_SEL_FIELD(status); DMI_SYSFS_SEL_FIELD(change_token); DMI_SYSFS_SEL_FIELD(access_method_address); DMI_SYSFS_SEL_FIELD(header_format); DMI_SYSFS_SEL_FIELD(type_descriptors_supported_count); DMI_SYSFS_SEL_FIELD(per_log_type_descriptor_length); static struct attribute *dmi_sysfs_sel_attrs[] = { &dmi_sysfs_attr_sel_area_length.attr, &dmi_sysfs_attr_sel_header_start_offset.attr, &dmi_sysfs_attr_sel_data_start_offset.attr, &dmi_sysfs_attr_sel_access_method.attr, &dmi_sysfs_attr_sel_status.attr, 
&dmi_sysfs_attr_sel_change_token.attr, &dmi_sysfs_attr_sel_access_method_address.attr, &dmi_sysfs_attr_sel_header_format.attr, &dmi_sysfs_attr_sel_type_descriptors_supported_count.attr, &dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr, NULL, }; static struct kobj_type dmi_system_event_log_ktype = { .release = dmi_entry_free, .sysfs_ops = &dmi_sysfs_specialize_attr_ops, .default_attrs = dmi_sysfs_sel_attrs, }; typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel, loff_t offset); static DEFINE_MUTEX(io_port_lock); static u8 read_sel_8bit_indexed_io(const struct dmi_system_event_log *sel, loff_t offset) { u8 ret; mutex_lock(&io_port_lock); outb((u8)offset, sel->io.index_addr); ret = inb(sel->io.data_addr); mutex_unlock(&io_port_lock); return ret; } static u8 read_sel_2x8bit_indexed_io(const struct dmi_system_event_log *sel, loff_t offset) { u8 ret; mutex_lock(&io_port_lock); outb((u8)offset, sel->io.index_addr); outb((u8)(offset >> 8), sel->io.index_addr + 1); ret = inb(sel->io.data_addr); mutex_unlock(&io_port_lock); return ret; } static u8 read_sel_16bit_indexed_io(const struct dmi_system_event_log *sel, loff_t offset) { u8 ret; mutex_lock(&io_port_lock); outw((u16)offset, sel->io.index_addr); ret = inb(sel->io.data_addr); mutex_unlock(&io_port_lock); return ret; } static sel_io_reader sel_io_readers[] = { [DMI_SEL_ACCESS_METHOD_IO8] = read_sel_8bit_indexed_io, [DMI_SEL_ACCESS_METHOD_IO2x8] = read_sel_2x8bit_indexed_io, [DMI_SEL_ACCESS_METHOD_IO16] = read_sel_16bit_indexed_io, }; static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry, const struct dmi_system_event_log *sel, char *buf, loff_t pos, size_t count) { ssize_t wrote = 0; sel_io_reader io_reader = sel_io_readers[sel->access_method]; while (count && pos < sel->area_length) { count--; *(buf++) = io_reader(sel, pos++); wrote++; } return wrote; } static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry, const struct dmi_system_event_log *sel, char *buf, loff_t pos, 
size_t count) { u8 __iomem *mapped; ssize_t wrote = 0; mapped = ioremap(sel->access_method_address, sel->area_length); if (!mapped) return -EIO; while (count && pos < sel->area_length) { count--; *(buf++) = readb(mapped + pos++); wrote++; } iounmap(mapped); return wrote; } static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry, const struct dmi_header *dh, void *_state) { struct dmi_read_state *state = _state; struct dmi_system_event_log sel; if (sizeof(sel) > dmi_entry_length(dh)) return -EIO; memcpy(&sel, dh, sizeof(sel)); switch (sel.access_method) { case DMI_SEL_ACCESS_METHOD_IO8: case DMI_SEL_ACCESS_METHOD_IO2x8: case DMI_SEL_ACCESS_METHOD_IO16: return dmi_sel_raw_read_io(entry, &sel, state->buf, state->pos, state->count); case DMI_SEL_ACCESS_METHOD_PHYS32: return dmi_sel_raw_read_phys32(entry, &sel, state->buf, state->pos, state->count); case DMI_SEL_ACCESS_METHOD_GPNV: pr_info("dmi-sysfs: GPNV support missing.\n"); return -EIO; default: pr_info("dmi-sysfs: Unknown access method %02x\n", sel.access_method); return -EIO; } } static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { struct dmi_sysfs_entry *entry = to_entry(kobj->parent); struct dmi_read_state state = { .buf = buf, .pos = pos, .count = count, }; return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state); } static struct bin_attribute dmi_sel_raw_attr = { .attr = {.name = "raw_event_log", .mode = 0400}, .read = dmi_sel_raw_read, }; static int dmi_system_event_log(struct dmi_sysfs_entry *entry) { int ret; entry->child = kzalloc(sizeof(*entry->child), GFP_KERNEL); if (!entry->child) return -ENOMEM; ret = kobject_init_and_add(entry->child, &dmi_system_event_log_ktype, &entry->kobj, "system_event_log"); if (ret) goto out_free; ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr); if (ret) goto out_del; return 0; out_del: kobject_del(entry->child); out_free: kfree(entry->child); return ret; } 
/************************************************* * Generic DMI entry support. *************************************************/ static ssize_t dmi_sysfs_entry_length(struct dmi_sysfs_entry *entry, char *buf) { return sprintf(buf, "%d\n", entry->dh.length); } static ssize_t dmi_sysfs_entry_handle(struct dmi_sysfs_entry *entry, char *buf) { return sprintf(buf, "%d\n", entry->dh.handle); } static ssize_t dmi_sysfs_entry_type(struct dmi_sysfs_entry *entry, char *buf) { return sprintf(buf, "%d\n", entry->dh.type); } static ssize_t dmi_sysfs_entry_instance(struct dmi_sysfs_entry *entry, char *buf) { return sprintf(buf, "%d\n", entry->instance); } static ssize_t dmi_sysfs_entry_position(struct dmi_sysfs_entry *entry, char *buf) { return sprintf(buf, "%d\n", entry->position); } static DMI_SYSFS_ATTR(entry, length); static DMI_SYSFS_ATTR(entry, handle); static DMI_SYSFS_ATTR(entry, type); static DMI_SYSFS_ATTR(entry, instance); static DMI_SYSFS_ATTR(entry, position); static struct attribute *dmi_sysfs_entry_attrs[] = { &dmi_sysfs_attr_entry_length.attr, &dmi_sysfs_attr_entry_handle.attr, &dmi_sysfs_attr_entry_type.attr, &dmi_sysfs_attr_entry_instance.attr, &dmi_sysfs_attr_entry_position.attr, NULL, }; static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry, const struct dmi_header *dh, void *_state) { struct dmi_read_state *state = _state; size_t entry_length; entry_length = dmi_entry_length(dh); return memory_read_from_buffer(state->buf, state->count, &state->pos, dh, entry_length); } static ssize_t dmi_entry_raw_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { struct dmi_sysfs_entry *entry = to_entry(kobj); struct dmi_read_state state = { .buf = buf, .pos = pos, .count = count, }; return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state); } static const struct bin_attribute dmi_entry_raw_attr = { .attr = {.name = "raw", .mode = 0400}, .read = dmi_entry_raw_read, }; static void 
dmi_sysfs_entry_release(struct kobject *kobj) { struct dmi_sysfs_entry *entry = to_entry(kobj); sysfs_remove_bin_file(&entry->kobj, &dmi_entry_raw_attr); spin_lock(&entry_list_lock); list_del(&entry->list); spin_unlock(&entry_list_lock); kfree(entry); } static struct kobj_type dmi_sysfs_entry_ktype = { .release = dmi_sysfs_entry_release, .sysfs_ops = &dmi_sysfs_attr_ops, .default_attrs = dmi_sysfs_entry_attrs, }; static struct kobject *dmi_kobj; static struct kset *dmi_kset; /* Global count of all instances seen. Only for setup */ static int __initdata instance_counts[MAX_ENTRY_TYPE + 1]; /* Global positional count of all entries seen. Only for setup */ static int __initdata position_count; static void __init dmi_sysfs_register_handle(const struct dmi_header *dh, void *_ret) { struct dmi_sysfs_entry *entry; int *ret = _ret; /* If a previous entry saw an error, short circuit */ if (*ret) return; /* Allocate and register a new entry into the entries set */ entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { *ret = -ENOMEM; return; } /* Set the key */ memcpy(&entry->dh, dh, sizeof(*dh)); entry->instance = instance_counts[dh->type]++; entry->position = position_count++; entry->kobj.kset = dmi_kset; *ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL, "%d-%d", dh->type, entry->instance); if (*ret) { kfree(entry); return; } /* Thread on the global list for cleanup */ spin_lock(&entry_list_lock); list_add_tail(&entry->list, &entry_list); spin_unlock(&entry_list_lock); /* Handle specializations by type */ switch (dh->type) { case DMI_ENTRY_SYSTEM_EVENT_LOG: *ret = dmi_system_event_log(entry); break; default: /* No specialization */ break; } if (*ret) goto out_err; /* Create the raw binary file to access the entry */ *ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr); if (*ret) goto out_err; return; out_err: kobject_put(entry->child); kobject_put(&entry->kobj); return; } static void cleanup_entry_list(void) { struct dmi_sysfs_entry 
*entry, *next; /* No locks, we are on our way out */ list_for_each_entry_safe(entry, next, &entry_list, list) { kobject_put(entry->child); kobject_put(&entry->kobj); } } static int __init dmi_sysfs_init(void) { int error = -ENOMEM; int val; /* Set up our directory */ dmi_kobj = kobject_create_and_add("dmi", firmware_kobj); if (!dmi_kobj) goto err; dmi_kset = kset_create_and_add("entries", NULL, dmi_kobj); if (!dmi_kset) goto err; val = 0; error = dmi_walk(dmi_sysfs_register_handle, &val); if (error) goto err; if (val) { error = val; goto err; } pr_debug("dmi-sysfs: loaded.\n"); return 0; err: cleanup_entry_list(); kset_unregister(dmi_kset); kobject_put(dmi_kobj); return error; } /* clean up everything. */ static void __exit dmi_sysfs_exit(void) { pr_debug("dmi-sysfs: unloading.\n"); cleanup_entry_list(); kset_unregister(dmi_kset); kobject_put(dmi_kobj); } module_init(dmi_sysfs_init); module_exit(dmi_sysfs_exit); MODULE_AUTHOR("Mike Waychison <mikew@google.com>"); MODULE_DESCRIPTION("DMI sysfs support"); MODULE_LICENSE("GPL");
gpl-2.0
vinylfreak89/monarudo_kernel_M7_port
arch/sh/kernel/cpu/irq/ipr.c
12181
2629
/* * Interrupt handling for IPR-based IRQ. * * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi * Copyright (C) 2000 Kazumoto Kojima * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp> * Copyright (C) 2006 Paul Mundt * * Supported system: * On-chip supporting modules (TMU, RTC, etc.). * On-chip supporting modules for SH7709/SH7709A/SH7729. * Hitachi SolutionEngine external I/O: * MS7709SE01, MS7709ASE01, and MS7750SE01 * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/topology.h> static inline struct ipr_desc *get_ipr_desc(struct irq_data *data) { struct irq_chip *chip = irq_data_get_irq_chip(data); return container_of(chip, struct ipr_desc, chip); } static void disable_ipr_irq(struct irq_data *data) { struct ipr_data *p = irq_data_get_irq_chip_data(data); unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx]; /* Set the priority in IPR to 0 */ __raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr); (void)__raw_readw(addr); /* Read back to flush write posting */ } static void enable_ipr_irq(struct irq_data *data) { struct ipr_data *p = irq_data_get_irq_chip_data(data); unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx]; /* Set priority in IPR back to original value */ __raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr); } /* * The shift value is now the number of bits to shift, not the number of * bits/4. This is to make it easier to read the value directly from the * datasheets. The IPR address is calculated using the ipr_offset table. 
*/ void register_ipr_controller(struct ipr_desc *desc) { int i; desc->chip.irq_mask = disable_ipr_irq; desc->chip.irq_unmask = enable_ipr_irq; for (i = 0; i < desc->nr_irqs; i++) { struct ipr_data *p = desc->ipr_data + i; int res; BUG_ON(p->ipr_idx >= desc->nr_offsets); BUG_ON(!desc->ipr_offsets[p->ipr_idx]); res = irq_alloc_desc_at(p->irq, numa_node_id()); if (unlikely(res != p->irq && res != -EEXIST)) { printk(KERN_INFO "can not get irq_desc for %d\n", p->irq); continue; } disable_irq_nosync(p->irq); irq_set_chip_and_handler_name(p->irq, &desc->chip, handle_level_irq, "level"); irq_set_chip_data(p->irq, p); disable_ipr_irq(irq_get_irq_data(p->irq)); } } EXPORT_SYMBOL(register_ipr_controller);
gpl-2.0
richardtrip/endeavoru
net/netfilter/nf_conntrack_broadcast.c
12437
2130
/* * broadcast connection tracking helper * * (c) 2005 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/ip.h> #include <net/route.h> #include <linux/inetdevice.h> #include <linux/skbuff.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_expect.h> int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int timeout) { struct nf_conntrack_expect *exp; struct iphdr *iph = ip_hdr(skb); struct rtable *rt = skb_rtable(skb); struct in_device *in_dev; struct nf_conn_help *help = nfct_help(ct); __be32 mask = 0; /* we're only interested in locally generated packets */ if (skb->sk == NULL) goto out; if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) goto out; if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) goto out; rcu_read_lock(); in_dev = __in_dev_get_rcu(rt->dst.dev); if (in_dev != NULL) { for_primary_ifa(in_dev) { if (ifa->ifa_broadcast == iph->daddr) { mask = ifa->ifa_mask; break; } } endfor_ifa(in_dev); } rcu_read_unlock(); if (mask == 0) goto out; exp = nf_ct_expect_alloc(ct); if (exp == NULL) goto out; exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port; exp->mask.src.u3.ip = mask; exp->mask.src.u.udp.port = htons(0xFFFF); exp->expectfn = NULL; exp->flags = NF_CT_EXPECT_PERMANENT; exp->class = NF_CT_EXPECT_CLASS_DEFAULT; exp->helper = NULL; nf_ct_expect_related(exp); nf_ct_expect_put(exp); nf_ct_refresh(ct, skb, timeout * HZ); out: return NF_ACCEPT; } EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help); MODULE_LICENSE("GPL");
gpl-2.0
talnoah/N5-kernel
drivers/message/i2o/memory.c
13205
8272
/* * Functions to handle I2O memory * * Pulled from the inlines in i2o headers and uninlined * * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/i2o.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/slab.h> #include "core.h" /* Protects our 32/64bit mask switching */ static DEFINE_MUTEX(mem_lock); /** * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL * @c: I2O controller for which the calculation should be done * @body_size: maximum body size used for message in 32-bit words. * * Return the maximum number of SG elements in a SG list. */ u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size) { i2o_status_block *sb = c->status_block.virt; u16 sg_count = (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) - body_size; if (c->pae_support) { /* * for 64-bit a SG attribute element must be added and each * SG element needs 12 bytes instead of 8. */ sg_count -= 2; sg_count /= 3; } else sg_count /= 2; if (c->short_req && (sg_count > 8)) sg_count = 8; return sg_count; } EXPORT_SYMBOL_GPL(i2o_sg_tablesize); /** * i2o_dma_map_single - Map pointer to controller and fill in I2O message. * @c: I2O controller * @ptr: pointer to the data which should be mapped * @size: size of data in bytes * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE * @sg_ptr: pointer to the SG list inside the I2O message * * This function does all necessary DMA handling and also writes the I2O * SGL elements into the I2O message. For details on DMA handling see also * dma_map_single(). The pointer sg_ptr will only be set to the end of the * SG list if the allocation was successful. * * Returns DMA address which must be checked for failures using * dma_mapping_error(). 
*/ dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, size_t size, enum dma_data_direction direction, u32 ** sg_ptr) { u32 sg_flags; u32 *mptr = *sg_ptr; dma_addr_t dma_addr; switch (direction) { case DMA_TO_DEVICE: sg_flags = 0xd4000000; break; case DMA_FROM_DEVICE: sg_flags = 0xd0000000; break; default: return 0; } dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { *mptr++ = cpu_to_le32(0x7C020002); *mptr++ = cpu_to_le32(PAGE_SIZE); } #endif *mptr++ = cpu_to_le32(sg_flags | size); *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr)); #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr)); #endif *sg_ptr = mptr; } return dma_addr; } EXPORT_SYMBOL_GPL(i2o_dma_map_single); /** * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message. * @c: I2O controller * @sg: SG list to be mapped * @sg_count: number of elements in the SG list * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE * @sg_ptr: pointer to the SG list inside the I2O message * * This function does all necessary DMA handling and also writes the I2O * SGL elements into the I2O message. For details on DMA handling see also * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG * list if the allocation was successful. * * Returns 0 on failure or 1 on success. 
*/ int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg, int sg_count, enum dma_data_direction direction, u32 ** sg_ptr) { u32 sg_flags; u32 *mptr = *sg_ptr; switch (direction) { case DMA_TO_DEVICE: sg_flags = 0x14000000; break; case DMA_FROM_DEVICE: sg_flags = 0x10000000; break; default: return 0; } sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction); if (!sg_count) return 0; #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { *mptr++ = cpu_to_le32(0x7C020002); *mptr++ = cpu_to_le32(PAGE_SIZE); } #endif while (sg_count-- > 0) { if (!sg_count) sg_flags |= 0xC0000000; *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg)); *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg))); #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg))); #endif sg = sg_next(sg); } *sg_ptr = mptr; return 1; } EXPORT_SYMBOL_GPL(i2o_dma_map_sg); /** * i2o_dma_alloc - Allocate DMA memory * @dev: struct device pointer to the PCI device of the I2O controller * @addr: i2o_dma struct which should get the DMA buffer * @len: length of the new DMA memory * * Allocate a coherent DMA memory and write the pointers into addr. * * Returns 0 on success or -ENOMEM on failure. 
*/ int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len) { struct pci_dev *pdev = to_pci_dev(dev); int dma_64 = 0; mutex_lock(&mem_lock); if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_BIT_MASK(64))) { dma_64 = 1; if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { mutex_unlock(&mem_lock); return -ENOMEM; } } addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL); if ((sizeof(dma_addr_t) > 4) && dma_64) if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) printk(KERN_WARNING "i2o: unable to set 64-bit DMA"); mutex_unlock(&mem_lock); if (!addr->virt) return -ENOMEM; memset(addr->virt, 0, len); addr->len = len; return 0; } EXPORT_SYMBOL_GPL(i2o_dma_alloc); /** * i2o_dma_free - Free DMA memory * @dev: struct device pointer to the PCI device of the I2O controller * @addr: i2o_dma struct which contains the DMA buffer * * Free a coherent DMA memory and set virtual address of addr to NULL. */ void i2o_dma_free(struct device *dev, struct i2o_dma *addr) { if (addr->virt) { if (addr->phys) dma_free_coherent(dev, addr->len, addr->virt, addr->phys); else kfree(addr->virt); addr->virt = NULL; } } EXPORT_SYMBOL_GPL(i2o_dma_free); /** * i2o_dma_realloc - Realloc DMA memory * @dev: struct device pointer to the PCI device of the I2O controller * @addr: pointer to a i2o_dma struct DMA buffer * @len: new length of memory * * If there was something allocated in the addr, free it first. If len > 0 * than try to allocate it and write the addresses back to the addr * structure. If len == 0 set the virtual address to NULL. * * Returns the 0 on success or negative error code on failure. */ int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len) { i2o_dma_free(dev, addr); if (len) return i2o_dma_alloc(dev, addr, len); return 0; } EXPORT_SYMBOL_GPL(i2o_dma_realloc); /* * i2o_pool_alloc - Allocate an slab cache and mempool * @mempool: pointer to struct i2o_pool to write data into. 
* @name: name which is used to identify cache * @size: size of each object * @min_nr: minimum number of objects * * First allocates a slab cache with name and size. Then allocates a * mempool which uses the slab cache for allocation and freeing. * * Returns 0 on success or negative error code on failure. */ int i2o_pool_alloc(struct i2o_pool *pool, const char *name, size_t size, int min_nr) { pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL); if (!pool->name) goto exit; strcpy(pool->name, name); pool->slab = kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!pool->slab) goto free_name; pool->mempool = mempool_create_slab_pool(min_nr, pool->slab); if (!pool->mempool) goto free_slab; return 0; free_slab: kmem_cache_destroy(pool->slab); free_name: kfree(pool->name); exit: return -ENOMEM; } EXPORT_SYMBOL_GPL(i2o_pool_alloc); /* * i2o_pool_free - Free slab cache and mempool again * @mempool: pointer to struct i2o_pool which should be freed * * Note that you have to return all objects to the mempool again before * calling i2o_pool_free(). */ void i2o_pool_free(struct i2o_pool *pool) { mempool_destroy(pool->mempool); kmem_cache_destroy(pool->slab); kfree(pool->name); }; EXPORT_SYMBOL_GPL(i2o_pool_free);
gpl-2.0
mattstock/bexkat1-newlib
newlib/libc/sys/linux/dl/dl-init.c
150
4844
/* Return the next shared object initializer function not yet run. Copyright (C) 1995,1996,1998,1999,2000,2001 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #include <stddef.h> #include <ldsodefs.h> /* Type of the initializer. */ typedef void (*init_t) (int, char **, char **); /* Flag, nonzero during startup phase. */ extern int _dl_starting_up; /* The object to be initialized first. */ extern struct link_map *_dl_initfirst; static void call_init (struct link_map *l, int argc, char **argv, char **env) { if (l->l_init_called) /* This object is all done. */ return; /* Avoid handling this constructor again in case we have a circular dependency. */ l->l_init_called = 1; /* Check for object which constructors we do not run here. */ if (__builtin_expect (l->l_name[0], 'a') == '\0' && l->l_type == lt_executable) return; /* Are there any constructors? */ if (l->l_info[DT_INIT] == NULL && __builtin_expect (l->l_info[DT_INIT_ARRAY] == NULL, 1)) return; /* Print a debug message if wanted. */ if (__builtin_expect (_dl_debug_mask & DL_DEBUG_IMPCALLS, 0)) _dl_debug_printf ("\ncalling init: %s\n\n", l->l_name[0] ? l->l_name : _dl_argv[0]); /* Now run the local constructors. 
There are two forms of them: - the one named by DT_INIT - the others in the DT_INIT_ARRAY. */ if (l->l_info[DT_INIT] != NULL) { init_t init = (init_t) DL_DT_INIT_ADDRESS (l, l->l_addr + l->l_info[DT_INIT]->d_un.d_ptr); /* Call the function. */ init (argc, argv, env); } /* Next see whether there is an array with initialization functions. */ if (l->l_info[DT_INIT_ARRAY] != NULL) { unsigned int j; unsigned int jm; ElfW(Addr) *addrs; jm = l->l_info[DT_INIT_ARRAYSZ]->d_un.d_val / sizeof (ElfW(Addr)); addrs = (ElfW(Addr) *) (l->l_info[DT_INIT_ARRAY]->d_un.d_ptr + l->l_addr); for (j = 0; j < jm; ++j) ((init_t) addrs[j]) (argc, argv, env); } } void internal_function _dl_init (struct link_map *main_map, int argc, char **argv, char **env) { ElfW(Dyn) *preinit_array = main_map->l_info[DT_PREINIT_ARRAY]; struct r_debug *r; unsigned int i; if (__builtin_expect (_dl_initfirst != NULL, 0)) { call_init (_dl_initfirst, argc, argv, env); _dl_initfirst = NULL; } /* Don't do anything if there is no preinit array. */ if (__builtin_expect (preinit_array != NULL, 0) && (i = preinit_array->d_un.d_val / sizeof (ElfW(Addr))) > 0) { ElfW(Addr) *addrs; unsigned int cnt; if (__builtin_expect (_dl_debug_mask & DL_DEBUG_IMPCALLS, 0)) _dl_debug_printf ("\ncalling preinit: %s\n\n", main_map->l_name[0] ? main_map->l_name : _dl_argv[0]); addrs = (ElfW(Addr) *) (main_map->l_info[DT_PREINIT_ARRAY]->d_un.d_ptr + main_map->l_addr); for (cnt = 0; cnt < i; ++cnt) ((init_t) addrs[cnt]) (argc, argv, env); } /* Notify the debugger we have added some objects. We need to call _dl_debug_initialize in a static program in case dynamic linking has not been used before. */ r = _dl_debug_initialize (0); r->r_state = RT_ADD; _dl_debug_state (); /* Stupid users forced the ELF specification to be changed. It now says that the dynamic loader is responsible for determining the order in which the constructors have to run. 
The constructors for all dependencies of an object must run before the constructor for the object itself. Circular dependencies are left unspecified. This is highly questionable since it puts the burden on the dynamic loader which has to find the dependencies at runtime instead of letting the user do it right. Stupidity rules! */ i = main_map->l_searchlist.r_nlist; while (i-- > 0) call_init (main_map->l_initfini[i], argc, argv, env); /* Notify the debugger all new objects are now ready to go. */ r->r_state = RT_CONSISTENT; _dl_debug_state (); /* Finished starting up. */ _dl_starting_up = 0; }
gpl-2.0
dmachaty/linux-bananapro
fs/ceph/mds_client.c
150
103342
#include <linux/ceph/ceph_debug.h> #include <linux/fs.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/utsname.h> #include <linux/ratelimit.h> #include "super.h" #include "mds_client.h" #include <linux/ceph/ceph_features.h> #include <linux/ceph/messenger.h> #include <linux/ceph/decode.h> #include <linux/ceph/pagelist.h> #include <linux/ceph/auth.h> #include <linux/ceph/debugfs.h> /* * A cluster of MDS (metadata server) daemons is responsible for * managing the file system namespace (the directory hierarchy and * inodes) and for coordinating shared access to storage. Metadata is * partitioning hierarchically across a number of servers, and that * partition varies over time as the cluster adjusts the distribution * in order to balance load. * * The MDS client is primarily responsible to managing synchronous * metadata requests for operations like open, unlink, and so forth. * If there is a MDS failure, we find out about it when we (possibly * request and) receive a new MDS map, and can resubmit affected * requests. * * For the most part, though, we take advantage of a lossless * communications channel to the MDS, and do not need to worry about * timing out or resubmitting requests. * * We maintain a stateful "session" with each MDS we interact with. * Within each session, we sent periodic heartbeat messages to ensure * any capabilities or leases we have been issues remain valid. If * the session times out and goes stale, our leases and capabilities * are no longer valid. 
*/ struct ceph_reconnect_state { int nr_caps; struct ceph_pagelist *pagelist; bool flock; }; static void __wake_requests(struct ceph_mds_client *mdsc, struct list_head *head); static const struct ceph_connection_operations mds_con_ops; /* * mds reply parsing */ /* * parse individual inode info */ static int parse_reply_info_in(void **p, void *end, struct ceph_mds_reply_info_in *info, u64 features) { int err = -EIO; info->in = *p; *p += sizeof(struct ceph_mds_reply_inode) + sizeof(*info->in->fragtree.splits) * le32_to_cpu(info->in->fragtree.nsplits); ceph_decode_32_safe(p, end, info->symlink_len, bad); ceph_decode_need(p, end, info->symlink_len, bad); info->symlink = *p; *p += info->symlink_len; if (features & CEPH_FEATURE_DIRLAYOUTHASH) ceph_decode_copy_safe(p, end, &info->dir_layout, sizeof(info->dir_layout), bad); else memset(&info->dir_layout, 0, sizeof(info->dir_layout)); ceph_decode_32_safe(p, end, info->xattr_len, bad); ceph_decode_need(p, end, info->xattr_len, bad); info->xattr_data = *p; *p += info->xattr_len; if (features & CEPH_FEATURE_MDS_INLINE_DATA) { ceph_decode_64_safe(p, end, info->inline_version, bad); ceph_decode_32_safe(p, end, info->inline_len, bad); ceph_decode_need(p, end, info->inline_len, bad); info->inline_data = *p; *p += info->inline_len; } else info->inline_version = CEPH_INLINE_NONE; return 0; bad: return err; } /* * parse a normal reply, which may contain a (dir+)dentry and/or a * target inode. 
*/ static int parse_reply_info_trace(void **p, void *end, struct ceph_mds_reply_info_parsed *info, u64 features) { int err; if (info->head->is_dentry) { err = parse_reply_info_in(p, end, &info->diri, features); if (err < 0) goto out_bad; if (unlikely(*p + sizeof(*info->dirfrag) > end)) goto bad; info->dirfrag = *p; *p += sizeof(*info->dirfrag) + sizeof(u32)*le32_to_cpu(info->dirfrag->ndist); if (unlikely(*p > end)) goto bad; ceph_decode_32_safe(p, end, info->dname_len, bad); ceph_decode_need(p, end, info->dname_len, bad); info->dname = *p; *p += info->dname_len; info->dlease = *p; *p += sizeof(*info->dlease); } if (info->head->is_target) { err = parse_reply_info_in(p, end, &info->targeti, features); if (err < 0) goto out_bad; } if (unlikely(*p != end)) goto bad; return 0; bad: err = -EIO; out_bad: pr_err("problem parsing mds trace %d\n", err); return err; } /* * parse readdir results */ static int parse_reply_info_dir(void **p, void *end, struct ceph_mds_reply_info_parsed *info, u64 features) { u32 num, i = 0; int err; info->dir_dir = *p; if (*p + sizeof(*info->dir_dir) > end) goto bad; *p += sizeof(*info->dir_dir) + sizeof(u32)*le32_to_cpu(info->dir_dir->ndist); if (*p > end) goto bad; ceph_decode_need(p, end, sizeof(num) + 2, bad); num = ceph_decode_32(p); info->dir_end = ceph_decode_8(p); info->dir_complete = ceph_decode_8(p); if (num == 0) goto done; BUG_ON(!info->dir_in); info->dir_dname = (void *)(info->dir_in + num); info->dir_dname_len = (void *)(info->dir_dname + num); info->dir_dlease = (void *)(info->dir_dname_len + num); if ((unsigned long)(info->dir_dlease + num) > (unsigned long)info->dir_in + info->dir_buf_size) { pr_err("dir contents are larger than expected\n"); WARN_ON(1); goto bad; } info->dir_nr = num; while (num) { /* dentry */ ceph_decode_need(p, end, sizeof(u32)*2, bad); info->dir_dname_len[i] = ceph_decode_32(p); ceph_decode_need(p, end, info->dir_dname_len[i], bad); info->dir_dname[i] = *p; *p += info->dir_dname_len[i]; dout("parsed dir 
dname '%.*s'\n", info->dir_dname_len[i], info->dir_dname[i]); info->dir_dlease[i] = *p; *p += sizeof(struct ceph_mds_reply_lease); /* inode */ err = parse_reply_info_in(p, end, &info->dir_in[i], features); if (err < 0) goto out_bad; i++; num--; } done: if (*p != end) goto bad; return 0; bad: err = -EIO; out_bad: pr_err("problem parsing dir contents %d\n", err); return err; } /* * parse fcntl F_GETLK results */ static int parse_reply_info_filelock(void **p, void *end, struct ceph_mds_reply_info_parsed *info, u64 features) { if (*p + sizeof(*info->filelock_reply) > end) goto bad; info->filelock_reply = *p; *p += sizeof(*info->filelock_reply); if (unlikely(*p != end)) goto bad; return 0; bad: return -EIO; } /* * parse create results */ static int parse_reply_info_create(void **p, void *end, struct ceph_mds_reply_info_parsed *info, u64 features) { if (features & CEPH_FEATURE_REPLY_CREATE_INODE) { if (*p == end) { info->has_create_ino = false; } else { info->has_create_ino = true; info->ino = ceph_decode_64(p); } } if (unlikely(*p != end)) goto bad; return 0; bad: return -EIO; } /* * parse extra results */ static int parse_reply_info_extra(void **p, void *end, struct ceph_mds_reply_info_parsed *info, u64 features) { if (info->head->op == CEPH_MDS_OP_GETFILELOCK) return parse_reply_info_filelock(p, end, info, features); else if (info->head->op == CEPH_MDS_OP_READDIR || info->head->op == CEPH_MDS_OP_LSSNAP) return parse_reply_info_dir(p, end, info, features); else if (info->head->op == CEPH_MDS_OP_CREATE) return parse_reply_info_create(p, end, info, features); else return -EIO; } /* * parse entire mds reply */ static int parse_reply_info(struct ceph_msg *msg, struct ceph_mds_reply_info_parsed *info, u64 features) { void *p, *end; u32 len; int err; info->head = msg->front.iov_base; p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); /* trace */ ceph_decode_32_safe(&p, end, len, bad); if (len > 0) 
{ ceph_decode_need(&p, end, len, bad); err = parse_reply_info_trace(&p, p+len, info, features); if (err < 0) goto out_bad; } /* extra */ ceph_decode_32_safe(&p, end, len, bad); if (len > 0) { ceph_decode_need(&p, end, len, bad); err = parse_reply_info_extra(&p, p+len, info, features); if (err < 0) goto out_bad; } /* snap blob */ ceph_decode_32_safe(&p, end, len, bad); info->snapblob_len = len; info->snapblob = p; p += len; if (p != end) goto bad; return 0; bad: err = -EIO; out_bad: pr_err("mds parse_reply err %d\n", err); return err; } static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info) { if (!info->dir_in) return; free_pages((unsigned long)info->dir_in, get_order(info->dir_buf_size)); } /* * sessions */ const char *ceph_session_state_name(int s) { switch (s) { case CEPH_MDS_SESSION_NEW: return "new"; case CEPH_MDS_SESSION_OPENING: return "opening"; case CEPH_MDS_SESSION_OPEN: return "open"; case CEPH_MDS_SESSION_HUNG: return "hung"; case CEPH_MDS_SESSION_CLOSING: return "closing"; case CEPH_MDS_SESSION_RESTARTING: return "restarting"; case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting"; default: return "???"; } } static struct ceph_mds_session *get_session(struct ceph_mds_session *s) { if (atomic_inc_not_zero(&s->s_ref)) { dout("mdsc get_session %p %d -> %d\n", s, atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref)); return s; } else { dout("mdsc get_session %p 0 -- FAIL", s); return NULL; } } void ceph_put_mds_session(struct ceph_mds_session *s) { dout("mdsc put_session %p %d -> %d\n", s, atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); if (atomic_dec_and_test(&s->s_ref)) { if (s->s_auth.authorizer) ceph_auth_destroy_authorizer( s->s_mdsc->fsc->client->monc.auth, s->s_auth.authorizer); kfree(s); } } /* * called under mdsc->mutex */ struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, int mds) { struct ceph_mds_session *session; if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL) return NULL; session = 
mdsc->sessions[mds]; dout("lookup_mds_session %p %d\n", session, atomic_read(&session->s_ref)); get_session(session); return session; } static bool __have_session(struct ceph_mds_client *mdsc, int mds) { if (mds >= mdsc->max_sessions) return false; return mdsc->sessions[mds]; } static int __verify_registered_session(struct ceph_mds_client *mdsc, struct ceph_mds_session *s) { if (s->s_mds >= mdsc->max_sessions || mdsc->sessions[s->s_mds] != s) return -ENOENT; return 0; } /* * create+register a new session for given mds. * called under mdsc->mutex. */ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, int mds) { struct ceph_mds_session *s; if (mds >= mdsc->mdsmap->m_max_mds) return ERR_PTR(-EINVAL); s = kzalloc(sizeof(*s), GFP_NOFS); if (!s) return ERR_PTR(-ENOMEM); s->s_mdsc = mdsc; s->s_mds = mds; s->s_state = CEPH_MDS_SESSION_NEW; s->s_ttl = 0; s->s_seq = 0; mutex_init(&s->s_mutex); ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); spin_lock_init(&s->s_gen_ttl_lock); s->s_cap_gen = 0; s->s_cap_ttl = jiffies - 1; spin_lock_init(&s->s_cap_lock); s->s_renew_requested = 0; s->s_renew_seq = 0; INIT_LIST_HEAD(&s->s_caps); s->s_nr_caps = 0; s->s_trim_caps = 0; atomic_set(&s->s_ref, 1); INIT_LIST_HEAD(&s->s_waiting); INIT_LIST_HEAD(&s->s_unsafe); s->s_num_cap_releases = 0; s->s_cap_reconnect = 0; s->s_cap_iterator = NULL; INIT_LIST_HEAD(&s->s_cap_releases); INIT_LIST_HEAD(&s->s_cap_flushing); INIT_LIST_HEAD(&s->s_cap_snaps_flushing); dout("register_session mds%d\n", mds); if (mds >= mdsc->max_sessions) { int newmax = 1 << get_count_order(mds+1); struct ceph_mds_session **sa; dout("register_session realloc to %d\n", newmax); sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); if (sa == NULL) goto fail_realloc; if (mdsc->sessions) { memcpy(sa, mdsc->sessions, mdsc->max_sessions * sizeof(void *)); kfree(mdsc->sessions); } mdsc->sessions = sa; mdsc->max_sessions = newmax; } mdsc->sessions[mds] = s; atomic_inc(&mdsc->num_sessions); 
atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */ ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); return s; fail_realloc: kfree(s); return ERR_PTR(-ENOMEM); } /* * called under mdsc->mutex */ static void __unregister_session(struct ceph_mds_client *mdsc, struct ceph_mds_session *s) { dout("__unregister_session mds%d %p\n", s->s_mds, s); BUG_ON(mdsc->sessions[s->s_mds] != s); mdsc->sessions[s->s_mds] = NULL; ceph_con_close(&s->s_con); ceph_put_mds_session(s); atomic_dec(&mdsc->num_sessions); } /* * drop session refs in request. * * should be last request ref, or hold mdsc->mutex */ static void put_request_session(struct ceph_mds_request *req) { if (req->r_session) { ceph_put_mds_session(req->r_session); req->r_session = NULL; } } void ceph_mdsc_release_request(struct kref *kref) { struct ceph_mds_request *req = container_of(kref, struct ceph_mds_request, r_kref); destroy_reply_info(&req->r_reply_info); if (req->r_request) ceph_msg_put(req->r_request); if (req->r_reply) ceph_msg_put(req->r_reply); if (req->r_inode) { ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); iput(req->r_inode); } if (req->r_locked_dir) ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); iput(req->r_target_inode); if (req->r_dentry) dput(req->r_dentry); if (req->r_old_dentry) dput(req->r_old_dentry); if (req->r_old_dentry_dir) { /* * track (and drop pins for) r_old_dentry_dir * separately, since r_old_dentry's d_parent may have * changed between the dir mutex being dropped and * this request being freed. */ ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir), CEPH_CAP_PIN); iput(req->r_old_dentry_dir); } kfree(req->r_path1); kfree(req->r_path2); if (req->r_pagelist) ceph_pagelist_release(req->r_pagelist); put_request_session(req); ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); kfree(req); } /* * lookup session, bump ref if found. * * called under mdsc->mutex. 
*/ static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc, u64 tid) { struct ceph_mds_request *req; struct rb_node *n = mdsc->request_tree.rb_node; while (n) { req = rb_entry(n, struct ceph_mds_request, r_node); if (tid < req->r_tid) n = n->rb_left; else if (tid > req->r_tid) n = n->rb_right; else { ceph_mdsc_get_request(req); return req; } } return NULL; } static void __insert_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *new) { struct rb_node **p = &mdsc->request_tree.rb_node; struct rb_node *parent = NULL; struct ceph_mds_request *req = NULL; while (*p) { parent = *p; req = rb_entry(parent, struct ceph_mds_request, r_node); if (new->r_tid < req->r_tid) p = &(*p)->rb_left; else if (new->r_tid > req->r_tid) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->r_node, parent, p); rb_insert_color(&new->r_node, &mdsc->request_tree); } /* * Register an in-flight request, and assign a tid. Link to directory * are modifying (if any). * * Called under mdsc->mutex. 
*/ static void __register_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req, struct inode *dir) { req->r_tid = ++mdsc->last_tid; if (req->r_num_caps) ceph_reserve_caps(mdsc, &req->r_caps_reservation, req->r_num_caps); dout("__register_request %p tid %lld\n", req, req->r_tid); ceph_mdsc_get_request(req); __insert_request(mdsc, req); req->r_uid = current_fsuid(); req->r_gid = current_fsgid(); if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK) mdsc->oldest_tid = req->r_tid; if (dir) { ihold(dir); req->r_unsafe_dir = dir; } } static void __unregister_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) { dout("__unregister_request %p tid %lld\n", req, req->r_tid); if (req->r_tid == mdsc->oldest_tid) { struct rb_node *p = rb_next(&req->r_node); mdsc->oldest_tid = 0; while (p) { struct ceph_mds_request *next_req = rb_entry(p, struct ceph_mds_request, r_node); if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) { mdsc->oldest_tid = next_req->r_tid; break; } p = rb_next(p); } } rb_erase(&req->r_node, &mdsc->request_tree); RB_CLEAR_NODE(&req->r_node); if (req->r_unsafe_dir && req->r_got_unsafe) { struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); spin_lock(&ci->i_unsafe_lock); list_del_init(&req->r_unsafe_dir_item); spin_unlock(&ci->i_unsafe_lock); } if (req->r_target_inode && req->r_got_unsafe) { struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); spin_lock(&ci->i_unsafe_lock); list_del_init(&req->r_unsafe_target_item); spin_unlock(&ci->i_unsafe_lock); } if (req->r_unsafe_dir) { iput(req->r_unsafe_dir); req->r_unsafe_dir = NULL; } complete_all(&req->r_safe_completion); ceph_mdsc_put_request(req); } /* * Choose mds to send request to next. If there is a hint set in the * request (e.g., due to a prior forward hint from the mds), use that. * Otherwise, consult frag tree and/or caps to identify the * appropriate mds. If all else fails, choose randomly. * * Called under mdsc->mutex. 
*/ static struct dentry *get_nonsnap_parent(struct dentry *dentry) { /* * we don't need to worry about protecting the d_parent access * here because we never renaming inside the snapped namespace * except to resplice to another snapdir, and either the old or new * result is a valid result. */ while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP) dentry = dentry->d_parent; return dentry; } static int __choose_mds(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) { struct inode *inode; struct ceph_inode_info *ci; struct ceph_cap *cap; int mode = req->r_direct_mode; int mds = -1; u32 hash = req->r_direct_hash; bool is_hash = req->r_direct_is_hash; /* * is there a specific mds we should try? ignore hint if we have * no session and the mds is not up (active or recovering). */ if (req->r_resend_mds >= 0 && (__have_session(mdsc, req->r_resend_mds) || ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { dout("choose_mds using resend_mds mds%d\n", req->r_resend_mds); return req->r_resend_mds; } if (mode == USE_RANDOM_MDS) goto random; inode = NULL; if (req->r_inode) { inode = req->r_inode; } else if (req->r_dentry) { /* ignore race with rename; old or new d_parent is okay */ struct dentry *parent = req->r_dentry->d_parent; struct inode *dir = d_inode(parent); if (dir->i_sb != mdsc->fsc->sb) { /* not this fs! 
*/ inode = d_inode(req->r_dentry); } else if (ceph_snap(dir) != CEPH_NOSNAP) { /* direct snapped/virtual snapdir requests * based on parent dir inode */ struct dentry *dn = get_nonsnap_parent(parent); inode = d_inode(dn); dout("__choose_mds using nonsnap parent %p\n", inode); } else { /* dentry target */ inode = d_inode(req->r_dentry); if (!inode || mode == USE_AUTH_MDS) { /* dir + name */ inode = dir; hash = ceph_dentry_hash(dir, req->r_dentry); is_hash = true; } } } dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, (int)hash, mode); if (!inode) goto random; ci = ceph_inode(inode); if (is_hash && S_ISDIR(inode->i_mode)) { struct ceph_inode_frag frag; int found; ceph_choose_frag(ci, hash, &frag, &found); if (found) { if (mode == USE_ANY_MDS && frag.ndist > 0) { u8 r; /* choose a random replica */ get_random_bytes(&r, 1); r %= frag.ndist; mds = frag.dist[r]; dout("choose_mds %p %llx.%llx " "frag %u mds%d (%d/%d)\n", inode, ceph_vinop(inode), frag.frag, mds, (int)r, frag.ndist); if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= CEPH_MDS_STATE_ACTIVE) return mds; } /* since this file/dir wasn't known to be * replicated, then we want to look for the * authoritative mds. */ mode = USE_AUTH_MDS; if (frag.mds >= 0) { /* choose auth mds */ mds = frag.mds; dout("choose_mds %p %llx.%llx " "frag %u mds%d (auth)\n", inode, ceph_vinop(inode), frag.frag, mds); if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= CEPH_MDS_STATE_ACTIVE) return mds; } } } spin_lock(&ci->i_ceph_lock); cap = NULL; if (mode == USE_AUTH_MDS) cap = ci->i_auth_cap; if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); if (!cap) { spin_unlock(&ci->i_ceph_lock); goto random; } mds = cap->session->s_mds; dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", inode, ceph_vinop(inode), mds, cap == ci->i_auth_cap ? 
"auth " : "", cap); spin_unlock(&ci->i_ceph_lock); return mds; random: mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); dout("choose_mds chose random mds%d\n", mds); return mds; } /* * session messages */ static struct ceph_msg *create_session_msg(u32 op, u64 seq) { struct ceph_msg *msg; struct ceph_mds_session_head *h; msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, false); if (!msg) { pr_err("create_session_msg ENOMEM creating msg\n"); return NULL; } h = msg->front.iov_base; h->op = cpu_to_le32(op); h->seq = cpu_to_le64(seq); return msg; } /* * session message, specialization for CEPH_SESSION_REQUEST_OPEN * to include additional client metadata fields. */ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq) { struct ceph_msg *msg; struct ceph_mds_session_head *h; int i = -1; int metadata_bytes = 0; int metadata_key_count = 0; struct ceph_options *opt = mdsc->fsc->client->options; void *p; const char* metadata[][2] = { {"hostname", utsname()->nodename}, {"kernel_version", utsname()->release}, {"entity_id", opt->name ? 
			      opt->name : ""},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	metadata_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		/* 8 = two u32 length prefixes (key and value) */
		metadata_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION,
			   sizeof(*h) + metadata_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p = msg->front.iov_base + sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	return msg;
}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active?
	 */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	/* look up an existing session, creating one if necessary */
	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	/* kick off the open handshake if the session isn't live yet */
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}

/*
 * Locked wrapper around __open_export_target_session(); the returned
 * session holds a reference the caller must drop.
 */
struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}

/*
 * Open a session to each export target listed for @session's mds in
 * the mdsmap.  Called under mdsc->mutex.
 */
static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	/* mds may no longer be present in the current map */
	if (mds >= mdsc->mdsmap->m_max_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}

/* locked wrapper around __open_export_target_sessions() */
void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}

/*
 * session caps
 */

/* caller holds s_cap_lock,
 we drop it */
static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
	__releases(session->s_cap_lock)
{
	LIST_HEAD(tmp_list);

	/* detach the pending release list under the lock, then drop it
	 * so ceph_put_cap() below runs lock-free */
	list_splice_init(&session->s_cap_releases, &tmp_list);
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	dout("cleanup_cap_releases mds%d\n", session->s_mds);
	while (!list_empty(&tmp_list)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(&tmp_list,
					struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

/*
 * Drop all unsafe (not yet committed) requests bound to @session and
 * reset r_attempts on its remaining requests so they get resent.
 */
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				    struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request,
				       r_unsafe_item);
		list_del_init(&req->r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		/* skip inodes being freed; igrab() fails for them */
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		/* mark our position so a racing removal can find us */
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		/* drop the previous iteration's references outside the
		 * lock (iput/ceph_put_cap may sleep) */
		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (cap->ci == NULL) {
			/* __ceph_remove_cap() raced with us and deferred
			 * the unlink to the iterator; finish it here */
			dout("iterate_session_caps  finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			if (cap->queue_release) {
				list_add_tail(&cap->session_caps,
					      &session->s_cap_releases);
				session->s_num_cap_releases++;
			} else {
				old_cap = cap;  /* put_cap it w/o locks held */
			}
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}

/*
 * iterate_session_caps() callback: tear down one cap and, if it was
 * the last (auth) cap, discard any dirty/flushing state for the inode.
 */
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	int drop = 0;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		/* collect all pending cap flushes for later freeing */
		while (true) {
			struct rb_node *n = rb_first(&ci->i_cap_flush_tree);
			if (!n)
				break;
			cf = rb_entry(n, struct ceph_cap_flush, i_node);
			rb_erase(&cf->i_node,
				 &ci->i_cap_flush_tree);
			list_add(&cf->list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		/* also unlink them from the global flush tree */
		list_for_each_entry(cf, &to_remove, list)
			rb_erase(&cf->g_node, &mdsc->cap_flush_tree);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = 1;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = 1;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	/* free the collected flush records outside i_ceph_lock */
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, list);
		list_del(&cf->list);
		ceph_free_cap_flush(cf);
	}
	/* drop the inode references that dirty/flushing state held */
	while (drop--)
		iput(inode);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct super_block *sb = session->s_mdsc->fsc->sb;
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			/* no forward progress -> stop to avoid spinning */
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			/* lookup blocks until any in-flight freeing of
			 * this inode completes */
			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	cleanup_cap_releases(session->s_mdsc, session);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
}

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	wake_up_all(&ci->i_cap_wq);
	/* non-NULL arg means "reconnect": clear max-size requests so
	 * they get re-issued to the recovered mds */
	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients.
	 */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
		ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Acknowledge a CEPH_SESSION_FLUSHMSG from the mds by echoing back
 * its sequence number.
 */
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	/* new ttl dates from when the renew was *requested* */
	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			/* wake waiters below, once the lock is dropped */
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ?
"stale" : "fresh"); spin_unlock(&session->s_cap_lock); if (wake) wake_up_session_caps(session, 0); } /* * send a session close request */ static int request_close_session(struct ceph_mds_client *mdsc, struct ceph_mds_session *session) { struct ceph_msg *msg; dout("request_close_session mds%d state %s seq %lld\n", session->s_mds, ceph_session_state_name(session->s_state), session->s_seq); msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); if (!msg) return -ENOMEM; ceph_con_send(&session->s_con, msg); return 0; } /* * Called with s_mutex held. */ static int __close_session(struct ceph_mds_client *mdsc, struct ceph_mds_session *session) { if (session->s_state >= CEPH_MDS_SESSION_CLOSING) return 0; session->s_state = CEPH_MDS_SESSION_CLOSING; return request_close_session(mdsc, session); } /* * Trim old(er) caps. * * Because we can't cache an inode without one or more caps, we do * this indirectly: if a cap is unused, we prune its aliases, at which * point the inode will hopefully get dropped to. * * Yes, this is a bit sloppy. Our only real goal here is to respond to * memory pressure from the MDS, though, so it needn't be perfect. */ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) { struct ceph_mds_session *session = arg; struct ceph_inode_info *ci = ceph_inode(inode); int used, wanted, oissued, mine; if (session->s_trim_caps <= 0) return -1; spin_lock(&ci->i_ceph_lock); mine = cap->issued | cap->implemented; used = __ceph_caps_used(ci); wanted = __ceph_caps_file_wanted(ci); oissued = __ceph_caps_issued_other(ci, cap); dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n", inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued), ceph_cap_string(used), ceph_cap_string(wanted)); if (cap == ci->i_auth_cap) { if (ci->i_dirty_caps || ci->i_flushing_caps || !list_empty(&ci->i_cap_snaps)) goto out; if ((used | wanted) & CEPH_CAP_ANY_WR) goto out; } /* The inode has cached pages, but it's no longer used. 
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
	} else {
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}

/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		/* s_trim_caps is the remaining quota consumed by the cb */
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
			trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}

	ceph_send_cap_releases(mdsc, session);
	return 0;
}

/*
 * Return nonzero once the oldest pending capsnap on @ci has caught up
 * with @want_snap_seq (or there is nothing pending).
 */
static int check_capsnap_flush(struct ceph_inode_info *ci,
			       u64 want_snap_seq)
{
	int ret = 1;
	spin_lock(&ci->i_ceph_lock);
	if (want_snap_seq > 0 && !list_empty(&ci->i_cap_snaps)) {
		struct ceph_cap_snap *capsnap =
			list_first_entry(&ci->i_cap_snaps,
					 struct ceph_cap_snap, ci_item);
		ret = capsnap->follows >= want_snap_seq;
	}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Return nonzero once every cap flush with tid <= @want_flush_tid has
 * completed (left the global flush tree).
 */
static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	struct rb_node *n;
	struct ceph_cap_flush *cf;
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	n = rb_first(&mdsc->cap_flush_tree);
	cf = n ?
	     rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
	if (cf && cf->tid <= want_flush_tid) {
		dout("check_caps_flush still flushing tid %llu <= %llu\n",
		     cf->tid, want_flush_tid);
		ret = 0;
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}

/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid, u64 want_snap_seq)
{
	int mds;

	dout("check_caps_flush want %llu snap want %llu\n",
	     want_flush_tid, want_snap_seq);
	mutex_lock(&mdsc->mutex);
	/* mds only advances when the session has nothing left to wait on */
	for (mds = 0; mds < mdsc->max_sessions; ) {
		struct ceph_mds_session *session = mdsc->sessions[mds];
		struct inode *inode = NULL;

		if (!session) {
			mds++;
			continue;
		}
		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_snaps_flushing)) {
			struct ceph_cap_snap *capsnap =
				list_first_entry(&session->s_cap_snaps_flushing,
						 struct ceph_cap_snap,
						 flushing_item);
			struct ceph_inode_info *ci = capsnap->ci;
			if (!check_capsnap_flush(ci, want_snap_seq)) {
				dout("check_cap_flush still flushing snap %p "
				     "follows %lld <= %lld to mds%d\n",
				     &ci->vfs_inode, capsnap->follows,
				     want_snap_seq, mds);
				/* pin the inode so we can wait on it with
				 * all locks dropped */
				inode = igrab(&ci->vfs_inode);
			}
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		if (inode) {
			wait_event(mdsc->cap_flushing_wq,
				   check_capsnap_flush(ceph_inode(inode),
						       want_snap_seq));
			iput(inode);
		} else {
			mds++;
		}

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}

/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session)
{
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases,
			 &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	/* batch releases into CAPRELEASE messages, flushing each message
	 * when it reaches CEPH_CAPS_PER_RELEASE entries */
	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					PAGE_CACHE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);
		}
		cap = list_first_entry(&tmp_list, struct ceph_cap,
					session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		le32_add_cpu(&head->num, 1);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);

		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			dout("send_cap_releases mds%d %p\n",
			     session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	/* more releases may have been queued while we were sending */
	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	/* flush the final, partially-filled message */
	if (msg) {
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err("send_cap_releases mds%d, failed to allocate message\n",
		session->s_mds);
	/* put the unsent releases back so they aren't lost */
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}

/*
 * requests
 */

/*
 * Size and allocate the buffer that will hold a readdir reply for
 * @dir, based on the directory's entry count and mount options.
 */
int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(*rinfo->dir_in) + sizeof(*rinfo->dir_dname_len) +
		      sizeof(*rinfo->dir_dname) + sizeof(*rinfo->dir_dlease);
	int order, num_entries;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1);
	num_entries = min(num_entries, opt->max_readdir);

	/* try progressively smaller allocations until one succeeds */
	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_in = (void*)__get_free_pages(GFP_KERNEL |
							__GFP_NOWARN,
							order);
		if (rinfo->dir_in)
			break;
		order--;
	}
	if (!rinfo->dir_in)
		return -ENOMEM;

	/* recompute how many entries actually fit in what we got */
	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}

/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	req->r_stamp = CURRENT_TIME;

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}

/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	return mdsc->oldest_tid;
}

/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.
 Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;
	unsigned seq;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	/* rename_lock seqlock guards against a concurrent rename
	 * changing the path under us; retry if it moved */
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	/* first pass: measure the path length */
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	rcu_read_lock();
	/* second pass: fill the buffer backwards, leaf to root */
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				/* name grew since pass one; retry below */
				spin_unlock(&temp->d_lock);
				break;
			}
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
		pr_err("build_path did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(d_inode(temp));
	*plen = len;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, d_count(dentry), *base, len, path);
	return path;
}

/*
 * Describe @dentry as (ino, path) for an mds request.  Uses the bare
 * parent-ino + name form when possible; otherwise builds a full path
 * (which the caller must kfree -- signalled via *pfreepath).
 */
static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
			     int *pfreepath)
{
	char *path;

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
		*pino = ceph_ino(d_inode(dentry->d_parent));
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

/*
 * Describe @inode as (ino, path); snapped inodes go through an alias
 * dentry to construct a path, others are addressed by ino alone.
 */
static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ?
			   strlen(rpath) : 0;
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}

/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds, bool drop_cap_releases)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	/* primary target: inode, dentry, or explicit ino+path */
	ret = set_request_path_attr(req->r_inode, req->r_dentry,
			      req->r_path1, req->r_ino1.ino,
			      &path1, &pathlen1, &ino1, &freepath1);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	/* secondary target (e.g. rename source) */
	ret = set_request_path_attr(NULL, req->r_old_dentry,
			      req->r_path2, req->r_ino2.ino,
			      &path2, &pathlen2, &ino2, &freepath2);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	/* head + two encoded filepaths + timestamp */
	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
		sizeof(struct ceph_timespec);

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
		goto out_free2;
	}

	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.tid = cpu_to_le64(req->r_tid);

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
	head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
		       mds, req->r_dentry_drop, req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
		       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      d_inode(req->r_old_dentry),
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);

	if (drop_cap_releases) {
		/* discard the releases just encoded: rewind the write
		 * pointer to the recorded offset */
		releases = 0;
		p = msg->front.iov_base + req->r_request_release_offset;
	}

	head->num_releases = cpu_to_le16(releases);

	/* time stamp */
	{
		struct ceph_timespec ts;
		ceph_encode_timespec(&ts, &req->r_stamp);
		ceph_encode_copy(&p, &ts, sizeof(ts));
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	if (req->r_pagelist) {
		struct ceph_pagelist *pagelist = req->r_pagelist;
		/* message takes its own ref on the pagelist */
		atomic_inc(&pagelist->refcnt);
		ceph_msg_data_add_pagelist(msg, pagelist);
		msg->hdr.data_len = cpu_to_le32(pagelist->length);
	} else {
		msg->hdr.data_len = 0;
	}

	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		kfree((char *)path2);
out_free1:
	if (freepath1)
		kfree((char *)path1);
out:
	return msg;
}

/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	/* a registered callback takes precedence over the completion */
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete_all(&req->r_completion);
}

/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds, bool drop_cap_releases)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_attempts++;
	if (req->r_inode) {
		struct ceph_cap *cap =
			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

		if (cap)
			req->r_sent_on_mseq = cap->mseq;
		else
			req->r_sent_on_mseq = -1;
	}
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (req->r_got_unsafe) {
		void *p;
		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = msg->front.iov_base;

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;

		/* time stamp */
		p = msg->front.iov_base + req->r_request_release_offset;
		{
			struct ceph_timespec ts;
			ceph_encode_timespec(&ts, &req->r_stamp);
			ceph_encode_copy(&p, &ts, sizeof(ts));
		}

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		return 0;
	}

	/* not a replay: build a fresh request message */
	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds, drop_cap_releases);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (req->r_got_unsafe)
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_locked_dir)
		flags
		      |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;
	rhead->ino = 0;

	dout(" r_locked_dir = %p\n", req->r_locked_dir);

	return 0;
}

/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = 0;

	/* request already failed or completed */
	if (req->r_err || req->r_got_result) {
		if (req->r_aborted)
			__unregister_request(mdsc, req);
		goto out;
	}

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}
	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout("do_request forced umount\n");
		err = -EIO;
		goto finish;
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		/* park the request until a usable mdsmap arrives */
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = get_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     ceph_session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		/* session not ready: (re)open it and queue the request */
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds, false);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con,
req->r_request); } out_session: ceph_put_mds_session(session); finish: if (err) { dout("__do_request early error %d\n", err); req->r_err = err; complete_request(mdsc, req); __unregister_request(mdsc, req); } out: return err; } /* * called under mdsc->mutex */ static void __wake_requests(struct ceph_mds_client *mdsc, struct list_head *head) { struct ceph_mds_request *req; LIST_HEAD(tmp_list); list_splice_init(head, &tmp_list); while (!list_empty(&tmp_list)) { req = list_entry(tmp_list.next, struct ceph_mds_request, r_wait); list_del_init(&req->r_wait); dout(" wake request %p tid %llu\n", req, req->r_tid); __do_request(mdsc, req); } } /* * Wake up threads with requests pending for @mds, so that they can * resubmit their requests to a possibly different mds. */ static void kick_requests(struct ceph_mds_client *mdsc, int mds) { struct ceph_mds_request *req; struct rb_node *p = rb_first(&mdsc->request_tree); dout("kick_requests mds%d\n", mds); while (p) { req = rb_entry(p, struct ceph_mds_request, r_node); p = rb_next(p); if (req->r_got_unsafe) continue; if (req->r_attempts > 0) continue; /* only new requests */ if (req->r_session && req->r_session->s_mds == mds) { dout(" kicking tid %llu\n", req->r_tid); list_del_init(&req->r_wait); __do_request(mdsc, req); } } } void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) { dout("submit_request on %p\n", req); mutex_lock(&mdsc->mutex); __register_request(mdsc, req, NULL); __do_request(mdsc, req); mutex_unlock(&mdsc->mutex); } /* * Synchrously perform an mds request. Take care of all of the * session setup, forwarding, retry details. 
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
	/* NOTE(review): these pins are presumably dropped when the request
	 * is torn down (not visible in this chunk) — confirm in the
	 * request release path. */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	/* issue */
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);

	if (req->r_err) {
		err = req->r_err;
		goto out;
	}

	/* wait */
	mutex_unlock(&mdsc->mutex);
	dout("do_request waiting\n");
	if (!req->r_timeout && req->r_wait_for_completion) {
		/* caller-supplied wait (no timeout configured) */
		err = req->r_wait_for_completion(mdsc, req);
	} else {
		long timeleft = wait_for_completion_killable_timeout(
					&req->r_completion,
					ceph_timeout_jiffies(req->r_timeout));
		if (timeleft > 0)
			err = 0;
		else if (!timeleft)
			err = -EIO;  /* timed out */
		else
			err = timeleft;  /* killed */
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (req->r_got_result) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * ensure we aren't running concurrently with
		 * ceph_fill_trace or ceph_readdir_prepopulate, which
		 * rely on locks (dir mutex) held by our caller.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		req->r_aborted = true;
		mutex_unlock(&req->r_fill_mutex);

		/* a namespace-mutating op may have partially completed on
		 * the MDS; drop cached dir state we can no longer trust */
		if (req->r_locked_dir &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	} else {
		err = req->r_err;
	}

out:
	mutex_unlock(&mdsc->mutex);
	dout("do_request %p done, result %d\n", req, err);
	return err;
}

/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *inode = req->r_locked_dir;

	dout("invalidate_dir_request %p (complete, lease(s))\n", inode);

	ceph_dir_clear_complete(inode);
	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}

/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	struct ceph_snap_realm *realm;
	u64 tid;
	int err, result;
	int mds = session->s_mds;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		ceph_msg_dump(msg);
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	/* dup? */
	if ((req->r_got_unsafe && !head->safe) ||
	    (req->r_got_safe && head->safe)) {
		pr_warn("got a dup %s reply on %llu from mds%d\n",
			head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	if (req->r_got_safe) {
		pr_warn("got unsafe after safe on %llu from mds%d\n",
			tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	/*
	 * Handle an ESTALE
	 * if we're not talking to the authority, send to them
	 * if the authority has changed while we weren't looking,
	 * send to new authority
	 * Otherwise we just have to return an ESTALE
	 */
	if (result == -ESTALE) {
		dout("got ESTALE on request %llu", req->r_tid);
		req->r_resend_mds = -1;
		if (req->r_direct_mode != USE_AUTH_MDS) {
			dout("not using auth, setting for that now");
			req->r_direct_mode = USE_AUTH_MDS;
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
			goto out;
		} else {
			int mds = __choose_mds(mdsc, req);
			if (mds >= 0 && mds != req->r_session->s_mds) {
				dout("but auth changed, so resending");
				__do_request(mdsc, req);
				mutex_unlock(&mdsc->mutex);
				goto out;
			}
		}
		dout("have to return ESTALE on request %llu", req->r_tid);
	}

	if (head->safe) {
		req->r_got_safe = true;
		__unregister_request(mdsc, req);

		if (req->r_got_unsafe) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);
			list_del_init(&req->r_unsafe_item);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_req(mdsc))
				complete_all(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		/* first (unsafe) reply: track it until the safe one lands */
		req->r_got_unsafe = true;
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
		if (req->r_unsafe_dir) {
			struct ceph_inode_info *ci =
					ceph_inode(req->r_unsafe_dir);
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_dir_item,
				      &ci->i_unsafe_dirops);
			spin_unlock(&ci->i_unsafe_lock);
		}
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
		ceph_msg_dump(msg);
		goto out_err;
	}

	/* snap trace */
	realm = NULL;
	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
				rinfo->snapblob + rinfo->snapblob_len,
				le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
				&realm);
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	mutex_lock(&req->r_fill_mutex);
	err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
	if (err == 0) {
		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
				    req->r_op == CEPH_MDS_OP_LSSNAP))
			ceph_readdir_prepopulate(req, req->r_session);
		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
	}
	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);
	if (realm)
		ceph_put_snap_realm(mdsc, realm);

	if (err == 0 && req->r_got_unsafe && req->r_target_inode) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
		spin_unlock(&ci->i_unsafe_lock);
	}
out_err:
	mutex_lock(&mdsc->mutex);
	if (!req->r_aborted) {
		if (err) {
			req->r_err = err;
		} else {
			req->r_reply = ceph_msg_get(msg);
			req->r_got_result = true;
		}
	} else {
		dout("reply arrived after request %lld was aborted\n", tid);
	}
	mutex_unlock(&mdsc->mutex);

	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
out:
	ceph_mdsc_put_request(req);
	return;
}

/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 next_mds;
	u32 fwd_seq;
	int err = -EINVAL;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		goto out;  /* dup reply? */
	}

	if (req->r_aborted) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		/* stale forward notification; ignore */
		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_err);
		BUG_ON(req->r_got_result);
		req->r_attempts = 0;
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
}

/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	u32 op;
	u64 seq;
	int mds = session->s_mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;
	int wake = 0;

	/* decode */
	if (msg->front.iov_len != sizeof(*h))
		goto bad;
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE)
		__unregister_session(mdsc, session);
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     ceph_session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		/* bump cap generation so stale caps/leases are ignored */
		spin_lock(&session->s_gen_ttl_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = jiffies - 1;
		spin_unlock(&session->s_gen_ttl_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	case CEPH_SESSION_FLUSHMSG:
		send_flushmsg_ack(mdsc, session, seq);
		break;

	case CEPH_SESSION_FORCE_RO:
		dout("force_session_readonly %p\n", session);
		spin_lock(&session->s_cap_lock);
		session->s_readonly = true;
		spin_unlock(&session->s_cap_lock);
		wake_up_session_caps(session, 0);
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		if (wake == 2)
			kick_requests(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
	}
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	ceph_msg_dump(msg);
	return;
}

/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	struct rb_node *p;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds, true);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}

	/*
	 * also re-send old requests when MDS enters reconnect stage. So that MDS
	 * can process completed request in clientreplay stage.
	 */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_got_unsafe)
			continue;
		if (req->r_attempts == 0)
			continue; /* only old requests */
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds) {
			err = __prepare_send_request(mdsc, req,
						     session->s_mds, true);
			if (!err) {
				ceph_msg_get(req->r_request);
				ceph_con_send(&session->s_con, req->r_request);
			}
		}
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	size_t reclen;
	struct ceph_inode_info *ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	char *path;
	int pathlen, err;
	u64 pathbase;
	struct dentry *dentry;

	ci = cap->ci;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
	if (err)
		return err;

	dentry = d_find_alias(inode);
	if (dentry) {
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_dput;
		}
	} else {
		path = NULL;
		pathlen = 0;
	}
	err = ceph_pagelist_encode_string(pagelist, path, pathlen);
	if (err)
		goto out_free;

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	cap->mseq = 0;       /* and migrate_seq */
	cap->cap_gen = cap->session->s_cap_gen;

	/* v2 record if the peer speaks FLOCK, otherwise legacy v1 */
	if (recon_state->flock) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = 0;
		reclen = sizeof(rec.v2);
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
		reclen = sizeof(rec.v1);
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->flock) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_filelock *flocks;

encode_again:
		/* count, then snapshot the locks; if more locks appeared
		 * between counting and encoding (-ENOSPC), retry */
		ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
				 sizeof(struct ceph_filelock), GFP_NOFS);
		if (!flocks) {
			err = -ENOMEM;
			goto out_free;
		}
		err = ceph_encode_locks_to_buffer(inode, flocks,
						  num_fcntl_locks,
						  num_flock_locks);
		if (err) {
			kfree(flocks);
			if (err == -ENOSPC)
				goto encode_again;
			goto out_free;
		}
		/*
		 * number of encoded locks is stable, so copy to pagelist
		 */
		rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
				    (num_fcntl_locks+num_flock_locks) *
				    sizeof(struct ceph_filelock));
		err = ceph_pagelist_append(pagelist, &rec, reclen);
		if (!err)
			err = ceph_locks_to_pagelist(flocks, pagelist,
						     num_fcntl_locks,
						     num_flock_locks);
		kfree(flocks);
	} else {
		err = ceph_pagelist_append(pagelist, &rec, reclen);
	}

	recon_state->nr_caps++;
out_free:
	kfree(path);
out_dput:
	dput(dentry);
	return err;
}

/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	struct rb_node *p;
	int mds = session->s_mds;
	int err = -ENOMEM;
	int s_nr_caps;
	struct ceph_pagelist *pagelist;
	struct ceph_reconnect_state recon_state;

	pr_info("mds%d reconnect start\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;
	session->s_seq = 0;

	dout("session %p state %s\n", session,
	     ceph_session_state_name(session->s_state));

	/* invalidate all outstanding caps/leases for this session */
	spin_lock(&session->s_gen_ttl_lock);
	session->s_cap_gen++;
	spin_unlock(&session->s_gen_ttl_lock);

	spin_lock(&session->s_cap_lock);
	/* don't know if session is readonly */
	session->s_readonly = 0;
	/*
	 * notify __ceph_remove_cap() that we are composing cap reconnect.
	 * If a cap get released before being added to the cap reconnect,
	 * __ceph_remove_cap() should skip queuing cap release.
	 */
	session->s_cap_reconnect = 1;
	/* drop old cap expires; we're about to reestablish that state */
	cleanup_cap_releases(mdsc, session);

	/* trim unused caps to reduce MDS's cache rejoin time */
	if (mdsc->fsc->sb->s_root)
		shrink_dcache_parent(mdsc->fsc->sb->s_root);

	ceph_con_close(&session->s_con);
	ceph_con_open(&session->s_con,
		      CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	/* traverse this session's caps */
	s_nr_caps = session->s_nr_caps;
	err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
	if (err)
		goto fail;

	recon_state.nr_caps = 0;
	recon_state.pagelist = pagelist;
	recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
	err = iterate_session_caps(session, encode_caps_cb, &recon_state);
	if (err < 0)
		goto fail;

	spin_lock(&session->s_cap_lock);
	session->s_cap_reconnect = 0;
	spin_unlock(&session->s_cap_lock);

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist,
					   &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;
	}

	if (recon_state.flock)
		reply->hdr.version = cpu_to_le16(2);

	/* raced with cap release? */
	if (s_nr_caps != recon_state.nr_caps) {
		/* patch up the cap count we encoded at the head of the
		 * pagelist (first page) with the number actually encoded */
		struct page *page = list_first_entry(&pagelist->head,
						     struct page, lru);
		__le32 *addr = kmap_atomic(page);
		*addr = cpu_to_le32(recon_state.nr_caps);
		kunmap_atomic(addr);
	}

	reply->hdr.data_len = cpu_to_le32(pagelist->length);
	ceph_msg_data_add_pagelist(reply, pagelist);

	ceph_early_kick_flushing_caps(mdsc, session);

	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}

/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{
	int i;
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i] == NULL)
			continue;
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ?
" (laggy)" : "", ceph_session_state_name(s->s_state)); if (i >= newmap->m_max_mds || memcmp(ceph_mdsmap_get_addr(oldmap, i), ceph_mdsmap_get_addr(newmap, i), sizeof(struct ceph_entity_addr))) { if (s->s_state == CEPH_MDS_SESSION_OPENING) { /* the session never opened, just close it * out now */ __wake_requests(mdsc, &s->s_waiting); __unregister_session(mdsc, s); } else { /* just close it */ mutex_unlock(&mdsc->mutex); mutex_lock(&s->s_mutex); mutex_lock(&mdsc->mutex); ceph_con_close(&s->s_con); mutex_unlock(&s->s_mutex); s->s_state = CEPH_MDS_SESSION_RESTARTING; } } else if (oldstate == newstate) { continue; /* nothing new with this mds */ } /* * send reconnect? */ if (s->s_state == CEPH_MDS_SESSION_RESTARTING && newstate >= CEPH_MDS_STATE_RECONNECT) { mutex_unlock(&mdsc->mutex); send_mds_reconnect(mdsc, s); mutex_lock(&mdsc->mutex); } /* * kick request on any mds that has gone active. */ if (oldstate < CEPH_MDS_STATE_ACTIVE && newstate >= CEPH_MDS_STATE_ACTIVE) { if (oldstate != CEPH_MDS_STATE_CREATING && oldstate != CEPH_MDS_STATE_STARTING) pr_info("mds%d recovery completed\n", s->s_mds); kick_requests(mdsc, i); ceph_kick_flushing_caps(mdsc, s); wake_up_session_caps(s, 1); } } for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) { s = mdsc->sessions[i]; if (!s) continue; if (!ceph_mdsmap_is_laggy(newmap, i)) continue; if (s->s_state == CEPH_MDS_SESSION_OPEN || s->s_state == CEPH_MDS_SESSION_HUNG || s->s_state == CEPH_MDS_SESSION_CLOSING) { dout(" connecting to export targets of laggy mds%d\n", i); __open_export_target_sessions(mdsc, s); } } } /* * leases */ /* * caller must hold session s_mutex, dentry->d_lock */ void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) { struct ceph_dentry_info *di = ceph_dentry(dentry); ceph_put_mds_session(di->lease_session); di->lease_session = NULL; } static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, struct ceph_msg *msg) { struct super_block *sb = mdsc->fsc->sb; struct 
inode *inode; struct dentry *parent, *dentry; struct ceph_dentry_info *di; int mds = session->s_mds; struct ceph_mds_lease *h = msg->front.iov_base; u32 seq; struct ceph_vino vino; struct qstr dname; int release = 0; dout("handle_lease from mds%d\n", mds); /* decode */ if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) goto bad; vino.ino = le64_to_cpu(h->ino); vino.snap = CEPH_NOSNAP; seq = le32_to_cpu(h->seq); dname.name = (void *)h + sizeof(*h) + sizeof(u32); dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); if (dname.len != get_unaligned_le32(h+1)) goto bad; /* lookup inode */ inode = ceph_find_inode(sb, vino); dout("handle_lease %s, ino %llx %p %.*s\n", ceph_lease_op_name(h->action), vino.ino, inode, dname.len, dname.name); mutex_lock(&session->s_mutex); session->s_seq++; if (inode == NULL) { dout("handle_lease no inode %llx\n", vino.ino); goto release; } /* dentry */ parent = d_find_alias(inode); if (!parent) { dout("no parent dentry on inode %p\n", inode); WARN_ON(1); goto release; /* hrm... 
*/ } dname.hash = full_name_hash(dname.name, dname.len); dentry = d_lookup(parent, &dname); dput(parent); if (!dentry) goto release; spin_lock(&dentry->d_lock); di = ceph_dentry(dentry); switch (h->action) { case CEPH_MDS_LEASE_REVOKE: if (di->lease_session == session) { if (ceph_seq_cmp(di->lease_seq, seq) > 0) h->seq = cpu_to_le32(di->lease_seq); __ceph_mdsc_drop_dentry_lease(dentry); } release = 1; break; case CEPH_MDS_LEASE_RENEW: if (di->lease_session == session && di->lease_gen == session->s_cap_gen && di->lease_renew_from && di->lease_renew_after == 0) { unsigned long duration = msecs_to_jiffies(le32_to_cpu(h->duration_ms)); di->lease_seq = seq; dentry->d_time = di->lease_renew_from + duration; di->lease_renew_after = di->lease_renew_from + (duration >> 1); di->lease_renew_from = 0; } break; } spin_unlock(&dentry->d_lock); dput(dentry); if (!release) goto out; release: /* let's just reuse the same message */ h->action = CEPH_MDS_LEASE_REVOKE_ACK; ceph_msg_get(msg); ceph_con_send(&session->s_con, msg); out: iput(inode); mutex_unlock(&session->s_mutex); return; bad: pr_err("corrupt lease message\n"); ceph_msg_dump(msg); } void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, struct inode *inode, struct dentry *dentry, char action, u32 seq) { struct ceph_msg *msg; struct ceph_mds_lease *lease; int len = sizeof(*lease) + sizeof(u32); int dnamelen = 0; dout("lease_send_msg inode %p dentry %p %s to mds%d\n", inode, dentry, ceph_lease_op_name(action), session->s_mds); dnamelen = dentry->d_name.len; len += dnamelen; msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false); if (!msg) return; lease = msg->front.iov_base; lease->action = action; lease->ino = cpu_to_le64(ceph_vino(inode).ino); lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); lease->seq = cpu_to_le32(seq); put_unaligned_le32(dnamelen, lease + 1); memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen); /* * if this is a preemptive lease RELEASE, no need to * flush 
	 * request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}

/*
 * Preemptively release a lease we expect to invalidate anyway.
 * Pass @inode always, @dentry is optional.
 */
void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
			     struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *session;
	u32 seq;

	BUG_ON(inode == NULL);
	BUG_ON(dentry == NULL);

	/* is dentry lease valid? */
	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (!di || !di->lease_session ||
	    di->lease_session->s_mds < 0 ||
	    di->lease_gen != di->lease_session->s_cap_gen ||
	    !time_before(jiffies, dentry->d_time)) {
		dout("lease_release inode %p dentry %p -- "
		     "no lease\n",
		     inode, dentry);
		spin_unlock(&dentry->d_lock);
		return;
	}

	/* we do have a lease on this dentry; note mds and seq */
	session = ceph_get_mds_session(di->lease_session);
	seq = di->lease_seq;
	__ceph_mdsc_drop_dentry_lease(dentry);
	spin_unlock(&dentry->d_lock);

	dout("lease_release inode %p dentry %p to mds%d\n",
	     inode, dentry, session->s_mds);
	ceph_mdsc_lease_send_msg(session, inode, dentry,
				 CEPH_MDS_LEASE_RELEASE, seq);
	ceph_put_mds_session(session);
}

/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		/* lock/unlock s_mutex to let in-flight lease work drain */
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);
	schedule_delayed_work(&mdsc->delayed_work, hz);
}

static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	/* re-arm ourselves; runs until the work is cancelled at teardown */
	schedule_delayed(mdsc);
}

int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL) {
		kfree(mdsc);
		return -ENOMEM;
	}

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	atomic_set(&mdsc->num_sessions, 0);
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	mdsc->last_snap_seq = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->oldest_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->last_cap_flush_tid = 1;
	mdsc->cap_flush_tree = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	init_rwsem(&mdsc->pool_perm_rwsem);
	mdsc->pool_perm_tree = RB_ROOT;

	return 0;
}

/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_request *req;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    ceph_timeout_jiffies(opts->mount_timeout));

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}

/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
*/ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) { dout("pre_umount\n"); mdsc->stopping = 1; drop_leases(mdsc); ceph_flush_dirty_caps(mdsc); wait_requests(mdsc); /* * wait for reply handlers to drop their request refs and * their inode/dcache refs */ ceph_msgr_flush(); } /* * wait for all write mds requests to flush. */ static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) { struct ceph_mds_request *req = NULL, *nextreq; struct rb_node *n; mutex_lock(&mdsc->mutex); dout("wait_unsafe_requests want %lld\n", want_tid); restart: req = __get_oldest_req(mdsc); while (req && req->r_tid <= want_tid) { /* find next request */ n = rb_next(&req->r_node); if (n) nextreq = rb_entry(n, struct ceph_mds_request, r_node); else nextreq = NULL; if (req->r_op != CEPH_MDS_OP_SETFILELOCK && (req->r_op & CEPH_MDS_OP_WRITE)) { /* write op */ ceph_mdsc_get_request(req); if (nextreq) ceph_mdsc_get_request(nextreq); mutex_unlock(&mdsc->mutex); dout("wait_unsafe_requests wait on %llu (want %llu)\n", req->r_tid, want_tid); wait_for_completion(&req->r_safe_completion); mutex_lock(&mdsc->mutex); ceph_mdsc_put_request(req); if (!nextreq) break; /* next dne before, so we're done! 
*/ if (RB_EMPTY_NODE(&nextreq->r_node)) { /* next request was removed from tree */ ceph_mdsc_put_request(nextreq); goto restart; } ceph_mdsc_put_request(nextreq); /* won't go away */ } req = nextreq; } mutex_unlock(&mdsc->mutex); dout("wait_unsafe_requests done\n"); } void ceph_mdsc_sync(struct ceph_mds_client *mdsc) { u64 want_tid, want_flush, want_snap; if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) return; dout("sync\n"); mutex_lock(&mdsc->mutex); want_tid = mdsc->last_tid; mutex_unlock(&mdsc->mutex); ceph_flush_dirty_caps(mdsc); spin_lock(&mdsc->cap_dirty_lock); want_flush = mdsc->last_cap_flush_tid; spin_unlock(&mdsc->cap_dirty_lock); down_read(&mdsc->snap_rwsem); want_snap = mdsc->last_snap_seq; up_read(&mdsc->snap_rwsem); dout("sync want tid %lld flush_seq %lld snap_seq %lld\n", want_tid, want_flush, want_snap); wait_unsafe_requests(mdsc, want_tid); wait_caps_flush(mdsc, want_flush, want_snap); } /* * true if all sessions are closed, or we force unmount */ static bool done_closing_sessions(struct ceph_mds_client *mdsc) { if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) return true; return atomic_read(&mdsc->num_sessions) == 0; } /* * called after sb is ro. 
*/
/*
 * Cleanly shut down all MDS sessions: ask each session to close, wait
 * (bounded by the mount timeout) for the MDS replies, then forcibly tear
 * down whatever is left.  Called after the superblock has gone read-only.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_session *session;
	int i;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		/*
		 * Drop mdsc->mutex before taking the per-session mutex
		 * (lock ordering: s_mutex is taken without mdsc->mutex
		 * held), then re-take it to continue the scan.
		 */
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	/* bounded wait: give the MDSs mount_timeout to acknowledge */
	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
			   ceph_timeout_jiffies(opts->mount_timeout));

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			/* take a ref so the session survives unregister */
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	/* all caps should have been flushed/released by now */
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}

/*
 * Forced unmount path: close every session without waiting for MDS
 * replies, drop session requests/caps, and wake anything blocked on
 * requests or an mdsmap so it can bail out.
 */
void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int mds;

	dout("force umount\n");

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; mds++) {
		session = __ceph_lookup_mds_session(mdsc, mds);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		/* don't wait for the close reply; reap state right away */
		if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
			cleanup_session_requests(mdsc, session);
			remove_session_caps(session);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
		/* error out any requests still aimed at this mds */
		kick_requests(mdsc, mds);
	}
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
}

static void
ceph_mdsc_stop(struct ceph_mds_client *mdsc) { dout("stop\n"); cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ if (mdsc->mdsmap) ceph_mdsmap_destroy(mdsc->mdsmap); kfree(mdsc->sessions); ceph_caps_finalize(mdsc); ceph_pool_perm_destroy(mdsc); } void ceph_mdsc_destroy(struct ceph_fs_client *fsc) { struct ceph_mds_client *mdsc = fsc->mdsc; dout("mdsc_destroy %p\n", mdsc); ceph_mdsc_stop(mdsc); /* flush out any connection work with references to us */ ceph_msgr_flush(); fsc->mdsc = NULL; kfree(mdsc); dout("mdsc_destroy %p done\n", mdsc); } /* * handle mds map update. */ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) { u32 epoch; u32 maplen; void *p = msg->front.iov_base; void *end = p + msg->front.iov_len; struct ceph_mdsmap *newmap, *oldmap; struct ceph_fsid fsid; int err = -EINVAL; ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); ceph_decode_copy(&p, &fsid, sizeof(fsid)); if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) return; epoch = ceph_decode_32(&p); maplen = ceph_decode_32(&p); dout("handle_map epoch %u len %d\n", epoch, (int)maplen); /* do we need it? 
*/ ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch); mutex_lock(&mdsc->mutex); if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { dout("handle_map epoch %u <= our %u\n", epoch, mdsc->mdsmap->m_epoch); mutex_unlock(&mdsc->mutex); return; } newmap = ceph_mdsmap_decode(&p, end); if (IS_ERR(newmap)) { err = PTR_ERR(newmap); goto bad_unlock; } /* swap into place */ if (mdsc->mdsmap) { oldmap = mdsc->mdsmap; mdsc->mdsmap = newmap; check_new_map(mdsc, newmap, oldmap); ceph_mdsmap_destroy(oldmap); } else { mdsc->mdsmap = newmap; /* first mds map */ } mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; __wake_requests(mdsc, &mdsc->waiting_for_map); mutex_unlock(&mdsc->mutex); schedule_delayed(mdsc); return; bad_unlock: mutex_unlock(&mdsc->mutex); bad: pr_err("error decoding mdsmap %d\n", err); return; } static struct ceph_connection *con_get(struct ceph_connection *con) { struct ceph_mds_session *s = con->private; if (get_session(s)) { dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref)); return con; } dout("mdsc con_get %p FAIL\n", s); return NULL; } static void con_put(struct ceph_connection *con) { struct ceph_mds_session *s = con->private; dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1); ceph_put_mds_session(s); } /* * if the client is unresponsive for long enough, the mds will kill * the session entirely. 
*/
/* MDS told us our session is gone (e.g. we were unresponsive too long):
 * try to reconnect and rebuild session state. */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warn("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}

/*
 * Incoming-message demultiplexer for an MDS connection.  Verifies the
 * session is still registered (under mdsc->mutex), then routes the
 * message by type.  Consumes the msg reference in all cases.
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		/* session was torn down; just drop the message */
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	/* note: handlers run without mdsc->mutex held */
	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;
	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	/* force_new: throw away any cached authorizer and rebuild */
	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		/* refresh an existing (possibly expired) authorizer */
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

/* Check the server's reply to our authorizer; len is the reply length. */
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
}

/* Our ticket was rejected: invalidate it and revalidate with the monitor. */
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}

/*
 * Allocate a message for an incoming frame.  Reuses con->in_msg when the
 * messenger already staged one; otherwise allocates a fresh msg sized from
 * the header.  Returns NULL (with *skip == 0) on allocation failure.
 */
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

	if (con->in_msg)
		return con->in_msg;

	*skip = 0;
	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}

/* Sign an outgoing message with this session's auth handshake. */
static int mds_sign_message(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_sign_message(auth, msg);
}

/* Verify the signature on an incoming message. */
static int mds_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

/* Messenger callback table for MDS connections. */
static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
	.alloc_msg = mds_alloc_msg,
	.sign_message = mds_sign_message,
	.check_message_signature = mds_check_message_signature,
};

/* eof */
gpl-2.0
lentinj/u-boot
drivers/hwmon/ds1775.c
150
3044
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/*
 * Dallas Semiconductor's DS1775 Digital Thermometer and Thermostat
 */

#include <common.h>
#include <i2c.h>
#include <dtt.h>

#define DTT_I2C_DEV_CODE CONFIG_SYS_I2C_DTT_ADDR /* Dallas Semi's DS1775 device code */

/* DS1775 register-pointer values */
#define DTT_READ_TEMP	0x0	/* temperature (2 bytes, read-only) */
#define DTT_CONFIG	0x1	/* configuration (1 byte) */
#define DTT_TEMP_HYST	0x2	/* hysteresis trip point (2 bytes) */
#define DTT_TEMP_OS	0x3	/* overtemperature trip point (2 bytes) */

/*
 * dtt_read - read a DS1775 register
 * @sensor: sensor index; low 3 bits select the chip's I2C address offset
 * @reg:    register to read (DTT_READ_TEMP/CONFIG/TEMP_HYST/TEMP_OS)
 *
 * Returns the register contents, or 1 on I2C error (note: 1 is in-band;
 * kept for compatibility with the existing dtt callers).
 */
int dtt_read(int sensor, int reg)
{
	int dlen;
	uchar data[2];

	/*
	 * Calculate sensor address and command
	 */
	sensor = DTT_I2C_DEV_CODE + (sensor & 0x07); /* addr of ds1775 */

	/*
	 * The temperature-format registers return a 2 byte result,
	 * everything else (the config register) returns 1 byte.
	 */
	if ((reg == DTT_READ_TEMP) ||
	    (reg == DTT_TEMP_OS) || (reg == DTT_TEMP_HYST))
		dlen = 2;
	else
		dlen = 1;

	/*
	 * Now try to read the register
	 */
	if (i2c_read(sensor, reg, 1, data, dlen) != 0)
		return 1;

	/*
	 * Handle 2 byte result, MSB first.  The DS1775 temperature
	 * registers hold a two's-complement value, so sign-extend via
	 * the 16-bit cast.  The previous
	 *   (short)data[1] + ((short)data[0] << 8)
	 * form returned raw 0..65535 and mis-read sub-zero temperatures
	 * (e.g. -0.5C came back as +255 from dtt_get_temp()).
	 */
	if (dlen == 2)
		return (int)(short)((data[0] << 8) | data[1]);

	return (int)data[0];
}

/*
 * dtt_write - write a DS1775 register
 * @sensor: sensor index; low 3 bits select the chip's I2C address offset
 * @reg:    register to write
 * @val:    value to write (16-bit, MSB first, for temperature registers;
 *          low byte only for the config register)
 *
 * Returns 0 on success, 1 on I2C error.
 */
int dtt_write(int sensor, int reg, int val)
{
	int dlen;
	uchar data[2];

	/*
	 * Calculate sensor address and register
	 */
	sensor = DTT_I2C_DEV_CODE + (sensor & 0x07);

	/*
	 * Handle various data sizes
	 */
	if ((reg == DTT_READ_TEMP) ||
	    (reg == DTT_TEMP_OS) || (reg == DTT_TEMP_HYST)) {
		dlen = 2;
		data[0] = (char)((val >> 8) & 0xff);	/* MSB first */
		data[1] = (char)(val & 0xff);
	} else {
		dlen = 1;
		data[0] = (char)(val & 0xff);
	}

	/*
	 * Write value to device
	 */
	if (i2c_write(sensor, reg, 1, data, dlen) != 0)
		return 1;

	return 0;
}

/*
 * dtt_init_one - program trip points and configuration for one sensor
 * @sensor: sensor index
 *
 * Sets T_OS from CONFIG_SYS_DTT_MAX_TEMP, T_HYST from max minus
 * CONFIG_SYS_DTT_HYSTERESIS, then writes the config register.
 * Returns 0 on success, 1 on any write failure.
 */
int dtt_init_one(int sensor)
{
	int val;

	/*
	 * Setup High Temp.  Degrees are encoded in the top 9 bits
	 * (0.5C resolution), hence the *2, <<7 and 0xff80 mask.
	 */
	val = ((CONFIG_SYS_DTT_MAX_TEMP * 2) << 7) & 0xff80;
	if (dtt_write(sensor, DTT_TEMP_OS, val) != 0)
		return 1;
	udelay(50000);	/* Max 50ms */

	/*
	 * Setup Low Temp - hysteresis
	 */
	val = (((CONFIG_SYS_DTT_MAX_TEMP - CONFIG_SYS_DTT_HYSTERESIS) * 2)
	       << 7) & 0xff80;
	if (dtt_write(sensor, DTT_TEMP_HYST, val) != 0)
		return 1;
	udelay(50000);	/* Max 50ms */

	/*
	 * Setup configuration register
	 *
	 * Fault Tolerance limits 4, Thermometer resolution bits is 9,
	 * Polarity = Active Low, continuous conversion mode, Thermostat
	 * mode is interrupt mode
	 */
	val = 0xa;
	if (dtt_write(sensor, DTT_CONFIG, val) != 0)
		return 1;
	udelay(50000);	/* Max 50ms */

	return 0;
}

/*
 * dtt_get_temp - current temperature in whole degrees C
 * @sensor: sensor index
 *
 * The raw register is degrees * 256 (two's complement), so dividing by
 * 256 yields degrees; with the sign fix in dtt_read() this is now also
 * correct for temperatures below zero.
 */
int dtt_get_temp(int sensor)
{
	return (dtt_read(sensor, DTT_READ_TEMP) / 256);
}
gpl-2.0
Tegra4/android_kernel_hp_phobos-old
arch/arm/mach-tegra/p852/board-p852-sku1-c0x.c
406
2913
/*
 * arch/arm/mach-tegra/board-p852-sku1-c0x.c
 *
 * Copyright (C) 2010-2011, NVIDIA Corporation.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Board file for the P852 SKU1 C0x variant.  Each helper below only ORs
 * enable/config flags into the p852_*_peripherals bitmasks declared in
 * board-p852.h; p852_common_init() consumes them to register devices.
 */

#include "board-p852.h"

/* Enable SPI1 and SPI4, both in master mode. */
static inline void p852_sku1_c0x_spi_init(void)
{
	p852_sku_peripherals |= P852_SKU_SPI_ENABLE;
	p852_spi_peripherals |=
	    ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI1_SHIFT) |
	    ((P852_SPI_MASTER | P852_SPI_ENABLE) << P852_SPI4_SHIFT);
}

/* Enable I2S1 and I2S2 in TDM mode. */
static inline void p852_sku1_c0x_i2s_init(void)
{
	p852_sku_peripherals |= P852_SKU_I2S_ENABLE;
	p852_i2s_peripherals |=
	    ((P852_I2S_ENABLE | P852_I2S_TDM) << P852_I2S1_SHIFT) |
	    ((P852_I2S_ENABLE | P852_I2S_TDM) << P852_I2S2_SHIFT);
}

/*
 * Enable SDHCI1/3 (with card-detect and write-protect GPIOs) and
 * SDHCI4 (no CD/WP).  GPIO assignments go into the platform data
 * slots for controllers 1 and 3 (indices 0 and 2).
 */
static inline void p852_sku1_c0x_sdhci_init(void)
{
	p852_sku_peripherals |= P852_SKU_SDHCI_ENABLE;
	p852_sdhci_peripherals |=
	    ((P852_SDHCI_ENABLE) << P852_SDHCI4_SHIFT) |
	    ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
	     << P852_SDHCI1_SHIFT) |
	    ((P852_SDHCI_ENABLE | P852_SDHCI_CD_EN | P852_SDHCI_WP_EN)
	     << P852_SDHCI3_SHIFT);
	p852_sdhci_platform_data[0].cd_gpio = TEGRA_GPIO_PV0;
	p852_sdhci_platform_data[0].wp_gpio = TEGRA_GPIO_PV1;
	p852_sdhci_platform_data[2].cd_gpio = TEGRA_GPIO_PD7;
	p852_sdhci_platform_data[2].wp_gpio = TEGRA_GPIO_PT4;
}

/* UARTD as debug port; UARTA (alt pin config) and UARTB high-speed. */
static inline void p852_sku1_c0x_uart_init(void)
{
	p852_sku_peripherals |= P852_SKU_UART_ENABLE;
	p852_uart_peripherals |=
	    ((P852_UART_ENABLE | P852_UART_DB) << P852_UARTD_SHIFT) |
	    ((P852_UART_ENABLE | P852_UART_HS) << P852_UARTB_SHIFT) |
	    ((P852_UART_ENABLE | P852_UART_HS | P852_UART_ALT_PIN_CFG)
	     << P852_UARTA_SHIFT);
}

/* Enable display controller B only. */
static inline void p852_sku1_c0x_display_init(void)
{
	p852_sku_peripherals |= P852_SKU_DISPLAY_ENABLE;
	p852_display_peripherals |=
	    (P852_DISP_ENABLE << P852_DISPB_SHIFT);
}

/* ULPI (USB PHY) is not used on this SKU. */
static inline void p852_sku1_c0x_ulpi_init(void)
{
	p852_sku_peripherals |= P852_SKU_ULPI_DISABLE;
}

/* Enable all four I2C busses. */
static inline void p852_sku1_c0x_i2c_init(void)
{
	p852_sku_peripherals |= P852_SKU_I2C_ENABLE;
	p852_i2c_peripherals |=
	    ((P852_I2C_ENABLE) << P852_I2C1_SHIFT) |
	    ((P852_I2C_ENABLE) << P852_I2C2_SHIFT) |
	    ((P852_I2C_ENABLE) << P852_I2C3_SHIFT) |
	    ((P852_I2C_ENABLE) << P852_I2C4_SHIFT);
}

/*
 * Board entry point for this SKU: set all peripheral masks (plus NOR
 * flash) and hand off to the common P852 init.
 */
void __init p852_sku1_c0x_init(void)
{
	p852_sku_peripherals |= P852_SKU_NOR_ENABLE;
	p852_sku1_c0x_spi_init();
	p852_sku1_c0x_i2s_init();
	p852_sku1_c0x_uart_init();
	p852_sku1_c0x_sdhci_init();
	p852_sku1_c0x_i2c_init();
	p852_sku1_c0x_display_init();
	p852_sku1_c0x_ulpi_init();
	p852_common_init();
}
gpl-2.0
kingklick/kk-incredible-kernel
drivers/md/dm-crypt.c
406
33559
/* * Copyright (C) 2003 Christophe Saout <christophe@saout.de> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include <linux/completion.h> #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/mempool.h> #include <linux/slab.h> #include <linux/crypto.h> #include <linux/workqueue.h> #include <linux/backing-dev.h> #include <asm/atomic.h> #include <linux/scatterlist.h> #include <asm/page.h> #include <asm/unaligned.h> #include <linux/device-mapper.h> #define DM_MSG_PREFIX "crypt" #define MESG_STR(x) x, sizeof(x) /* * context holding the current state of a multi-part conversion */ struct convert_context { struct completion restart; struct bio *bio_in; struct bio *bio_out; unsigned int offset_in; unsigned int offset_out; unsigned int idx_in; unsigned int idx_out; sector_t sector; atomic_t pending; }; /* * per bio private data */ struct dm_crypt_io { struct dm_target *target; struct bio *base_bio; struct work_struct work; struct convert_context ctx; atomic_t pending; int error; sector_t sector; struct dm_crypt_io *base_io; }; struct dm_crypt_request { struct convert_context *ctx; struct scatterlist sg_in; struct scatterlist sg_out; }; struct crypt_config; struct crypt_iv_operations { int (*ctr)(struct crypt_config *cc, struct dm_target *ti, const char *opts); void (*dtr)(struct crypt_config *cc); int (*init)(struct crypt_config *cc); int (*wipe)(struct crypt_config *cc); int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); }; struct iv_essiv_private { struct crypto_cipher *tfm; struct crypto_hash *hash_tfm; u8 *salt; }; struct iv_benbi_private { int shift; }; /* * Crypt: maps a linear range of a block device * and encrypts / decrypts at the same time. 
*/ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; struct crypt_config { struct dm_dev *dev; sector_t start; /* * pool for per bio private data, crypto requests and * encryption requeusts/buffer pages */ mempool_t *io_pool; mempool_t *req_pool; mempool_t *page_pool; struct bio_set *bs; struct workqueue_struct *io_queue; struct workqueue_struct *crypt_queue; /* * crypto related data */ struct crypt_iv_operations *iv_gen_ops; char *iv_mode; union { struct iv_essiv_private essiv; struct iv_benbi_private benbi; } iv_gen_private; sector_t iv_offset; unsigned int iv_size; /* * Layout of each crypto request: * * struct ablkcipher_request * context * padding * struct dm_crypt_request * padding * IV * * The padding is added so that dm_crypt_request and the IV are * correctly aligned. */ unsigned int dmreq_start; struct ablkcipher_request *req; char cipher[CRYPTO_MAX_ALG_NAME]; char chainmode[CRYPTO_MAX_ALG_NAME]; struct crypto_ablkcipher *tfm; unsigned long flags; unsigned int key_size; u8 key[0]; }; #define MIN_IOS 16 #define MIN_POOL_PAGES 32 #define MIN_BIO_PAGES 8 static struct kmem_cache *_crypt_io_pool; static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); /* * Different IV generation algorithms: * * plain: the initial vector is the 32-bit little-endian version of the sector * number, padded with zeros if necessary. * * essiv: "encrypted sector|salt initial vector", the sector number is * encrypted with the bulk cipher using a salt as key. The salt * should be derived from the bulk cipher's key via hashing. * * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 * (needed for LRW-32-AES and possible other narrow block modes) * * null: the initial vector is always zero. Provides compatibility with * obsolete loop_fish2 devices. Do not use for new devices. 
*
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

/* plain: IV = 32-bit little-endian sector number, zero padded. */
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	/* salt = H(volume key); the salt buffer was sized in _ctr */
	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	/* key the sector-encryption cipher with the salt */
	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
				    crypto_hash_digestsize(essiv->hash_tfm));
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);

	memset(essiv->salt, 0, salt_size);

	/* re-key with the zeroed salt so no derived key material remains */
	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
}

/* Tear down ESSIV state: free both tfms and the (zeroized) salt. */
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_cipher(essiv->tfm);
	essiv->tfm = NULL;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	/* kzfree clears the salt before freeing (it is key-derived) */
	kzfree(essiv->salt);
	essiv->salt = NULL;
}

/*
 * Construct ESSIV state: "opts" names the hash used to derive the salt
 * from the volume key; a cipher tfm (same algorithm as the bulk cipher)
 * encrypts sector numbers into IVs.  On any failure all partially
 * allocated resources are released via the "bad" path.
 */
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	/* salt buffer sized to the digest; filled later by _init */
	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	/* Allocate essiv_tfm */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		err = PTR_ERR(essiv_tfm);
		goto bad;
	}
	/* the ESSIV cipher's block size must equal the bulk cipher's IV size */
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		err = -EINVAL;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.tfm = essiv_tfm;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	return 0;

bad:
	if (essiv_tfm && !IS_ERR(essiv_tfm))
		crypto_free_cipher(essiv_tfm);
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

/* essiv: IV = E_salt(64-bit little-endian sector number), zero padded. */
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
	return 0;
}

/* Validate the cipher block size and precompute the benbi sector shift. */
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	/* 512-byte sectors -> cipher blocks: shift by (9 - log2(bs)) */
	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

/* benbi holds no resources; nothing to destroy. */
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

/* benbi: IV = 64-bit big-endian cipher-block count, starting at 1. */
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
	/* counter goes in the last 8 bytes; may be unaligned */
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

/* null: all-zero IV (compatibility with obsolete loop_fish2 devices). */
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	return 0;
}

static
struct crypt_iv_operations crypt_iv_plain_ops = { .generator = crypt_iv_plain_gen }; static struct crypt_iv_operations crypt_iv_essiv_ops = { .ctr = crypt_iv_essiv_ctr, .dtr = crypt_iv_essiv_dtr, .init = crypt_iv_essiv_init, .wipe = crypt_iv_essiv_wipe, .generator = crypt_iv_essiv_gen }; static struct crypt_iv_operations crypt_iv_benbi_ops = { .ctr = crypt_iv_benbi_ctr, .dtr = crypt_iv_benbi_dtr, .generator = crypt_iv_benbi_gen }; static struct crypt_iv_operations crypt_iv_null_ops = { .generator = crypt_iv_null_gen }; static void crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx, struct bio *bio_out, struct bio *bio_in, sector_t sector) { ctx->bio_in = bio_in; ctx->bio_out = bio_out; ctx->offset_in = 0; ctx->offset_out = 0; ctx->idx_in = bio_in ? bio_in->bi_idx : 0; ctx->idx_out = bio_out ? bio_out->bi_idx : 0; ctx->sector = sector + cc->iv_offset; init_completion(&ctx->restart); } static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc, struct ablkcipher_request *req) { return (struct dm_crypt_request *)((char *)req + cc->dmreq_start); } static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq) { return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start); } static int crypt_convert_block(struct crypt_config *cc, struct convert_context *ctx, struct ablkcipher_request *req) { struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); struct dm_crypt_request *dmreq; u8 *iv; int r = 0; dmreq = dmreq_of_req(cc, req); iv = (u8 *)ALIGN((unsigned long)(dmreq + 1), crypto_ablkcipher_alignmask(cc->tfm) + 1); dmreq->ctx = ctx; sg_init_table(&dmreq->sg_in, 1); sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in); sg_init_table(&dmreq->sg_out, 1); sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out); ctx->offset_in += 1 << 
SECTOR_SHIFT; if (ctx->offset_in >= bv_in->bv_len) { ctx->offset_in = 0; ctx->idx_in++; } ctx->offset_out += 1 << SECTOR_SHIFT; if (ctx->offset_out >= bv_out->bv_len) { ctx->offset_out = 0; ctx->idx_out++; } if (cc->iv_gen_ops) { r = cc->iv_gen_ops->generator(cc, iv, ctx->sector); if (r < 0) return r; } ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, 1 << SECTOR_SHIFT, iv); if (bio_data_dir(ctx->bio_in) == WRITE) r = crypto_ablkcipher_encrypt(req); else r = crypto_ablkcipher_decrypt(req); return r; } static void kcryptd_async_done(struct crypto_async_request *async_req, int error); static void crypt_alloc_req(struct crypt_config *cc, struct convert_context *ctx) { if (!cc->req) cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); ablkcipher_request_set_tfm(cc->req, cc->tfm); ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, kcryptd_async_done, dmreq_of_req(cc, cc->req)); } /* * Encrypt / decrypt data from one bio to another one (can be the same one) */ static int crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { int r; atomic_set(&ctx->pending, 1); while(ctx->idx_in < ctx->bio_in->bi_vcnt && ctx->idx_out < ctx->bio_out->bi_vcnt) { crypt_alloc_req(cc, ctx); atomic_inc(&ctx->pending); r = crypt_convert_block(cc, ctx, cc->req); switch (r) { /* async */ case -EBUSY: wait_for_completion(&ctx->restart); INIT_COMPLETION(ctx->restart); /* fall through*/ case -EINPROGRESS: cc->req = NULL; ctx->sector++; continue; /* sync */ case 0: atomic_dec(&ctx->pending); ctx->sector++; cond_resched(); continue; /* error */ default: atomic_dec(&ctx->pending); return r; } } return 0; } static void dm_crypt_bio_destructor(struct bio *bio) { struct dm_crypt_io *io = bio->bi_private; struct crypt_config *cc = io->target->private; bio_free(bio, cc->bs); } /* * Generate a new unfragmented bio with the given size * This should never violate the device limitations * May return a smaller bio when running out of pages, 
indicated by * *out_of_pages set to 1. */ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, unsigned *out_of_pages) { struct crypt_config *cc = io->target->private; struct bio *clone; unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; unsigned i, len; struct page *page; clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); if (!clone) return NULL; clone_init(io, clone); *out_of_pages = 0; for (i = 0; i < nr_iovecs; i++) { page = mempool_alloc(cc->page_pool, gfp_mask); if (!page) { *out_of_pages = 1; break; } /* * if additional pages cannot be allocated without waiting, * return a partially allocated bio, the caller will then try * to allocate additional bios while submitting this partial bio */ if (i == (MIN_BIO_PAGES - 1)) gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; len = (size > PAGE_SIZE) ? PAGE_SIZE : size; if (!bio_add_page(clone, page, len, 0)) { mempool_free(page, cc->page_pool); break; } size -= len; } if (!clone->bi_size) { bio_put(clone); return NULL; } return clone; } static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) { unsigned int i; struct bio_vec *bv; for (i = 0; i < clone->bi_vcnt; i++) { bv = bio_iovec_idx(clone, i); BUG_ON(!bv->bv_page); mempool_free(bv->bv_page, cc->page_pool); bv->bv_page = NULL; } } static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti, struct bio *bio, sector_t sector) { struct crypt_config *cc = ti->private; struct dm_crypt_io *io; io = mempool_alloc(cc->io_pool, GFP_NOIO); io->target = ti; io->base_bio = bio; io->sector = sector; io->error = 0; io->base_io = NULL; atomic_set(&io->pending, 0); return io; } static void crypt_inc_pending(struct dm_crypt_io *io) { atomic_inc(&io->pending); } /* * One of the bios was finished. Check for completion of * the whole request and correctly clean up the buffer. * If base_io is set, wait for the last fragment to complete. 
*/ static void crypt_dec_pending(struct dm_crypt_io *io) { struct crypt_config *cc = io->target->private; struct bio *base_bio = io->base_bio; struct dm_crypt_io *base_io = io->base_io; int error = io->error; if (!atomic_dec_and_test(&io->pending)) return; mempool_free(io, cc->io_pool); if (likely(!base_io)) bio_endio(base_bio, error); else { if (error && !base_io->error) base_io->error = error; crypt_dec_pending(base_io); } } /* * kcryptd/kcryptd_io: * * Needed because it would be very unwise to do decryption in an * interrupt context. * * kcryptd performs the actual encryption or decryption. * * kcryptd_io performs the IO submission. * * They must be separated as otherwise the final stages could be * starved by new requests which can block in the first stages due * to memory allocation. */ static void crypt_endio(struct bio *clone, int error) { struct dm_crypt_io *io = clone->bi_private; struct crypt_config *cc = io->target->private; unsigned rw = bio_data_dir(clone); if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) error = -EIO; /* * free the processed pages */ if (rw == WRITE) crypt_free_buffer_pages(cc, clone); bio_put(clone); if (rw == READ && !error) { kcryptd_queue_crypt(io); return; } if (unlikely(error)) io->error = error; crypt_dec_pending(io); } static void clone_init(struct dm_crypt_io *io, struct bio *clone) { struct crypt_config *cc = io->target->private; clone->bi_private = io; clone->bi_end_io = crypt_endio; clone->bi_bdev = cc->dev->bdev; clone->bi_rw = io->base_bio->bi_rw; clone->bi_destructor = dm_crypt_bio_destructor; } static void kcryptd_io_read(struct dm_crypt_io *io) { struct crypt_config *cc = io->target->private; struct bio *base_bio = io->base_bio; struct bio *clone; crypt_inc_pending(io); /* * The block layer might modify the bvec array, so always * copy the required bvecs because we need the original * one in order to decrypt the whole bio data *afterwards*. 
*/ clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs); if (unlikely(!clone)) { io->error = -ENOMEM; crypt_dec_pending(io); return; } clone_init(io, clone); clone->bi_idx = 0; clone->bi_vcnt = bio_segments(base_bio); clone->bi_size = base_bio->bi_size; clone->bi_sector = cc->start + io->sector; memcpy(clone->bi_io_vec, bio_iovec(base_bio), sizeof(struct bio_vec) * clone->bi_vcnt); generic_make_request(clone); } static void kcryptd_io_write(struct dm_crypt_io *io) { struct bio *clone = io->ctx.bio_out; generic_make_request(clone); } static void kcryptd_io(struct work_struct *work) { struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); if (bio_data_dir(io->base_bio) == READ) kcryptd_io_read(io); else kcryptd_io_write(io); } static void kcryptd_queue_io(struct dm_crypt_io *io) { struct crypt_config *cc = io->target->private; INIT_WORK(&io->work, kcryptd_io); queue_work(cc->io_queue, &io->work); } static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error, int async) { struct bio *clone = io->ctx.bio_out; struct crypt_config *cc = io->target->private; if (unlikely(error < 0)) { crypt_free_buffer_pages(cc, clone); bio_put(clone); io->error = -EIO; crypt_dec_pending(io); return; } /* crypt_convert should have filled the clone bio */ BUG_ON(io->ctx.idx_out < clone->bi_vcnt); clone->bi_sector = cc->start + io->sector; if (async) kcryptd_queue_io(io); else generic_make_request(clone); } static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) { struct crypt_config *cc = io->target->private; struct bio *clone; struct dm_crypt_io *new_io; int crypt_finished; unsigned out_of_pages = 0; unsigned remaining = io->base_bio->bi_size; sector_t sector = io->sector; int r; /* * Prevent io from disappearing until this function completes. 
*/ crypt_inc_pending(io); crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); /* * The allocated buffers can be smaller than the whole bio, * so repeat the whole process until all the data can be handled. */ while (remaining) { clone = crypt_alloc_buffer(io, remaining, &out_of_pages); if (unlikely(!clone)) { io->error = -ENOMEM; break; } io->ctx.bio_out = clone; io->ctx.idx_out = 0; remaining -= clone->bi_size; sector += bio_sectors(clone); crypt_inc_pending(io); r = crypt_convert(cc, &io->ctx); crypt_finished = atomic_dec_and_test(&io->ctx.pending); /* Encryption was already finished, submit io now */ if (crypt_finished) { kcryptd_crypt_write_io_submit(io, r, 0); /* * If there was an error, do not try next fragments. * For async, error is processed in async handler. */ if (unlikely(r < 0)) break; io->sector = sector; } /* * Out of memory -> run queues * But don't wait if split was due to the io size restriction */ if (unlikely(out_of_pages)) congestion_wait(BLK_RW_ASYNC, HZ/100); /* * With async crypto it is unsafe to share the crypto context * between fragments, so switch to a new dm_crypt_io structure. */ if (unlikely(!crypt_finished && remaining)) { new_io = crypt_io_alloc(io->target, io->base_bio, sector); crypt_inc_pending(new_io); crypt_convert_init(cc, &new_io->ctx, NULL, io->base_bio, sector); new_io->ctx.idx_in = io->ctx.idx_in; new_io->ctx.offset_in = io->ctx.offset_in; /* * Fragments after the first use the base_io * pending count. 
*/ if (!io->base_io) new_io->base_io = io; else { new_io->base_io = io->base_io; crypt_inc_pending(io->base_io); crypt_dec_pending(io); } io = new_io; } } crypt_dec_pending(io); } static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error) { if (unlikely(error < 0)) io->error = -EIO; crypt_dec_pending(io); } static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) { struct crypt_config *cc = io->target->private; int r = 0; crypt_inc_pending(io); crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, io->sector); r = crypt_convert(cc, &io->ctx); if (atomic_dec_and_test(&io->ctx.pending)) kcryptd_crypt_read_done(io, r); crypt_dec_pending(io); } static void kcryptd_async_done(struct crypto_async_request *async_req, int error) { struct dm_crypt_request *dmreq = async_req->data; struct convert_context *ctx = dmreq->ctx; struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); struct crypt_config *cc = io->target->private; if (error == -EINPROGRESS) { complete(&ctx->restart); return; } mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); if (!atomic_dec_and_test(&ctx->pending)) return; if (bio_data_dir(io->base_bio) == READ) kcryptd_crypt_read_done(io, error); else kcryptd_crypt_write_io_submit(io, error, 1); } static void kcryptd_crypt(struct work_struct *work) { struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); if (bio_data_dir(io->base_bio) == READ) kcryptd_crypt_read_convert(io); else kcryptd_crypt_write_convert(io); } static void kcryptd_queue_crypt(struct dm_crypt_io *io) { struct crypt_config *cc = io->target->private; INIT_WORK(&io->work, kcryptd_crypt); queue_work(cc->crypt_queue, &io->work); } /* * Decode key from its hex representation */ static int crypt_decode_key(u8 *key, char *hex, unsigned int size) { char buffer[3]; char *endp; unsigned int i; buffer[2] = '\0'; for (i = 0; i < size; i++) { buffer[0] = *hex++; buffer[1] = *hex++; key[i] = (u8)simple_strtoul(buffer, &endp, 16); if (endp != &buffer[2]) 
return -EINVAL; } if (*hex != '\0') return -EINVAL; return 0; } /* * Encode key into its hex representation */ static void crypt_encode_key(char *hex, u8 *key, unsigned int size) { unsigned int i; for (i = 0; i < size; i++) { sprintf(hex, "%02x", *key); hex += 2; key++; } } static int crypt_set_key(struct crypt_config *cc, char *key) { unsigned key_size = strlen(key) >> 1; if (cc->key_size && cc->key_size != key_size) return -EINVAL; cc->key_size = key_size; /* initial settings */ if ((!key_size && strcmp(key, "-")) || (key_size && crypt_decode_key(cc->key, key, key_size) < 0)) return -EINVAL; set_bit(DM_CRYPT_KEY_VALID, &cc->flags); return 0; } static int crypt_wipe_key(struct crypt_config *cc) { clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); memset(&cc->key, 0, cc->key_size * sizeof(u8)); return 0; } /* * Construct an encryption mapping: * <cipher> <key> <iv_offset> <dev_path> <start> */ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct crypt_config *cc; struct crypto_ablkcipher *tfm; char *tmp; char *cipher; char *chainmode; char *ivmode; char *ivopts; unsigned int key_size; unsigned long long tmpll; if (argc != 5) { ti->error = "Not enough arguments"; return -EINVAL; } tmp = argv[0]; cipher = strsep(&tmp, "-"); chainmode = strsep(&tmp, "-"); ivopts = strsep(&tmp, "-"); ivmode = strsep(&ivopts, ":"); if (tmp) DMWARN("Unexpected additional cipher options"); key_size = strlen(argv[1]) >> 1; cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); if (cc == NULL) { ti->error = "Cannot allocate transparent encryption context"; return -ENOMEM; } if (crypt_set_key(cc, argv[1])) { ti->error = "Error decoding key"; goto bad_cipher; } /* Compatiblity mode for old dm-crypt cipher strings */ if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) { chainmode = "cbc"; ivmode = "plain"; } if (strcmp(chainmode, "ecb") && !ivmode) { ti->error = "This chaining mode requires an IV mechanism"; goto bad_cipher; } if (snprintf(cc->cipher, 
CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) { ti->error = "Chain mode + cipher name is too long"; goto bad_cipher; } tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0); if (IS_ERR(tfm)) { ti->error = "Error allocating crypto tfm"; goto bad_cipher; } strcpy(cc->cipher, cipher); strcpy(cc->chainmode, chainmode); cc->tfm = tfm; /* * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi". * See comments at iv code */ if (ivmode == NULL) cc->iv_gen_ops = NULL; else if (strcmp(ivmode, "plain") == 0) cc->iv_gen_ops = &crypt_iv_plain_ops; else if (strcmp(ivmode, "essiv") == 0) cc->iv_gen_ops = &crypt_iv_essiv_ops; else if (strcmp(ivmode, "benbi") == 0) cc->iv_gen_ops = &crypt_iv_benbi_ops; else if (strcmp(ivmode, "null") == 0) cc->iv_gen_ops = &crypt_iv_null_ops; else { ti->error = "Invalid IV mode"; goto bad_ivmode; } if (cc->iv_gen_ops && cc->iv_gen_ops->ctr && cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) goto bad_ivmode; if (cc->iv_gen_ops && cc->iv_gen_ops->init && cc->iv_gen_ops->init(cc) < 0) { ti->error = "Error initialising IV"; goto bad_slab_pool; } cc->iv_size = crypto_ablkcipher_ivsize(tfm); if (cc->iv_size) /* at least a 64 bit sector number should fit in our buffer */ cc->iv_size = max(cc->iv_size, (unsigned int)(sizeof(u64) / sizeof(u8))); else { if (cc->iv_gen_ops) { DMWARN("Selected cipher does not support IVs"); if (cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); cc->iv_gen_ops = NULL; } } cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); if (!cc->io_pool) { ti->error = "Cannot allocate crypt io mempool"; goto bad_slab_pool; } cc->dmreq_start = sizeof(struct ablkcipher_request); cc->dmreq_start += crypto_ablkcipher_reqsize(tfm); cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + sizeof(struct dm_crypt_request) + cc->iv_size); if 
(!cc->req_pool) { ti->error = "Cannot allocate crypt request mempool"; goto bad_req_pool; } cc->req = NULL; cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); if (!cc->page_pool) { ti->error = "Cannot allocate page mempool"; goto bad_page_pool; } cc->bs = bioset_create(MIN_IOS, 0); if (!cc->bs) { ti->error = "Cannot allocate crypt bioset"; goto bad_bs; } if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) { ti->error = "Error setting key"; goto bad_device; } if (sscanf(argv[2], "%llu", &tmpll) != 1) { ti->error = "Invalid iv_offset sector"; goto bad_device; } cc->iv_offset = tmpll; if (sscanf(argv[4], "%llu", &tmpll) != 1) { ti->error = "Invalid device sector"; goto bad_device; } cc->start = tmpll; if (dm_get_device(ti, argv[3], cc->start, ti->len, dm_table_get_mode(ti->table), &cc->dev)) { ti->error = "Device lookup failed"; goto bad_device; } if (ivmode && cc->iv_gen_ops) { if (ivopts) *(ivopts - 1) = ':'; cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL); if (!cc->iv_mode) { ti->error = "Error kmallocing iv_mode string"; goto bad_ivmode_string; } strcpy(cc->iv_mode, ivmode); } else cc->iv_mode = NULL; cc->io_queue = create_singlethread_workqueue("kcryptd_io"); if (!cc->io_queue) { ti->error = "Couldn't create kcryptd io queue"; goto bad_io_queue; } cc->crypt_queue = create_singlethread_workqueue("kcryptd"); if (!cc->crypt_queue) { ti->error = "Couldn't create kcryptd queue"; goto bad_crypt_queue; } ti->num_flush_requests = 1; ti->private = cc; return 0; bad_crypt_queue: destroy_workqueue(cc->io_queue); bad_io_queue: kfree(cc->iv_mode); bad_ivmode_string: dm_put_device(ti, cc->dev); bad_device: bioset_free(cc->bs); bad_bs: mempool_destroy(cc->page_pool); bad_page_pool: mempool_destroy(cc->req_pool); bad_req_pool: mempool_destroy(cc->io_pool); bad_slab_pool: if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); bad_ivmode: crypto_free_ablkcipher(tfm); bad_cipher: /* Must zero key material before freeing */ kzfree(cc); return 
-EINVAL; } static void crypt_dtr(struct dm_target *ti) { struct crypt_config *cc = (struct crypt_config *) ti->private; destroy_workqueue(cc->io_queue); destroy_workqueue(cc->crypt_queue); if (cc->req) mempool_free(cc->req, cc->req_pool); bioset_free(cc->bs); mempool_destroy(cc->page_pool); mempool_destroy(cc->req_pool); mempool_destroy(cc->io_pool); kfree(cc->iv_mode); if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); crypto_free_ablkcipher(cc->tfm); dm_put_device(ti, cc->dev); /* Must zero key material before freeing */ kzfree(cc); } static int crypt_map(struct dm_target *ti, struct bio *bio, union map_info *map_context) { struct dm_crypt_io *io; struct crypt_config *cc; if (unlikely(bio_empty_barrier(bio))) { cc = ti->private; bio->bi_bdev = cc->dev->bdev; return DM_MAPIO_REMAPPED; } io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin); if (bio_data_dir(io->base_bio) == READ) kcryptd_queue_io(io); else kcryptd_queue_crypt(io); return DM_MAPIO_SUBMITTED; } static int crypt_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen) { struct crypt_config *cc = (struct crypt_config *) ti->private; unsigned int sz = 0; switch (type) { case STATUSTYPE_INFO: result[0] = '\0'; break; case STATUSTYPE_TABLE: if (cc->iv_mode) DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode, cc->iv_mode); else DMEMIT("%s-%s ", cc->cipher, cc->chainmode); if (cc->key_size > 0) { if ((maxlen - sz) < ((cc->key_size << 1) + 1)) return -ENOMEM; crypt_encode_key(result + sz, cc->key, cc->key_size); sz += cc->key_size << 1; } else { if (sz >= maxlen) return -ENOMEM; result[sz++] = '-'; } DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, cc->dev->name, (unsigned long long)cc->start); break; } return 0; } static void crypt_postsuspend(struct dm_target *ti) { struct crypt_config *cc = ti->private; set_bit(DM_CRYPT_SUSPENDED, &cc->flags); } static int crypt_preresume(struct dm_target *ti) { struct crypt_config *cc = ti->private; if 
(!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { DMERR("aborting resume - crypt key is not set."); return -EAGAIN; } return 0; } static void crypt_resume(struct dm_target *ti) { struct crypt_config *cc = ti->private; clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); } /* Message interface * key set <key> * key wipe */ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) { struct crypt_config *cc = ti->private; int ret = -EINVAL; if (argc < 2) goto error; if (!strnicmp(argv[0], MESG_STR("key"))) { if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { DMWARN("not suspended during key manipulation."); return -EINVAL; } if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { ret = crypt_set_key(cc, argv[2]); if (ret) return ret; if (cc->iv_gen_ops && cc->iv_gen_ops->init) ret = cc->iv_gen_ops->init(cc); return ret; } if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { ret = cc->iv_gen_ops->wipe(cc); if (ret) return ret; } return crypt_wipe_key(cc); } } error: DMWARN("unrecognised message received."); return -EINVAL; } static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm, struct bio_vec *biovec, int max_size) { struct crypt_config *cc = ti->private; struct request_queue *q = bdev_get_queue(cc->dev->bdev); if (!q->merge_bvec_fn) return max_size; bvm->bi_bdev = cc->dev->bdev; bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin; return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); } static int crypt_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct crypt_config *cc = ti->private; return fn(ti, cc->dev, cc->start, ti->len, data); } static struct target_type crypt_target = { .name = "crypt", .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, .map = crypt_map, .status = crypt_status, .postsuspend = crypt_postsuspend, .preresume = crypt_preresume, .resume = crypt_resume, .message = crypt_message, .merge = crypt_merge, 
.iterate_devices = crypt_iterate_devices, }; static int __init dm_crypt_init(void) { int r; _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0); if (!_crypt_io_pool) return -ENOMEM; r = dm_register_target(&crypt_target); if (r < 0) { DMERR("register failed %d", r); kmem_cache_destroy(_crypt_io_pool); } return r; } static void __exit dm_crypt_exit(void) { dm_unregister_target(&crypt_target); kmem_cache_destroy(_crypt_io_pool); } module_init(dm_crypt_init); module_exit(dm_crypt_exit); MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); MODULE_LICENSE("GPL");
gpl-2.0
lollipop-og/android_kernel_geehrc
kernel/resource.c
662
28523
/* * linux/kernel/resource.c * * Copyright (C) 1999 Linus Torvalds * Copyright (C) 1999 Martin Mares <mj@ucw.cz> * * Arbitrary resource management. */ #include <linux/export.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/device.h> #include <linux/pfn.h> #include <asm/io.h> struct resource ioport_resource = { .name = "PCI IO", .start = 0, .end = IO_SPACE_LIMIT, .flags = IORESOURCE_IO, }; EXPORT_SYMBOL(ioport_resource); struct resource iomem_resource = { .name = "PCI mem", .start = 0, .end = -1, .flags = IORESOURCE_MEM, }; EXPORT_SYMBOL(iomem_resource); /* constraints to be met while allocating resources */ struct resource_constraint { resource_size_t min, max, align; resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t); void *alignf_data; }; static DEFINE_RWLOCK(resource_lock); static void *r_next(struct seq_file *m, void *v, loff_t *pos) { struct resource *p = v; (*pos)++; if (p->child) return p->child; while (!p->sibling && p->parent) p = p->parent; return p->sibling; } #ifdef CONFIG_PROC_FS enum { MAX_IORES_LEVEL = 5 }; static void *r_start(struct seq_file *m, loff_t *pos) __acquires(resource_lock) { struct resource *p = m->private; loff_t l = 0; read_lock(&resource_lock); for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) ; return p; } static void r_stop(struct seq_file *m, void *v) __releases(resource_lock) { read_unlock(&resource_lock); } static int r_show(struct seq_file *m, void *v) { struct resource *root = m->private; struct resource *r = v, *p; int width = root->end < 0x10000 ? 
4 : 8; int depth; for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent) if (p->parent == root) break; seq_printf(m, "%*s%0*llx-%0*llx : %s\n", depth * 2, "", width, (unsigned long long) r->start, width, (unsigned long long) r->end, r->name ? r->name : "<BAD>"); return 0; } static const struct seq_operations resource_op = { .start = r_start, .next = r_next, .stop = r_stop, .show = r_show, }; static int ioports_open(struct inode *inode, struct file *file) { int res = seq_open(file, &resource_op); if (!res) { struct seq_file *m = file->private_data; m->private = &ioport_resource; } return res; } static int iomem_open(struct inode *inode, struct file *file) { int res = seq_open(file, &resource_op); if (!res) { struct seq_file *m = file->private_data; m->private = &iomem_resource; } return res; } static const struct file_operations proc_ioports_operations = { .open = ioports_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations proc_iomem_operations = { .open = iomem_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init ioresources_init(void) { proc_create("ioports", 0, NULL, &proc_ioports_operations); proc_create("iomem", 0, NULL, &proc_iomem_operations); return 0; } __initcall(ioresources_init); #endif /* CONFIG_PROC_FS */ /* Return the conflict entry if you can't request it */ static struct resource * __request_resource(struct resource *root, struct resource *new) { resource_size_t start = new->start; resource_size_t end = new->end; struct resource *tmp, **p; if (end < start) return root; if (start < root->start) return root; if (end > root->end) return root; p = &root->child; for (;;) { tmp = *p; if (!tmp || tmp->start > end) { new->sibling = tmp; *p = new; new->parent = root; return NULL; } p = &tmp->sibling; if (tmp->end < start) continue; return tmp; } } static int __release_resource(struct resource *old) { struct resource *tmp, **p; p = 
&old->parent->child; for (;;) { tmp = *p; if (!tmp) break; if (tmp == old) { *p = tmp->sibling; old->parent = NULL; return 0; } p = &tmp->sibling; } return -EINVAL; } static void __release_child_resources(struct resource *r) { struct resource *tmp, *p; resource_size_t size; p = r->child; r->child = NULL; while (p) { tmp = p; p = p->sibling; tmp->parent = NULL; tmp->sibling = NULL; __release_child_resources(tmp); printk(KERN_DEBUG "release child resource %pR\n", tmp); /* need to restore size, and keep flags */ size = resource_size(tmp); tmp->start = 0; tmp->end = size - 1; } } void release_child_resources(struct resource *r) { write_lock(&resource_lock); __release_child_resources(r); write_unlock(&resource_lock); } /** * request_resource_conflict - request and reserve an I/O or memory resource * @root: root resource descriptor * @new: resource descriptor desired by caller * * Returns 0 for success, conflict resource on error. */ struct resource *request_resource_conflict(struct resource *root, struct resource *new) { struct resource *conflict; write_lock(&resource_lock); conflict = __request_resource(root, new); write_unlock(&resource_lock); return conflict; } /** * request_resource - request and reserve an I/O or memory resource * @root: root resource descriptor * @new: resource descriptor desired by caller * * Returns 0 for success, negative error code on error. */ int request_resource(struct resource *root, struct resource *new) { struct resource *conflict; conflict = request_resource_conflict(root, new); return conflict ? -EBUSY : 0; } EXPORT_SYMBOL(request_resource); /** * locate_resource - locate an already reserved I/O or memory resource * @root: root resource descriptor * @search: resource descriptor to be located * * Returns pointer to desired resource or NULL if not found. 
*/ struct resource *locate_resource(struct resource *root, struct resource *search) { struct resource *found; write_lock(&resource_lock); found = __request_resource(root, search); write_unlock(&resource_lock); return found; } EXPORT_SYMBOL(locate_resource); /** * release_resource - release a previously reserved resource * @old: resource pointer */ int release_resource(struct resource *old) { int retval; write_lock(&resource_lock); retval = __release_resource(old); write_unlock(&resource_lock); return retval; } EXPORT_SYMBOL(release_resource); #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY) /* * Finds the lowest memory reosurce exists within [res->start.res->end) * the caller must specify res->start, res->end, res->flags and "name". * If found, returns 0, res is overwritten, if not found, returns -1. */ static int find_next_system_ram(struct resource *res, char *name) { resource_size_t start, end; struct resource *p; BUG_ON(!res); start = res->start; end = res->end; BUG_ON(start >= end); read_lock(&resource_lock); for (p = iomem_resource.child; p ; p = p->sibling) { /* system ram is just marked as IORESOURCE_MEM */ if (p->flags != res->flags) continue; if (name && strcmp(p->name, name)) continue; if (p->start > end) { p = NULL; break; } if ((p->end >= start) && (p->start < end)) break; } read_unlock(&resource_lock); if (!p) return -1; /* copy data */ if (res->start < p->start) res->start = p->start; if (res->end > p->end) res->end = p->end; return 0; } /* * This function calls callback against all memory range of "System RAM" * which are marked as IORESOURCE_MEM and IORESOUCE_BUSY. * Now, this function is only for "System RAM". 
*/ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)) { struct resource res; unsigned long pfn, end_pfn; u64 orig_end; int ret = -1; res.start = (u64) start_pfn << PAGE_SHIFT; res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; orig_end = res.end; while ((res.start < res.end) && (find_next_system_ram(&res, "System RAM") >= 0)) { pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT; if (res.end + 1 <= 0) end_pfn = res.end >> PAGE_SHIFT; else end_pfn = (res.end + 1) >> PAGE_SHIFT; if (end_pfn > pfn) ret = (*func)(pfn, end_pfn - pfn, arg); if (ret) break; if (res.end + 1 > res.start) res.start = res.end + 1; else res.start = res.end; res.end = orig_end; } return ret; } #endif static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg) { return 1; } /* * This generic page_is_ram() returns true if specified address is * registered as "System RAM" in iomem_resource list. 
*/ int __weak page_is_ram(unsigned long pfn) { return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; } void __weak arch_remove_reservations(struct resource *avail) { } static resource_size_t simple_align_resource(void *data, const struct resource *avail, resource_size_t size, resource_size_t align) { return avail->start; } static void resource_clip(struct resource *res, resource_size_t min, resource_size_t max) { if (res->start < min) res->start = min; if (res->end > max) res->end = max; } static bool resource_contains(struct resource *res1, struct resource *res2) { return res1->start <= res2->start && res1->end >= res2->end; } /* * Find empty slot in the resource tree with the given range and * alignment constraints */ static int __find_resource(struct resource *root, struct resource *old, struct resource *new, resource_size_t size, struct resource_constraint *constraint) { struct resource *this = root->child; struct resource tmp = *new, avail, alloc; tmp.flags = new->flags; tmp.start = root->start; /* * Skip past an allocated resource that starts at 0, since the assignment * of this->start - 1 to tmp->end below would cause an underflow. */ if (this && this->start == root->start) { tmp.start = (this == old) ? old->start : this->end + 1; this = this->sibling; } for(;;) { if (this) tmp.end = (this == old) ? 
this->end : this->start - 1; else tmp.end = root->end; if (tmp.end < tmp.start) goto next; resource_clip(&tmp, constraint->min, constraint->max); arch_remove_reservations(&tmp); /* Check for overflow after ALIGN() */ avail = *new; avail.start = ALIGN(tmp.start, constraint->align); avail.end = tmp.end; if (avail.start >= tmp.start) { alloc.start = constraint->alignf(constraint->alignf_data, &avail, size, constraint->align); alloc.end = alloc.start + size - 1; if (resource_contains(&avail, &alloc)) { new->start = alloc.start; new->end = alloc.end; return 0; } } next: if (!this || this->end == root->end) break; if (this != old) tmp.start = this->end + 1; this = this->sibling; } return -EBUSY; } /* * Find empty slot in the resource tree given range and alignment. */ static int find_resource(struct resource *root, struct resource *new, resource_size_t size, struct resource_constraint *constraint) { return __find_resource(root, NULL, new, size, constraint); } /** * reallocate_resource - allocate a slot in the resource tree given range & alignment. * The resource will be relocated if the new size cannot be reallocated in the * current location. * * @root: root resource descriptor * @old: resource descriptor desired by caller * @newsize: new size of the resource descriptor * @constraint: the size and alignment constraints to be met. 
*/ int reallocate_resource(struct resource *root, struct resource *old, resource_size_t newsize, struct resource_constraint *constraint) { int err=0; struct resource new = *old; struct resource *conflict; write_lock(&resource_lock); if ((err = __find_resource(root, old, &new, newsize, constraint))) goto out; if (resource_contains(&new, old)) { old->start = new.start; old->end = new.end; goto out; } if (old->child) { err = -EBUSY; goto out; } if (resource_contains(old, &new)) { old->start = new.start; old->end = new.end; } else { __release_resource(old); *old = new; conflict = __request_resource(root, old); BUG_ON(conflict); } out: write_unlock(&resource_lock); return err; } /** * allocate_resource - allocate empty slot in the resource tree given range & alignment. * The resource will be reallocated with a new size if it was already allocated * @root: root resource descriptor * @new: resource descriptor desired by caller * @size: requested resource region size * @min: minimum size to allocate * @max: maximum size to allocate * @align: alignment requested, in bytes * @alignf: alignment function, optional, called if not NULL * @alignf_data: arbitrary data to pass to the @alignf function */ int allocate_resource(struct resource *root, struct resource *new, resource_size_t size, resource_size_t min, resource_size_t max, resource_size_t align, resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t), void *alignf_data) { int err; struct resource_constraint constraint; if (!alignf) alignf = simple_align_resource; constraint.min = min; constraint.max = max; constraint.align = align; constraint.alignf = alignf; constraint.alignf_data = alignf_data; if ( new->parent ) { /* resource is already allocated, try reallocating with the new constraints */ return reallocate_resource(root, new, size, &constraint); } write_lock(&resource_lock); err = find_resource(root, new, size, &constraint); if (err >= 0 && __request_resource(root, new)) err = 
-EBUSY; write_unlock(&resource_lock); return err; } EXPORT_SYMBOL(allocate_resource); /** * lookup_resource - find an existing resource by a resource start address * @root: root resource descriptor * @start: resource start address * * Returns a pointer to the resource if found, NULL otherwise */ struct resource *lookup_resource(struct resource *root, resource_size_t start) { struct resource *res; read_lock(&resource_lock); for (res = root->child; res; res = res->sibling) { if (res->start == start) break; } read_unlock(&resource_lock); return res; } /* * Insert a resource into the resource tree. If successful, return NULL, * otherwise return the conflicting resource (compare to __request_resource()) */ static struct resource * __insert_resource(struct resource *parent, struct resource *new) { struct resource *first, *next; for (;; parent = first) { first = __request_resource(parent, new); if (!first) return first; if (first == parent) return first; if (WARN_ON(first == new)) /* duplicated insertion */ return first; if ((first->start > new->start) || (first->end < new->end)) break; if ((first->start == new->start) && (first->end == new->end)) break; } for (next = first; ; next = next->sibling) { /* Partial overlap? Bad, and unfixable */ if (next->start < new->start || next->end > new->end) return next; if (!next->sibling) break; if (next->sibling->start > new->end) break; } new->parent = parent; new->sibling = next->sibling; new->child = first; next->sibling = NULL; for (next = first; next; next = next->sibling) next->parent = new; if (parent->child == first) { parent->child = new; } else { next = parent->child; while (next->sibling != first) next = next->sibling; next->sibling = new; } return NULL; } /** * insert_resource_conflict - Inserts resource in the resource tree * @parent: parent of the new resource * @new: new resource to insert * * Returns 0 on success, conflict resource if the resource can't be inserted. 
* * This function is equivalent to request_resource_conflict when no conflict * happens. If a conflict happens, and the conflicting resources * entirely fit within the range of the new resource, then the new * resource is inserted and the conflicting resources become children of * the new resource. */ struct resource *insert_resource_conflict(struct resource *parent, struct resource *new) { struct resource *conflict; write_lock(&resource_lock); conflict = __insert_resource(parent, new); write_unlock(&resource_lock); return conflict; } /** * insert_resource - Inserts a resource in the resource tree * @parent: parent of the new resource * @new: new resource to insert * * Returns 0 on success, -EBUSY if the resource can't be inserted. */ int insert_resource(struct resource *parent, struct resource *new) { struct resource *conflict; conflict = insert_resource_conflict(parent, new); return conflict ? -EBUSY : 0; } /** * insert_resource_expand_to_fit - Insert a resource into the resource tree * @root: root resource descriptor * @new: new resource to insert * * Insert a resource into the resource tree, possibly expanding it in order * to make it encompass any conflicting resources. */ void insert_resource_expand_to_fit(struct resource *root, struct resource *new) { if (new->parent) return; write_lock(&resource_lock); for (;;) { struct resource *conflict; conflict = __insert_resource(root, new); if (!conflict) break; if (conflict == root) break; /* Ok, expand resource to cover the conflict, then try again .. */ if (conflict->start < new->start) new->start = conflict->start; if (conflict->end > new->end) new->end = conflict->end; printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); } write_unlock(&resource_lock); } /** * adjust_resource - modify a resource's start and size * @res: resource to modify * @start: new start value * @size: new size * * Given an existing resource, change its start and size to match the * arguments. 
Returns 0 on success, -EBUSY if it can't fit. * Existing children of the resource are assumed to be immutable. */ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size) { struct resource *tmp, *parent = res->parent; resource_size_t end = start + size - 1; int result = -EBUSY; write_lock(&resource_lock); if ((start < parent->start) || (end > parent->end)) goto out; for (tmp = res->child; tmp; tmp = tmp->sibling) { if ((tmp->start < start) || (tmp->end > end)) goto out; } if (res->sibling && (res->sibling->start <= end)) goto out; tmp = parent->child; if (tmp != res) { while (tmp->sibling != res) tmp = tmp->sibling; if (start <= tmp->end) goto out; } res->start = start; res->end = end; result = 0; out: write_unlock(&resource_lock); return result; } EXPORT_SYMBOL(adjust_resource); static void __init __reserve_region_with_split(struct resource *root, resource_size_t start, resource_size_t end, const char *name) { struct resource *parent = root; struct resource *conflict; struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC); struct resource *next_res = NULL; if (!res) return; res->name = name; res->start = start; res->end = end; res->flags = IORESOURCE_BUSY; while (1) { conflict = __request_resource(parent, res); if (!conflict) { if (!next_res) break; res = next_res; next_res = NULL; continue; } /* conflict covered whole area */ if (conflict->start <= res->start && conflict->end >= res->end) { kfree(res); WARN_ON(next_res); break; } /* failed, split and try again */ if (conflict->start > res->start) { end = res->end; res->end = conflict->start - 1; if (conflict->end < end) { next_res = kzalloc(sizeof(*next_res), GFP_ATOMIC); if (!next_res) { kfree(res); break; } next_res->name = name; next_res->start = conflict->end + 1; next_res->end = end; next_res->flags = IORESOURCE_BUSY; } } else { res->start = conflict->end + 1; } } } void __init reserve_region_with_split(struct resource *root, resource_size_t start, resource_size_t end, const 
char *name) { write_lock(&resource_lock); __reserve_region_with_split(root, start, end, name); write_unlock(&resource_lock); } /** * resource_alignment - calculate resource's alignment * @res: resource pointer * * Returns alignment on success, 0 (invalid alignment) on failure. */ resource_size_t resource_alignment(struct resource *res) { switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { case IORESOURCE_SIZEALIGN: return resource_size(res); case IORESOURCE_STARTALIGN: return res->start; default: return 0; } } /* * This is compatibility stuff for IO resources. * * Note how this, unlike the above, knows about * the IO flag meanings (busy etc). * * request_region creates a new busy region. * * check_region returns non-zero if the area is already busy. * * release_region releases a matching busy region. */ static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait); /** * __request_region - create a new busy resource region * @parent: parent resource descriptor * @start: resource start address * @n: resource region size * @name: reserving caller's ID string * @flags: IO resource flags */ struct resource * __request_region(struct resource *parent, resource_size_t start, resource_size_t n, const char *name, int flags) { DECLARE_WAITQUEUE(wait, current); struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) return NULL; res->name = name; res->start = start; res->end = start + n - 1; res->flags = IORESOURCE_BUSY; res->flags |= flags; write_lock(&resource_lock); for (;;) { struct resource *conflict; conflict = __request_resource(parent, res); if (!conflict) break; if (conflict != parent) { parent = conflict; if (!(conflict->flags & IORESOURCE_BUSY)) continue; } if (conflict->flags & flags & IORESOURCE_MUXED) { add_wait_queue(&muxed_resource_wait, &wait); write_unlock(&resource_lock); set_current_state(TASK_UNINTERRUPTIBLE); schedule(); remove_wait_queue(&muxed_resource_wait, &wait); write_lock(&resource_lock); continue; } /* Uhhuh, that didn't work 
out.. */ kfree(res); res = NULL; break; } write_unlock(&resource_lock); return res; } EXPORT_SYMBOL(__request_region); /** * __check_region - check if a resource region is busy or free * @parent: parent resource descriptor * @start: resource start address * @n: resource region size * * Returns 0 if the region is free at the moment it is checked, * returns %-EBUSY if the region is busy. * * NOTE: * This function is deprecated because its use is racy. * Even if it returns 0, a subsequent call to request_region() * may fail because another driver etc. just allocated the region. * Do NOT use it. It will be removed from the kernel. */ int __check_region(struct resource *parent, resource_size_t start, resource_size_t n) { struct resource * res; res = __request_region(parent, start, n, "check-region", 0); if (!res) return -EBUSY; release_resource(res); kfree(res); return 0; } EXPORT_SYMBOL(__check_region); /** * __release_region - release a previously reserved resource region * @parent: parent resource descriptor * @start: resource start address * @n: resource region size * * The described resource region must match a currently busy region. 
*/ void __release_region(struct resource *parent, resource_size_t start, resource_size_t n) { struct resource **p; resource_size_t end; p = &parent->child; end = start + n - 1; write_lock(&resource_lock); for (;;) { struct resource *res = *p; if (!res) break; if (res->start <= start && res->end >= end) { if (!(res->flags & IORESOURCE_BUSY)) { p = &res->child; continue; } if (res->start != start || res->end != end) break; *p = res->sibling; write_unlock(&resource_lock); if (res->flags & IORESOURCE_MUXED) wake_up(&muxed_resource_wait); kfree(res); return; } p = &res->sibling; } write_unlock(&resource_lock); printk(KERN_WARNING "Trying to free nonexistent resource " "<%016llx-%016llx>\n", (unsigned long long)start, (unsigned long long)end); } EXPORT_SYMBOL(__release_region); /* * Managed region resource */ struct region_devres { struct resource *parent; resource_size_t start; resource_size_t n; }; static void devm_region_release(struct device *dev, void *res) { struct region_devres *this = res; __release_region(this->parent, this->start, this->n); } static int devm_region_match(struct device *dev, void *res, void *match_data) { struct region_devres *this = res, *match = match_data; return this->parent == match->parent && this->start == match->start && this->n == match->n; } struct resource * __devm_request_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n, const char *name) { struct region_devres *dr = NULL; struct resource *res; dr = devres_alloc(devm_region_release, sizeof(struct region_devres), GFP_KERNEL); if (!dr) return NULL; dr->parent = parent; dr->start = start; dr->n = n; res = __request_region(parent, start, n, name, 0); if (res) devres_add(dev, dr); else devres_free(dr); return res; } EXPORT_SYMBOL(__devm_request_region); void __devm_release_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n) { struct region_devres match_data = { parent, start, n }; 
__release_region(parent, start, n); WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match, &match_data)); } EXPORT_SYMBOL(__devm_release_region); /* * Called from init/main.c to reserve IO ports. */ #define MAXRESERVE 4 static int __init reserve_setup(char *str) { static int reserved; static struct resource reserve[MAXRESERVE]; for (;;) { unsigned int io_start, io_num; int x = reserved; if (get_option (&str, &io_start) != 2) break; if (get_option (&str, &io_num) == 0) break; if (x < MAXRESERVE) { struct resource *res = reserve + x; res->name = "reserved"; res->start = io_start; res->end = io_start + io_num - 1; res->flags = IORESOURCE_BUSY; res->child = NULL; if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0) reserved = x+1; } } return 1; } __setup("reserve=", reserve_setup); /* * Check if the requested addr and size spans more than any slot in the * iomem resource tree. */ int iomem_map_sanity_check(resource_size_t addr, unsigned long size) { struct resource *p = &iomem_resource; int err = 0; loff_t l; read_lock(&resource_lock); for (p = p->child; p ; p = r_next(NULL, p, &l)) { /* * We can probably skip the resources without * IORESOURCE_IO attribute? */ if (p->start >= addr + size) continue; if (p->end < addr) continue; if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) continue; /* * if a resource is "BUSY", it's not a hardware resource * but a driver mapping of such a resource; we don't want * to warn for those; some drivers legitimately map only * partial hardware resources. 
(example: vesafb) */ if (p->flags & IORESOURCE_BUSY) continue; printk(KERN_WARNING "resource map sanity check conflict: " "0x%llx 0x%llx 0x%llx 0x%llx %s\n", (unsigned long long)addr, (unsigned long long)(addr + size - 1), (unsigned long long)p->start, (unsigned long long)p->end, p->name); err = -1; break; } read_unlock(&resource_lock); return err; } #ifdef CONFIG_STRICT_DEVMEM static int strict_iomem_checks = 1; #else static int strict_iomem_checks; #endif /* * check if an address is reserved in the iomem resource tree * returns 1 if reserved, 0 if not reserved. */ int iomem_is_exclusive(u64 addr) { struct resource *p = &iomem_resource; int err = 0; loff_t l; int size = PAGE_SIZE; if (!strict_iomem_checks) return 0; addr = addr & PAGE_MASK; read_lock(&resource_lock); for (p = p->child; p ; p = r_next(NULL, p, &l)) { /* * We can probably skip the resources without * IORESOURCE_IO attribute? */ if (p->start >= addr + size) break; if (p->end < addr) continue; if (p->flags & IORESOURCE_BUSY && p->flags & IORESOURCE_EXCLUSIVE) { err = 1; break; } } read_unlock(&resource_lock); return err; } static int __init strict_iomem(char *str) { if (strstr(str, "relaxed")) strict_iomem_checks = 0; if (strstr(str, "strict")) strict_iomem_checks = 1; return 1; } __setup("iomem=", strict_iomem);
gpl-2.0
invisiblek/android_kernel_oneplus_msm8974
kernel/resource.c
662
28523
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>

/* root of the port-space tree */
struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

/* root of the memory-space tree */
struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
			resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

/* depth-first successor within the resource tree */
static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *res = v;

	(*pos)++;
	if (res->child)
		return res->child;
	while (!res->sibling && res->parent)
		res = res->parent;
	return res->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *res = m->private;
	loff_t l = 0;

	read_lock(&resource_lock);
	for (res = res->child; res && l < *pos; res = r_next(m, res, &l))
		;
	return res;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *anc;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	/* indentation depth = distance from the root, capped */
	for (depth = 0, anc = r; depth < MAX_IORES_LEVEL; depth++, anc = anc->parent)
		if (anc->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &resource_op);

	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return ret;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &resource_op);

	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return ret;
}

static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *walk, **link;

	/* reject empty or out-of-root ranges up front */
	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	link = &root->child;
	for (;;) {
		walk = *link;
		if (!walk || walk->start > end) {
			/* found the gap: splice new in before walk */
			new->sibling = walk;
			*link = new;
			new->parent = root;
			return NULL;
		}
		link = &walk->sibling;
		if (walk->end < start)
			continue;
		return walk;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *walk, **link;

	link = &old->parent->child;
	for (;;) {
		walk = *link;
		if (!walk)
			break;
		if (walk == old) {
			*link = walk->sibling;
			old->parent = NULL;
			return 0;
		}
		link = &walk->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *walk, *child;
	resource_size_t size;

	child = r->child;
	r->child = NULL;
	while (child) {
		walk = child;
		child = child->sibling;

		walk->parent = NULL;
		walk->sibling = NULL;
		__release_child_resources(walk);

		printk(KERN_DEBUG "release child resource %pR\n", walk);
		/* need to restore size, and keep flags */
		size = resource_size(walk);
		walk->start = 0;
		walk->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, conflict resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	return request_resource_conflict(root, new) ? -EBUSY : 0;
}
EXPORT_SYMBOL(request_resource);

/**
 * locate_resource - locate an already reserved I/O or memory resource
 * @root: root resource descriptor
 * @search: resource descriptor to be located
 *
 * Returns pointer to desired resource or NULL if not found.
*/ struct resource *locate_resource(struct resource *root, struct resource *search) { struct resource *found; write_lock(&resource_lock); found = __request_resource(root, search); write_unlock(&resource_lock); return found; } EXPORT_SYMBOL(locate_resource); /** * release_resource - release a previously reserved resource * @old: resource pointer */ int release_resource(struct resource *old) { int retval; write_lock(&resource_lock); retval = __release_resource(old); write_unlock(&resource_lock); return retval; } EXPORT_SYMBOL(release_resource); #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY) /* * Finds the lowest memory reosurce exists within [res->start.res->end) * the caller must specify res->start, res->end, res->flags and "name". * If found, returns 0, res is overwritten, if not found, returns -1. */ static int find_next_system_ram(struct resource *res, char *name) { resource_size_t start, end; struct resource *p; BUG_ON(!res); start = res->start; end = res->end; BUG_ON(start >= end); read_lock(&resource_lock); for (p = iomem_resource.child; p ; p = p->sibling) { /* system ram is just marked as IORESOURCE_MEM */ if (p->flags != res->flags) continue; if (name && strcmp(p->name, name)) continue; if (p->start > end) { p = NULL; break; } if ((p->end >= start) && (p->start < end)) break; } read_unlock(&resource_lock); if (!p) return -1; /* copy data */ if (res->start < p->start) res->start = p->start; if (res->end > p->end) res->end = p->end; return 0; } /* * This function calls callback against all memory range of "System RAM" * which are marked as IORESOURCE_MEM and IORESOUCE_BUSY. * Now, this function is only for "System RAM". 
*/ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)) { struct resource res; unsigned long pfn, end_pfn; u64 orig_end; int ret = -1; res.start = (u64) start_pfn << PAGE_SHIFT; res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; orig_end = res.end; while ((res.start < res.end) && (find_next_system_ram(&res, "System RAM") >= 0)) { pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT; if (res.end + 1 <= 0) end_pfn = res.end >> PAGE_SHIFT; else end_pfn = (res.end + 1) >> PAGE_SHIFT; if (end_pfn > pfn) ret = (*func)(pfn, end_pfn - pfn, arg); if (ret) break; if (res.end + 1 > res.start) res.start = res.end + 1; else res.start = res.end; res.end = orig_end; } return ret; } #endif static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg) { return 1; } /* * This generic page_is_ram() returns true if specified address is * registered as "System RAM" in iomem_resource list. 
*/ int __weak page_is_ram(unsigned long pfn) { return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; } void __weak arch_remove_reservations(struct resource *avail) { } static resource_size_t simple_align_resource(void *data, const struct resource *avail, resource_size_t size, resource_size_t align) { return avail->start; } static void resource_clip(struct resource *res, resource_size_t min, resource_size_t max) { if (res->start < min) res->start = min; if (res->end > max) res->end = max; } static bool resource_contains(struct resource *res1, struct resource *res2) { return res1->start <= res2->start && res1->end >= res2->end; } /* * Find empty slot in the resource tree with the given range and * alignment constraints */ static int __find_resource(struct resource *root, struct resource *old, struct resource *new, resource_size_t size, struct resource_constraint *constraint) { struct resource *this = root->child; struct resource tmp = *new, avail, alloc; tmp.flags = new->flags; tmp.start = root->start; /* * Skip past an allocated resource that starts at 0, since the assignment * of this->start - 1 to tmp->end below would cause an underflow. */ if (this && this->start == root->start) { tmp.start = (this == old) ? old->start : this->end + 1; this = this->sibling; } for(;;) { if (this) tmp.end = (this == old) ? 
this->end : this->start - 1; else tmp.end = root->end; if (tmp.end < tmp.start) goto next; resource_clip(&tmp, constraint->min, constraint->max); arch_remove_reservations(&tmp); /* Check for overflow after ALIGN() */ avail = *new; avail.start = ALIGN(tmp.start, constraint->align); avail.end = tmp.end; if (avail.start >= tmp.start) { alloc.start = constraint->alignf(constraint->alignf_data, &avail, size, constraint->align); alloc.end = alloc.start + size - 1; if (resource_contains(&avail, &alloc)) { new->start = alloc.start; new->end = alloc.end; return 0; } } next: if (!this || this->end == root->end) break; if (this != old) tmp.start = this->end + 1; this = this->sibling; } return -EBUSY; } /* * Find empty slot in the resource tree given range and alignment. */ static int find_resource(struct resource *root, struct resource *new, resource_size_t size, struct resource_constraint *constraint) { return __find_resource(root, NULL, new, size, constraint); } /** * reallocate_resource - allocate a slot in the resource tree given range & alignment. * The resource will be relocated if the new size cannot be reallocated in the * current location. * * @root: root resource descriptor * @old: resource descriptor desired by caller * @newsize: new size of the resource descriptor * @constraint: the size and alignment constraints to be met. 
*/ int reallocate_resource(struct resource *root, struct resource *old, resource_size_t newsize, struct resource_constraint *constraint) { int err=0; struct resource new = *old; struct resource *conflict; write_lock(&resource_lock); if ((err = __find_resource(root, old, &new, newsize, constraint))) goto out; if (resource_contains(&new, old)) { old->start = new.start; old->end = new.end; goto out; } if (old->child) { err = -EBUSY; goto out; } if (resource_contains(old, &new)) { old->start = new.start; old->end = new.end; } else { __release_resource(old); *old = new; conflict = __request_resource(root, old); BUG_ON(conflict); } out: write_unlock(&resource_lock); return err; } /** * allocate_resource - allocate empty slot in the resource tree given range & alignment. * The resource will be reallocated with a new size if it was already allocated * @root: root resource descriptor * @new: resource descriptor desired by caller * @size: requested resource region size * @min: minimum size to allocate * @max: maximum size to allocate * @align: alignment requested, in bytes * @alignf: alignment function, optional, called if not NULL * @alignf_data: arbitrary data to pass to the @alignf function */ int allocate_resource(struct resource *root, struct resource *new, resource_size_t size, resource_size_t min, resource_size_t max, resource_size_t align, resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t), void *alignf_data) { int err; struct resource_constraint constraint; if (!alignf) alignf = simple_align_resource; constraint.min = min; constraint.max = max; constraint.align = align; constraint.alignf = alignf; constraint.alignf_data = alignf_data; if ( new->parent ) { /* resource is already allocated, try reallocating with the new constraints */ return reallocate_resource(root, new, size, &constraint); } write_lock(&resource_lock); err = find_resource(root, new, size, &constraint); if (err >= 0 && __request_resource(root, new)) err = 
-EBUSY; write_unlock(&resource_lock); return err; } EXPORT_SYMBOL(allocate_resource); /** * lookup_resource - find an existing resource by a resource start address * @root: root resource descriptor * @start: resource start address * * Returns a pointer to the resource if found, NULL otherwise */ struct resource *lookup_resource(struct resource *root, resource_size_t start) { struct resource *res; read_lock(&resource_lock); for (res = root->child; res; res = res->sibling) { if (res->start == start) break; } read_unlock(&resource_lock); return res; } /* * Insert a resource into the resource tree. If successful, return NULL, * otherwise return the conflicting resource (compare to __request_resource()) */ static struct resource * __insert_resource(struct resource *parent, struct resource *new) { struct resource *first, *next; for (;; parent = first) { first = __request_resource(parent, new); if (!first) return first; if (first == parent) return first; if (WARN_ON(first == new)) /* duplicated insertion */ return first; if ((first->start > new->start) || (first->end < new->end)) break; if ((first->start == new->start) && (first->end == new->end)) break; } for (next = first; ; next = next->sibling) { /* Partial overlap? Bad, and unfixable */ if (next->start < new->start || next->end > new->end) return next; if (!next->sibling) break; if (next->sibling->start > new->end) break; } new->parent = parent; new->sibling = next->sibling; new->child = first; next->sibling = NULL; for (next = first; next; next = next->sibling) next->parent = new; if (parent->child == first) { parent->child = new; } else { next = parent->child; while (next->sibling != first) next = next->sibling; next->sibling = new; } return NULL; } /** * insert_resource_conflict - Inserts resource in the resource tree * @parent: parent of the new resource * @new: new resource to insert * * Returns 0 on success, conflict resource if the resource can't be inserted. 
* * This function is equivalent to request_resource_conflict when no conflict * happens. If a conflict happens, and the conflicting resources * entirely fit within the range of the new resource, then the new * resource is inserted and the conflicting resources become children of * the new resource. */ struct resource *insert_resource_conflict(struct resource *parent, struct resource *new) { struct resource *conflict; write_lock(&resource_lock); conflict = __insert_resource(parent, new); write_unlock(&resource_lock); return conflict; } /** * insert_resource - Inserts a resource in the resource tree * @parent: parent of the new resource * @new: new resource to insert * * Returns 0 on success, -EBUSY if the resource can't be inserted. */ int insert_resource(struct resource *parent, struct resource *new) { struct resource *conflict; conflict = insert_resource_conflict(parent, new); return conflict ? -EBUSY : 0; } /** * insert_resource_expand_to_fit - Insert a resource into the resource tree * @root: root resource descriptor * @new: new resource to insert * * Insert a resource into the resource tree, possibly expanding it in order * to make it encompass any conflicting resources. */ void insert_resource_expand_to_fit(struct resource *root, struct resource *new) { if (new->parent) return; write_lock(&resource_lock); for (;;) { struct resource *conflict; conflict = __insert_resource(root, new); if (!conflict) break; if (conflict == root) break; /* Ok, expand resource to cover the conflict, then try again .. */ if (conflict->start < new->start) new->start = conflict->start; if (conflict->end > new->end) new->end = conflict->end; printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); } write_unlock(&resource_lock); } /** * adjust_resource - modify a resource's start and size * @res: resource to modify * @start: new start value * @size: new size * * Given an existing resource, change its start and size to match the * arguments. 
Returns 0 on success, -EBUSY if it can't fit. * Existing children of the resource are assumed to be immutable. */ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size) { struct resource *tmp, *parent = res->parent; resource_size_t end = start + size - 1; int result = -EBUSY; write_lock(&resource_lock); if ((start < parent->start) || (end > parent->end)) goto out; for (tmp = res->child; tmp; tmp = tmp->sibling) { if ((tmp->start < start) || (tmp->end > end)) goto out; } if (res->sibling && (res->sibling->start <= end)) goto out; tmp = parent->child; if (tmp != res) { while (tmp->sibling != res) tmp = tmp->sibling; if (start <= tmp->end) goto out; } res->start = start; res->end = end; result = 0; out: write_unlock(&resource_lock); return result; } EXPORT_SYMBOL(adjust_resource); static void __init __reserve_region_with_split(struct resource *root, resource_size_t start, resource_size_t end, const char *name) { struct resource *parent = root; struct resource *conflict; struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC); struct resource *next_res = NULL; if (!res) return; res->name = name; res->start = start; res->end = end; res->flags = IORESOURCE_BUSY; while (1) { conflict = __request_resource(parent, res); if (!conflict) { if (!next_res) break; res = next_res; next_res = NULL; continue; } /* conflict covered whole area */ if (conflict->start <= res->start && conflict->end >= res->end) { kfree(res); WARN_ON(next_res); break; } /* failed, split and try again */ if (conflict->start > res->start) { end = res->end; res->end = conflict->start - 1; if (conflict->end < end) { next_res = kzalloc(sizeof(*next_res), GFP_ATOMIC); if (!next_res) { kfree(res); break; } next_res->name = name; next_res->start = conflict->end + 1; next_res->end = end; next_res->flags = IORESOURCE_BUSY; } } else { res->start = conflict->end + 1; } } } void __init reserve_region_with_split(struct resource *root, resource_size_t start, resource_size_t end, const 
char *name) { write_lock(&resource_lock); __reserve_region_with_split(root, start, end, name); write_unlock(&resource_lock); } /** * resource_alignment - calculate resource's alignment * @res: resource pointer * * Returns alignment on success, 0 (invalid alignment) on failure. */ resource_size_t resource_alignment(struct resource *res) { switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { case IORESOURCE_SIZEALIGN: return resource_size(res); case IORESOURCE_STARTALIGN: return res->start; default: return 0; } } /* * This is compatibility stuff for IO resources. * * Note how this, unlike the above, knows about * the IO flag meanings (busy etc). * * request_region creates a new busy region. * * check_region returns non-zero if the area is already busy. * * release_region releases a matching busy region. */ static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait); /** * __request_region - create a new busy resource region * @parent: parent resource descriptor * @start: resource start address * @n: resource region size * @name: reserving caller's ID string * @flags: IO resource flags */ struct resource * __request_region(struct resource *parent, resource_size_t start, resource_size_t n, const char *name, int flags) { DECLARE_WAITQUEUE(wait, current); struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) return NULL; res->name = name; res->start = start; res->end = start + n - 1; res->flags = IORESOURCE_BUSY; res->flags |= flags; write_lock(&resource_lock); for (;;) { struct resource *conflict; conflict = __request_resource(parent, res); if (!conflict) break; if (conflict != parent) { parent = conflict; if (!(conflict->flags & IORESOURCE_BUSY)) continue; } if (conflict->flags & flags & IORESOURCE_MUXED) { add_wait_queue(&muxed_resource_wait, &wait); write_unlock(&resource_lock); set_current_state(TASK_UNINTERRUPTIBLE); schedule(); remove_wait_queue(&muxed_resource_wait, &wait); write_lock(&resource_lock); continue; } /* Uhhuh, that didn't work 
out.. */ kfree(res); res = NULL; break; } write_unlock(&resource_lock); return res; } EXPORT_SYMBOL(__request_region); /** * __check_region - check if a resource region is busy or free * @parent: parent resource descriptor * @start: resource start address * @n: resource region size * * Returns 0 if the region is free at the moment it is checked, * returns %-EBUSY if the region is busy. * * NOTE: * This function is deprecated because its use is racy. * Even if it returns 0, a subsequent call to request_region() * may fail because another driver etc. just allocated the region. * Do NOT use it. It will be removed from the kernel. */ int __check_region(struct resource *parent, resource_size_t start, resource_size_t n) { struct resource * res; res = __request_region(parent, start, n, "check-region", 0); if (!res) return -EBUSY; release_resource(res); kfree(res); return 0; } EXPORT_SYMBOL(__check_region); /** * __release_region - release a previously reserved resource region * @parent: parent resource descriptor * @start: resource start address * @n: resource region size * * The described resource region must match a currently busy region. 
*/ void __release_region(struct resource *parent, resource_size_t start, resource_size_t n) { struct resource **p; resource_size_t end; p = &parent->child; end = start + n - 1; write_lock(&resource_lock); for (;;) { struct resource *res = *p; if (!res) break; if (res->start <= start && res->end >= end) { if (!(res->flags & IORESOURCE_BUSY)) { p = &res->child; continue; } if (res->start != start || res->end != end) break; *p = res->sibling; write_unlock(&resource_lock); if (res->flags & IORESOURCE_MUXED) wake_up(&muxed_resource_wait); kfree(res); return; } p = &res->sibling; } write_unlock(&resource_lock); printk(KERN_WARNING "Trying to free nonexistent resource " "<%016llx-%016llx>\n", (unsigned long long)start, (unsigned long long)end); } EXPORT_SYMBOL(__release_region); /* * Managed region resource */ struct region_devres { struct resource *parent; resource_size_t start; resource_size_t n; }; static void devm_region_release(struct device *dev, void *res) { struct region_devres *this = res; __release_region(this->parent, this->start, this->n); } static int devm_region_match(struct device *dev, void *res, void *match_data) { struct region_devres *this = res, *match = match_data; return this->parent == match->parent && this->start == match->start && this->n == match->n; } struct resource * __devm_request_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n, const char *name) { struct region_devres *dr = NULL; struct resource *res; dr = devres_alloc(devm_region_release, sizeof(struct region_devres), GFP_KERNEL); if (!dr) return NULL; dr->parent = parent; dr->start = start; dr->n = n; res = __request_region(parent, start, n, name, 0); if (res) devres_add(dev, dr); else devres_free(dr); return res; } EXPORT_SYMBOL(__devm_request_region); void __devm_release_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n) { struct region_devres match_data = { parent, start, n }; 
__release_region(parent, start, n); WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match, &match_data)); } EXPORT_SYMBOL(__devm_release_region); /* * Called from init/main.c to reserve IO ports. */ #define MAXRESERVE 4 static int __init reserve_setup(char *str) { static int reserved; static struct resource reserve[MAXRESERVE]; for (;;) { unsigned int io_start, io_num; int x = reserved; if (get_option (&str, &io_start) != 2) break; if (get_option (&str, &io_num) == 0) break; if (x < MAXRESERVE) { struct resource *res = reserve + x; res->name = "reserved"; res->start = io_start; res->end = io_start + io_num - 1; res->flags = IORESOURCE_BUSY; res->child = NULL; if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0) reserved = x+1; } } return 1; } __setup("reserve=", reserve_setup); /* * Check if the requested addr and size spans more than any slot in the * iomem resource tree. */ int iomem_map_sanity_check(resource_size_t addr, unsigned long size) { struct resource *p = &iomem_resource; int err = 0; loff_t l; read_lock(&resource_lock); for (p = p->child; p ; p = r_next(NULL, p, &l)) { /* * We can probably skip the resources without * IORESOURCE_IO attribute? */ if (p->start >= addr + size) continue; if (p->end < addr) continue; if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) continue; /* * if a resource is "BUSY", it's not a hardware resource * but a driver mapping of such a resource; we don't want * to warn for those; some drivers legitimately map only * partial hardware resources. 
(example: vesafb) */ if (p->flags & IORESOURCE_BUSY) continue; printk(KERN_WARNING "resource map sanity check conflict: " "0x%llx 0x%llx 0x%llx 0x%llx %s\n", (unsigned long long)addr, (unsigned long long)(addr + size - 1), (unsigned long long)p->start, (unsigned long long)p->end, p->name); err = -1; break; } read_unlock(&resource_lock); return err; } #ifdef CONFIG_STRICT_DEVMEM static int strict_iomem_checks = 1; #else static int strict_iomem_checks; #endif /* * check if an address is reserved in the iomem resource tree * returns 1 if reserved, 0 if not reserved. */ int iomem_is_exclusive(u64 addr) { struct resource *p = &iomem_resource; int err = 0; loff_t l; int size = PAGE_SIZE; if (!strict_iomem_checks) return 0; addr = addr & PAGE_MASK; read_lock(&resource_lock); for (p = p->child; p ; p = r_next(NULL, p, &l)) { /* * We can probably skip the resources without * IORESOURCE_IO attribute? */ if (p->start >= addr + size) break; if (p->end < addr) continue; if (p->flags & IORESOURCE_BUSY && p->flags & IORESOURCE_EXCLUSIVE) { err = 1; break; } } read_unlock(&resource_lock); return err; } static int __init strict_iomem(char *str) { if (strstr(str, "relaxed")) strict_iomem_checks = 0; if (strstr(str, "strict")) strict_iomem_checks = 1; return 1; } __setup("iomem=", strict_iomem);
gpl-2.0
telf/error_state_capture_improvement
fs/ramfs/file-mmu.c
918
1283
/* file-mmu.c: ramfs MMU-based file operations * * Resizable simple ram filesystem for Linux. * * Copyright (C) 2000 Linus Torvalds. * 2000 Transmeta Corp. * * Usage limits added by David Gibson, Linuxcare Australia. * This file is released under the GPL. */ /* * NOTE! This filesystem is probably most useful * not as a real filesystem, but as an example of * how virtual filesystems can be written. * * It doesn't get much simpler than this. Consider * that this file implements the full semantics of * a POSIX-compliant read-write filesystem. * * Note in particular how the filesystem does not * need to implement any data structures of its own * to keep track of the virtual data: using the VFS * caches is sufficient. */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/ramfs.h> #include "internal.h" const struct file_operations ramfs_file_operations = { .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .fsync = noop_fsync, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, .llseek = generic_file_llseek, }; const struct inode_operations ramfs_file_inode_operations = { .setattr = simple_setattr, .getattr = simple_getattr, };
gpl-2.0
hexiaolong2008/linux-arm
drivers/media/platform/vivid/vivid-kthread-out.c
1174
9313
/* * vivid-kthread-out.h - video/vbi output thread support functions. * * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/font.h> #include <linux/mutex.h> #include <linux/videodev2.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/random.h> #include <linux/v4l2-dv-timings.h> #include <asm/div64.h> #include <media/videobuf2-vmalloc.h> #include <media/v4l2-dv-timings.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-fh.h> #include <media/v4l2-event.h> #include "vivid-core.h" #include "vivid-vid-common.h" #include "vivid-vid-cap.h" #include "vivid-vid-out.h" #include "vivid-radio-common.h" #include "vivid-radio-rx.h" #include "vivid-radio-tx.h" #include "vivid-sdr-cap.h" #include "vivid-vbi-cap.h" #include "vivid-vbi-out.h" #include "vivid-osd.h" #include "vivid-ctrls.h" #include "vivid-kthread-out.h" static void vivid_thread_vid_out_tick(struct vivid_dev *dev) { struct vivid_buffer *vid_out_buf = NULL; struct vivid_buffer *vbi_out_buf = NULL; dprintk(dev, 1, "Video Output Thread Tick\n"); /* Drop a certain percentage of buffers. 
*/ if (dev->perc_dropped_buffers && prandom_u32_max(100) < dev->perc_dropped_buffers) return; spin_lock(&dev->slock); /* * Only dequeue buffer if there is at least one more pending. * This makes video loopback possible. */ if (!list_empty(&dev->vid_out_active) && !list_is_singular(&dev->vid_out_active)) { vid_out_buf = list_entry(dev->vid_out_active.next, struct vivid_buffer, list); list_del(&vid_out_buf->list); } if (!list_empty(&dev->vbi_out_active) && (dev->field_out != V4L2_FIELD_ALTERNATE || (dev->vbi_out_seq_count & 1))) { vbi_out_buf = list_entry(dev->vbi_out_active.next, struct vivid_buffer, list); list_del(&vbi_out_buf->list); } spin_unlock(&dev->slock); if (!vid_out_buf && !vbi_out_buf) return; if (vid_out_buf) { vid_out_buf->vb.v4l2_buf.sequence = dev->vid_out_seq_count; if (dev->field_out == V4L2_FIELD_ALTERNATE) { /* * The sequence counter counts frames, not fields. So divide * by two. */ vid_out_buf->vb.v4l2_buf.sequence /= 2; } v4l2_get_timestamp(&vid_out_buf->vb.v4l2_buf.timestamp); vid_out_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset; vb2_buffer_done(&vid_out_buf->vb, dev->dqbuf_error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); dprintk(dev, 2, "vid_out buffer %d done\n", vid_out_buf->vb.v4l2_buf.index); } if (vbi_out_buf) { if (dev->stream_sliced_vbi_out) vivid_sliced_vbi_out_process(dev, vbi_out_buf); vbi_out_buf->vb.v4l2_buf.sequence = dev->vbi_out_seq_count; v4l2_get_timestamp(&vbi_out_buf->vb.v4l2_buf.timestamp); vbi_out_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset; vb2_buffer_done(&vbi_out_buf->vb, dev->dqbuf_error ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); dprintk(dev, 2, "vbi_out buffer %d done\n", vbi_out_buf->vb.v4l2_buf.index); } dev->dqbuf_error = false; } static int vivid_thread_vid_out(void *data) { struct vivid_dev *dev = data; u64 numerators_since_start; u64 buffers_since_start; u64 next_jiffies_since_start; unsigned long jiffies_since_start; unsigned long cur_jiffies; unsigned wait_jiffies; unsigned numerator; unsigned denominator; dprintk(dev, 1, "Video Output Thread Start\n"); set_freezable(); /* Resets frame counters */ dev->out_seq_offset = 0; if (dev->seq_wrap) dev->out_seq_count = 0xffffff80U; dev->jiffies_vid_out = jiffies; dev->vid_out_seq_start = dev->vbi_out_seq_start = 0; dev->out_seq_resync = false; for (;;) { try_to_freeze(); if (kthread_should_stop()) break; mutex_lock(&dev->mutex); cur_jiffies = jiffies; if (dev->out_seq_resync) { dev->jiffies_vid_out = cur_jiffies; dev->out_seq_offset = dev->out_seq_count + 1; dev->out_seq_count = 0; dev->out_seq_resync = false; } numerator = dev->timeperframe_vid_out.numerator; denominator = dev->timeperframe_vid_out.denominator; if (dev->field_out == V4L2_FIELD_ALTERNATE) denominator *= 2; /* Calculate the number of jiffies since we started streaming */ jiffies_since_start = cur_jiffies - dev->jiffies_vid_out; /* Get the number of buffers streamed since the start */ buffers_since_start = (u64)jiffies_since_start * denominator + (HZ * numerator) / 2; do_div(buffers_since_start, HZ * numerator); /* * After more than 0xf0000000 (rounded down to a multiple of * 'jiffies-per-day' to ease jiffies_to_msecs calculation) * jiffies have passed since we started streaming reset the * counters and keep track of the sequence offset. 
*/ if (jiffies_since_start > JIFFIES_RESYNC) { dev->jiffies_vid_out = cur_jiffies; dev->out_seq_offset = buffers_since_start; buffers_since_start = 0; } dev->out_seq_count = buffers_since_start + dev->out_seq_offset; dev->vid_out_seq_count = dev->out_seq_count - dev->vid_out_seq_start; dev->vbi_out_seq_count = dev->out_seq_count - dev->vbi_out_seq_start; vivid_thread_vid_out_tick(dev); mutex_unlock(&dev->mutex); /* * Calculate the number of 'numerators' streamed since we started, * not including the current buffer. */ numerators_since_start = buffers_since_start * numerator; /* And the number of jiffies since we started */ jiffies_since_start = jiffies - dev->jiffies_vid_out; /* Increase by the 'numerator' of one buffer */ numerators_since_start += numerator; /* * Calculate when that next buffer is supposed to start * in jiffies since we started streaming. */ next_jiffies_since_start = numerators_since_start * HZ + denominator / 2; do_div(next_jiffies_since_start, denominator); /* If it is in the past, then just schedule asap */ if (next_jiffies_since_start < jiffies_since_start) next_jiffies_since_start = jiffies_since_start; wait_jiffies = next_jiffies_since_start - jiffies_since_start; schedule_timeout_interruptible(wait_jiffies ? 
wait_jiffies : 1); } dprintk(dev, 1, "Video Output Thread End\n"); return 0; } static void vivid_grab_controls(struct vivid_dev *dev, bool grab) { v4l2_ctrl_grab(dev->ctrl_has_crop_out, grab); v4l2_ctrl_grab(dev->ctrl_has_compose_out, grab); v4l2_ctrl_grab(dev->ctrl_has_scaler_out, grab); v4l2_ctrl_grab(dev->ctrl_tx_mode, grab); v4l2_ctrl_grab(dev->ctrl_tx_rgb_range, grab); } int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) { dprintk(dev, 1, "%s\n", __func__); if (dev->kthread_vid_out) { u32 seq_count = dev->out_seq_count + dev->seq_wrap * 128; if (pstreaming == &dev->vid_out_streaming) dev->vid_out_seq_start = seq_count; else dev->vbi_out_seq_start = seq_count; *pstreaming = true; return 0; } /* Resets frame counters */ dev->jiffies_vid_out = jiffies; dev->vid_out_seq_start = dev->seq_wrap * 128; dev->vbi_out_seq_start = dev->seq_wrap * 128; dev->kthread_vid_out = kthread_run(vivid_thread_vid_out, dev, "%s-vid-out", dev->v4l2_dev.name); if (IS_ERR(dev->kthread_vid_out)) { v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); return PTR_ERR(dev->kthread_vid_out); } *pstreaming = true; vivid_grab_controls(dev, true); dprintk(dev, 1, "returning from %s\n", __func__); return 0; } void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) { dprintk(dev, 1, "%s\n", __func__); if (dev->kthread_vid_out == NULL) return; *pstreaming = false; if (pstreaming == &dev->vid_out_streaming) { /* Release all active buffers */ while (!list_empty(&dev->vid_out_active)) { struct vivid_buffer *buf; buf = list_entry(dev->vid_out_active.next, struct vivid_buffer, list); list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); dprintk(dev, 2, "vid_out buffer %d done\n", buf->vb.v4l2_buf.index); } } if (pstreaming == &dev->vbi_out_streaming) { while (!list_empty(&dev->vbi_out_active)) { struct vivid_buffer *buf; buf = list_entry(dev->vbi_out_active.next, struct vivid_buffer, list); list_del(&buf->list); vb2_buffer_done(&buf->vb, 
VB2_BUF_STATE_ERROR); dprintk(dev, 2, "vbi_out buffer %d done\n", buf->vb.v4l2_buf.index); } } if (dev->vid_out_streaming || dev->vbi_out_streaming) return; /* shutdown control thread */ vivid_grab_controls(dev, false); mutex_unlock(&dev->mutex); kthread_stop(dev->kthread_vid_out); dev->kthread_vid_out = NULL; mutex_lock(&dev->mutex); }
gpl-2.0
jakew02/android_kernel_asus_fugu
drivers/net/wireless/orinoco/orinoco_plx.c
2454
11721
/* orinoco_plx.c * * Driver for Prism II devices which would usually be driven by orinoco_cs, * but are connected to the PCI bus by a PLX9052. * * Current maintainers are: * Pavel Roskin <proski AT gnu.org> * and David Gibson <hermes AT gibson.dropbear.id.au> * * (C) Copyright David Gibson, IBM Corp. 2001-2003. * Copyright (C) 2001 Daniel Barlow * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. * * Here's the general details on how the PLX9052 adapter works: * * - Two PCI I/O address spaces, one 0x80 long which contains the * PLX9052 registers, and one that's 0x40 long mapped to the PCMCIA * slot I/O address space. * * - One PCI memory address space, mapped to the PCMCIA attribute space * (containing the CIS). * * Using the later, you can read through the CIS data to make sure the * card is compatible with the driver. 
Keep in mind that the PCMCIA * spec specifies the CIS as the lower 8 bits of each word read from * the CIS, so to read the bytes of the CIS, read every other byte * (0,2,4,...). Passing that test, you need to enable the I/O address * space on the PCMCIA card via the PCMCIA COR register. This is the * first byte following the CIS. In my case (which may not have any * relation to what's on the PRISM2 cards), COR was at offset 0x800 * within the PCI memory space. Write 0x41 to the COR register to * enable I/O mode and to select level triggered interrupts. To * confirm you actually succeeded, read the COR register back and make * sure it actually got set to 0x41, in case you have an unexpected * card inserted. * * Following that, you can treat the second PCI I/O address space (the * one that's not 0x80 in length) as the PCMCIA I/O space. * * Note that in the Eumitcom's source for their drivers, they register * the interrupt as edge triggered when registering it with the * Windows kernel. I don't recall how to register edge triggered on * Linux (if it can be done at all). But in some experimentation, I * don't see much operational difference between using either * interrupt mode. Don't mess with the interrupt mode in the COR * register though, as the PLX9052 wants level triggers with the way * the serial EEPROM configures it on the WL11000. * * There's some other little quirks related to timing that I bumped * into, but I don't recall right now. Also, there's two variants of * the WL11000 I've seen, revision A1 and T2. These seem to differ * slightly in the timings configured in the wait-state generator in * the PLX9052. There have also been some comments from Eumitcom that * cards shouldn't be hot swapped, apparently due to risk of cooking * the PLX9052. I'm unsure why they believe this, as I can't see * anything in the design that would really cause a problem, except * for crashing drivers not written to expect it. 
And having developed * drivers for the WL11000, I'd say it's quite tricky to write code * that will successfully deal with a hot unplug. Very odd things * happen on the I/O side of things. But anyway, be warned. Despite * that, I've hot-swapped a number of times during debugging and * driver development for various reasons (stuck WAIT# line after the * radio card's firmware locks up). */ #define DRIVER_NAME "orinoco_plx" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include <pcmcia/cisreg.h> #include "orinoco.h" #include "orinoco_pci.h" #define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */ #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ #define COR_RESET (0x80) /* reset bit in the COR register */ #define PLX_RESET_TIME (500) /* milliseconds */ #define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */ #define PLX_INTCSR_INTEN (1 << 6) /* Interrupt Enable bit */ /* * Do a soft reset of the card using the Configuration Option Register */ static int orinoco_plx_cor_reset(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; struct orinoco_pci_card *card = priv->card; unsigned long timeout; u16 reg; iowrite8(COR_VALUE | COR_RESET, card->attr_io + COR_OFFSET); mdelay(1); iowrite8(COR_VALUE, card->attr_io + COR_OFFSET); mdelay(1); /* Just in case, wait more until the card is no longer busy */ timeout = jiffies + (PLX_RESET_TIME * HZ / 1000); reg = hermes_read_regn(hw, CMD); while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) { mdelay(1); reg = hermes_read_regn(hw, CMD); } /* Still busy? 
*/ if (reg & HERMES_CMD_BUSY) { printk(KERN_ERR PFX "Busy timeout\n"); return -ETIMEDOUT; } return 0; } static int orinoco_plx_hw_init(struct orinoco_pci_card *card) { int i; u32 csr_reg; static const u8 cis_magic[] = { 0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67 }; printk(KERN_DEBUG PFX "CIS: "); for (i = 0; i < 16; i++) printk("%02X:", ioread8(card->attr_io + (i << 1))); printk("\n"); /* Verify whether a supported PC card is present */ /* FIXME: we probably need to be smarted about this */ for (i = 0; i < sizeof(cis_magic); i++) { if (cis_magic[i] != ioread8(card->attr_io + (i << 1))) { printk(KERN_ERR PFX "The CIS value of Prism2 PC " "card is unexpected\n"); return -ENODEV; } } /* bjoern: We need to tell the card to enable interrupts, in case the serial eprom didn't do this already. See the PLX9052 data book, p8-1 and 8-24 for reference. */ csr_reg = ioread32(card->bridge_io + PLX_INTCSR); if (!(csr_reg & PLX_INTCSR_INTEN)) { csr_reg |= PLX_INTCSR_INTEN; iowrite32(csr_reg, card->bridge_io + PLX_INTCSR); csr_reg = ioread32(card->bridge_io + PLX_INTCSR); if (!(csr_reg & PLX_INTCSR_INTEN)) { printk(KERN_ERR PFX "Cannot enable interrupts\n"); return -EIO; } } return 0; } static int orinoco_plx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; struct orinoco_private *priv; struct orinoco_pci_card *card; void __iomem *hermes_io, *attr_io, *bridge_io; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR PFX "Cannot enable PCI device\n"); return err; } err = pci_request_regions(pdev, DRIVER_NAME); if (err) { printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); goto fail_resources; } bridge_io = pci_iomap(pdev, 1, 0); if (!bridge_io) { printk(KERN_ERR PFX "Cannot map bridge registers\n"); err = -EIO; goto fail_map_bridge; } attr_io = pci_iomap(pdev, 2, 0); if (!attr_io) { printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n"); err = -EIO; goto fail_map_attr; } hermes_io = pci_iomap(pdev, 3, 0); if (!hermes_io) { printk(KERN_ERR PFX 
"Cannot map chipset registers\n"); err = -EIO; goto fail_map_hermes; } /* Allocate network device */ priv = alloc_orinocodev(sizeof(*card), &pdev->dev, orinoco_plx_cor_reset, NULL); if (!priv) { printk(KERN_ERR PFX "Cannot allocate network device\n"); err = -ENOMEM; goto fail_alloc; } card = priv->card; card->bridge_io = bridge_io; card->attr_io = attr_io; hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED, DRIVER_NAME, priv); if (err) { printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq); err = -EBUSY; goto fail_irq; } err = orinoco_plx_hw_init(card); if (err) { printk(KERN_ERR PFX "Hardware initialization failed\n"); goto fail; } err = orinoco_plx_cor_reset(priv); if (err) { printk(KERN_ERR PFX "Initial reset failed\n"); goto fail; } err = orinoco_init(priv); if (err) { printk(KERN_ERR PFX "orinoco_init() failed\n"); goto fail; } err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); goto fail; } pci_set_drvdata(pdev, priv); return 0; fail: free_irq(pdev->irq, priv); fail_irq: pci_set_drvdata(pdev, NULL); free_orinocodev(priv); fail_alloc: pci_iounmap(pdev, hermes_io); fail_map_hermes: pci_iounmap(pdev, attr_io); fail_map_attr: pci_iounmap(pdev, bridge_io); fail_map_bridge: pci_release_regions(pdev); fail_resources: pci_disable_device(pdev); return err; } static void orinoco_plx_remove_one(struct pci_dev *pdev) { struct orinoco_private *priv = pci_get_drvdata(pdev); struct orinoco_pci_card *card = priv->card; orinoco_if_del(priv); free_irq(pdev->irq, priv); pci_set_drvdata(pdev, NULL); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); pci_iounmap(pdev, card->attr_io); pci_iounmap(pdev, card->bridge_io); pci_release_regions(pdev); pci_disable_device(pdev); } static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = { {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */ {0x1385, 0x4100, PCI_ANY_ID, 
PCI_ANY_ID,}, /* Netgear MA301 */ {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */ {0x1638, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* SMC EZConnect SMC2602W, Eumitcom PCI WL11000, Addtron AWA-100 */ {0x16ab, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* Global Sun Tech GL24110P */ {0x16ab, 0x1101, PCI_ANY_ID, PCI_ANY_ID,}, /* Reported working, but unknown */ {0x16ab, 0x1102, PCI_ANY_ID, PCI_ANY_ID,}, /* Linksys WDT11 */ {0x16ec, 0x3685, PCI_ANY_ID, PCI_ANY_ID,}, /* USR 2415 */ {0xec80, 0xec00, PCI_ANY_ID, PCI_ANY_ID,}, /* Belkin F5D6000 tested by Brendan W. McAdams <rit AT jacked-in.org> */ {0x10b7, 0x7770, PCI_ANY_ID, PCI_ANY_ID,}, /* 3Com AirConnect PCI tested by Damien Persohn <damien AT persohn.net> */ {0,}, }; MODULE_DEVICE_TABLE(pci, orinoco_plx_id_table); static struct pci_driver orinoco_plx_driver = { .name = DRIVER_NAME, .id_table = orinoco_plx_id_table, .probe = orinoco_plx_init_one, .remove = orinoco_plx_remove_one, .suspend = orinoco_pci_suspend, .resume = orinoco_pci_resume, }; static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION " (Pavel Roskin <proski@gnu.org>," " David Gibson <hermes@gibson.dropbear.id.au>," " Daniel Barlow <dan@telent.net>)"; MODULE_AUTHOR("Daniel Barlow <dan@telent.net>"); MODULE_DESCRIPTION("Driver for wireless LAN cards using the PLX9052 PCI bridge"); MODULE_LICENSE("Dual MPL/GPL"); static int __init orinoco_plx_init(void) { printk(KERN_DEBUG "%s\n", version); return pci_register_driver(&orinoco_plx_driver); } static void __exit orinoco_plx_exit(void) { pci_unregister_driver(&orinoco_plx_driver); } module_init(orinoco_plx_init); module_exit(orinoco_plx_exit); /* * Local variables: * c-indent-level: 8 * c-basic-offset: 8 * tab-width: 8 * End: */
gpl-2.0
major91/Zeta_Chromium-L
arch/arm/mach-msm/nohlt.c
2710
1124
/* Copyright (c) 2009, 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * MSM architecture driver to control arm halt behavior */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/fs.h> #include <asm/system.h> static int set_nohalt(void *data, u64 val) { if (val) disable_hlt(); else enable_hlt(); return 0; } static int get_nohalt(void *data, u64 *val) { *val = (unsigned int)get_hlt(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(nohalt_ops, get_nohalt, set_nohalt, "%llu\n"); static int __init init_hlt_debug(void) { debugfs_create_file("nohlt", 0600, NULL, NULL, &nohalt_ops); return 0; } late_initcall(init_hlt_debug);
gpl-2.0
skullface1/android_kernel_samsung_i9105
fs/ext4/migrate.c
2710
15931
/* * Copyright IBM Corporation, 2007 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include <linux/module.h> #include <linux/slab.h> #include "ext4_jbd2.h" #include "ext4_extents.h" /* * The contiguous blocks details which can be * represented by a single extent */ struct list_blocks_struct { ext4_lblk_t first_block, last_block; ext4_fsblk_t first_pblock, last_pblock; }; static int finish_range(handle_t *handle, struct inode *inode, struct list_blocks_struct *lb) { int retval = 0, needed; struct ext4_extent newext; struct ext4_ext_path *path; if (lb->first_pblock == 0) return 0; /* Add the extent to temp inode*/ newext.ee_block = cpu_to_le32(lb->first_block); newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1); ext4_ext_store_pblock(&newext, lb->first_pblock); path = ext4_ext_find_extent(inode, lb->first_block, NULL); if (IS_ERR(path)) { retval = PTR_ERR(path); path = NULL; goto err_out; } /* * Calculate the credit needed to inserting this extent * Since we are doing this in loop we may accumalate extra * credit. But below we try to not accumalate too much * of them by restarting the journal. 
*/ needed = ext4_ext_calc_credits_for_single_extent(inode, lb->last_block - lb->first_block + 1, path); /* * Make sure the credit we accumalated is not really high */ if (needed && ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS)) { retval = ext4_journal_restart(handle, needed); if (retval) goto err_out; } else if (needed) { retval = ext4_journal_extend(handle, needed); if (retval) { /* * IF not able to extend the journal restart the journal */ retval = ext4_journal_restart(handle, needed); if (retval) goto err_out; } } retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0); err_out: if (path) { ext4_ext_drop_refs(path); kfree(path); } lb->first_pblock = 0; return retval; } static int update_extent_range(handle_t *handle, struct inode *inode, ext4_fsblk_t pblock, ext4_lblk_t blk_num, struct list_blocks_struct *lb) { int retval; /* * See if we can add on to the existing range (if it exists) */ if (lb->first_pblock && (lb->last_pblock+1 == pblock) && (lb->last_block+1 == blk_num)) { lb->last_pblock = pblock; lb->last_block = blk_num; return 0; } /* * Start a new range. 
*/ retval = finish_range(handle, inode, lb); lb->first_pblock = lb->last_pblock = pblock; lb->first_block = lb->last_block = blk_num; return retval; } static int update_ind_extent_range(handle_t *handle, struct inode *inode, ext4_fsblk_t pblock, ext4_lblk_t *blk_nump, struct list_blocks_struct *lb) { struct buffer_head *bh; __le32 *i_data; int i, retval = 0; ext4_lblk_t blk_count = *blk_nump; unsigned long max_entries = inode->i_sb->s_blocksize >> 2; if (!pblock) { /* Only update the file block number */ *blk_nump += max_entries; return 0; } bh = sb_bread(inode->i_sb, pblock); if (!bh) return -EIO; i_data = (__le32 *)bh->b_data; for (i = 0; i < max_entries; i++, blk_count++) { if (i_data[i]) { retval = update_extent_range(handle, inode, le32_to_cpu(i_data[i]), blk_count, lb); if (retval) break; } } /* Update the file block number */ *blk_nump = blk_count; put_bh(bh); return retval; } static int update_dind_extent_range(handle_t *handle, struct inode *inode, ext4_fsblk_t pblock, ext4_lblk_t *blk_nump, struct list_blocks_struct *lb) { struct buffer_head *bh; __le32 *i_data; int i, retval = 0; ext4_lblk_t blk_count = *blk_nump; unsigned long max_entries = inode->i_sb->s_blocksize >> 2; if (!pblock) { /* Only update the file block number */ *blk_nump += max_entries * max_entries; return 0; } bh = sb_bread(inode->i_sb, pblock); if (!bh) return -EIO; i_data = (__le32 *)bh->b_data; for (i = 0; i < max_entries; i++) { if (i_data[i]) { retval = update_ind_extent_range(handle, inode, le32_to_cpu(i_data[i]), &blk_count, lb); if (retval) break; } else { /* Only update the file block number */ blk_count += max_entries; } } /* Update the file block number */ *blk_nump = blk_count; put_bh(bh); return retval; } static int update_tind_extent_range(handle_t *handle, struct inode *inode, ext4_fsblk_t pblock, ext4_lblk_t *blk_nump, struct list_blocks_struct *lb) { struct buffer_head *bh; __le32 *i_data; int i, retval = 0; ext4_lblk_t blk_count = *blk_nump; unsigned long max_entries = 
inode->i_sb->s_blocksize >> 2; if (!pblock) { /* Only update the file block number */ *blk_nump += max_entries * max_entries * max_entries; return 0; } bh = sb_bread(inode->i_sb, pblock); if (!bh) return -EIO; i_data = (__le32 *)bh->b_data; for (i = 0; i < max_entries; i++) { if (i_data[i]) { retval = update_dind_extent_range(handle, inode, le32_to_cpu(i_data[i]), &blk_count, lb); if (retval) break; } else /* Only update the file block number */ blk_count += max_entries * max_entries; } /* Update the file block number */ *blk_nump = blk_count; put_bh(bh); return retval; } static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode) { int retval = 0, needed; if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1)) return 0; /* * We are freeing a blocks. During this we touch * superblock, group descriptor and block bitmap. * So allocate a credit of 3. We may update * quota (user and group). */ needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); if (ext4_journal_extend(handle, needed) != 0) retval = ext4_journal_restart(handle, needed); return retval; } static int free_dind_blocks(handle_t *handle, struct inode *inode, __le32 i_data) { int i; __le32 *tmp_idata; struct buffer_head *bh; unsigned long max_entries = inode->i_sb->s_blocksize >> 2; bh = sb_bread(inode->i_sb, le32_to_cpu(i_data)); if (!bh) return -EIO; tmp_idata = (__le32 *)bh->b_data; for (i = 0; i < max_entries; i++) { if (tmp_idata[i]) { extend_credit_for_blkdel(handle, inode); ext4_free_blocks(handle, inode, NULL, le32_to_cpu(tmp_idata[i]), 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); } } put_bh(bh); extend_credit_for_blkdel(handle, inode); ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); return 0; } static int free_tind_blocks(handle_t *handle, struct inode *inode, __le32 i_data) { int i, retval = 0; __le32 *tmp_idata; struct buffer_head *bh; unsigned long max_entries = 
inode->i_sb->s_blocksize >> 2; bh = sb_bread(inode->i_sb, le32_to_cpu(i_data)); if (!bh) return -EIO; tmp_idata = (__le32 *)bh->b_data; for (i = 0; i < max_entries; i++) { if (tmp_idata[i]) { retval = free_dind_blocks(handle, inode, tmp_idata[i]); if (retval) { put_bh(bh); return retval; } } } put_bh(bh); extend_credit_for_blkdel(handle, inode); ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); return 0; } static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data) { int retval; /* ei->i_data[EXT4_IND_BLOCK] */ if (i_data[0]) { extend_credit_for_blkdel(handle, inode); ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data[0]), 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); } /* ei->i_data[EXT4_DIND_BLOCK] */ if (i_data[1]) { retval = free_dind_blocks(handle, inode, i_data[1]); if (retval) return retval; } /* ei->i_data[EXT4_TIND_BLOCK] */ if (i_data[2]) { retval = free_tind_blocks(handle, inode, i_data[2]); if (retval) return retval; } return 0; } static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, struct inode *tmp_inode) { int retval; __le32 i_data[3]; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode); /* * One credit accounted for writing the * i_data field of the original inode */ retval = ext4_journal_extend(handle, 1); if (retval) { retval = ext4_journal_restart(handle, 1); if (retval) goto err_out; } i_data[0] = ei->i_data[EXT4_IND_BLOCK]; i_data[1] = ei->i_data[EXT4_DIND_BLOCK]; i_data[2] = ei->i_data[EXT4_TIND_BLOCK]; down_write(&EXT4_I(inode)->i_data_sem); /* * if EXT4_STATE_EXT_MIGRATE is cleared a block allocation * happened after we started the migrate. 
We need to * fail the migrate */ if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) { retval = -EAGAIN; up_write(&EXT4_I(inode)->i_data_sem); goto err_out; } else ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); /* * We have the extent map build with the tmp inode. * Now copy the i_data across */ ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS); memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data)); /* * Update i_blocks with the new blocks that got * allocated while adding extents for extent index * blocks. * * While converting to extents we need not * update the orignal inode i_blocks for extent blocks * via quota APIs. The quota update happened via tmp_inode already. */ spin_lock(&inode->i_lock); inode->i_blocks += tmp_inode->i_blocks; spin_unlock(&inode->i_lock); up_write(&EXT4_I(inode)->i_data_sem); /* * We mark the inode dirty after, because we decrement the * i_blocks when freeing the indirect meta-data blocks */ retval = free_ind_block(handle, inode, i_data); ext4_mark_inode_dirty(handle, inode); err_out: return retval; } static int free_ext_idx(handle_t *handle, struct inode *inode, struct ext4_extent_idx *ix) { int i, retval = 0; ext4_fsblk_t block; struct buffer_head *bh; struct ext4_extent_header *eh; block = ext4_idx_pblock(ix); bh = sb_bread(inode->i_sb, block); if (!bh) return -EIO; eh = (struct ext4_extent_header *)bh->b_data; if (eh->eh_depth != 0) { ix = EXT_FIRST_INDEX(eh); for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) { retval = free_ext_idx(handle, inode, ix); if (retval) break; } } put_bh(bh); extend_credit_for_blkdel(handle, inode); ext4_free_blocks(handle, inode, NULL, block, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); return retval; } /* * Free the extent meta data blocks only */ static int free_ext_block(handle_t *handle, struct inode *inode) { int i, retval = 0; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data; struct ext4_extent_idx *ix; 
if (eh->eh_depth == 0) /* * No extra blocks allocated for extent meta data */ return 0; ix = EXT_FIRST_INDEX(eh); for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) { retval = free_ext_idx(handle, inode, ix); if (retval) return retval; } return retval; } int ext4_ext_migrate(struct inode *inode) { handle_t *handle; int retval = 0, i; __le32 *i_data; ext4_lblk_t blk_count = 0; struct ext4_inode_info *ei; struct inode *tmp_inode = NULL; struct list_blocks_struct lb; unsigned long max_entries; __u32 goal; /* * If the filesystem does not support extents, or the inode * already is extent-based, error out. */ if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_INCOMPAT_EXTENTS) || (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return -EINVAL; if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0) /* * don't migrate fast symlink */ return retval; handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + 1); if (IS_ERR(handle)) { retval = PTR_ERR(handle); return retval; } goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) * EXT4_INODES_PER_GROUP(inode->i_sb)) + 1; tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode, S_IFREG, NULL, goal); if (IS_ERR(tmp_inode)) { retval = -ENOMEM; ext4_journal_stop(handle); return retval; } i_size_write(tmp_inode, i_size_read(inode)); /* * Set the i_nlink to zero so it will be deleted later * when we drop inode reference. */ tmp_inode->i_nlink = 0; ext4_ext_tree_init(handle, tmp_inode); ext4_orphan_add(handle, tmp_inode); ext4_journal_stop(handle); /* * start with one credit accounted for * superblock modification. * * For the tmp_inode we already have committed the * trascation that created the inode. Later as and * when we add extents we extent the journal */ /* * Even though we take i_mutex we can still cause block * allocation via mmap write to holes. If we have allocated * new blocks we fail migrate. 
New block allocation will * clear EXT4_STATE_EXT_MIGRATE flag. The flag is updated * with i_data_sem held to prevent racing with block * allocation. */ down_read((&EXT4_I(inode)->i_data_sem)); ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE); up_read((&EXT4_I(inode)->i_data_sem)); handle = ext4_journal_start(inode, 1); if (IS_ERR(handle)) { /* * It is impossible to update on-disk structures without * a handle, so just rollback in-core changes and live other * work to orphan_list_cleanup() */ ext4_orphan_del(NULL, tmp_inode); retval = PTR_ERR(handle); goto out; } ei = EXT4_I(inode); i_data = ei->i_data; memset(&lb, 0, sizeof(lb)); /* 32 bit block address 4 bytes */ max_entries = inode->i_sb->s_blocksize >> 2; for (i = 0; i < EXT4_NDIR_BLOCKS; i++, blk_count++) { if (i_data[i]) { retval = update_extent_range(handle, tmp_inode, le32_to_cpu(i_data[i]), blk_count, &lb); if (retval) goto err_out; } } if (i_data[EXT4_IND_BLOCK]) { retval = update_ind_extent_range(handle, tmp_inode, le32_to_cpu(i_data[EXT4_IND_BLOCK]), &blk_count, &lb); if (retval) goto err_out; } else blk_count += max_entries; if (i_data[EXT4_DIND_BLOCK]) { retval = update_dind_extent_range(handle, tmp_inode, le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &blk_count, &lb); if (retval) goto err_out; } else blk_count += max_entries * max_entries; if (i_data[EXT4_TIND_BLOCK]) { retval = update_tind_extent_range(handle, tmp_inode, le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &blk_count, &lb); if (retval) goto err_out; } /* * Build the last extent */ retval = finish_range(handle, tmp_inode, &lb); err_out: if (retval) /* * Failure case delete the extent information with the * tmp_inode */ free_ext_block(handle, tmp_inode); else { retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode); if (retval) /* * if we fail to swap inode data free the extent * details of the tmp inode */ free_ext_block(handle, tmp_inode); } /* We mark the tmp_inode dirty via ext4_ext_tree_init. 
*/ if (ext4_journal_extend(handle, 1) != 0) ext4_journal_restart(handle, 1); /* * Mark the tmp_inode as of size zero */ i_size_write(tmp_inode, 0); /* * set the i_blocks count to zero * so that the ext4_delete_inode does the * right job * * We don't need to take the i_lock because * the inode is not visible to user space. */ tmp_inode->i_blocks = 0; /* Reset the extent details */ ext4_ext_tree_init(handle, tmp_inode); ext4_journal_stop(handle); out: unlock_new_inode(tmp_inode); iput(tmp_inode); return retval; }
gpl-2.0
huhuikevin/kernel_imx
drivers/acpi/acpica/exdump.c
3222
29737
/****************************************************************************** * * Module Name: exdump - Interpreter debug output routines * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "amlcode.h" #include "acnamesp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exdump") /* * The following routines are used for debug output only */ #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) /* Local prototypes */ static void acpi_ex_out_string(char *title, char *value); static void acpi_ex_out_pointer(char *title, void *value); static void acpi_ex_dump_object(union acpi_operand_object *obj_desc, struct acpi_exdump_info *info); static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc); static void acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc, u32 level, u32 index); /******************************************************************************* * * Object Descriptor info tables * * Note: The first table entry must be an INIT opcode and must contain * the table length (number of table entries) * ******************************************************************************/ static struct acpi_exdump_info acpi_ex_dump_integer[2] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_integer), NULL}, {ACPI_EXD_UINT64, ACPI_EXD_OFFSET(integer.value), "Value"} }; static struct acpi_exdump_info acpi_ex_dump_string[4] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_string), NULL}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(string.length), "Length"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(string.pointer), "Pointer"}, {ACPI_EXD_STRING, 0, NULL} }; static struct 
acpi_exdump_info acpi_ex_dump_buffer[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"}, {ACPI_EXD_BUFFER, 0, NULL} }; static struct acpi_exdump_info acpi_ex_dump_package[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"}, {ACPI_EXD_PACKAGE, 0, NULL} }; static struct acpi_exdump_info acpi_ex_dump_device[4] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.system_notify), "System Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.device_notify), "Device Notify"} }; static struct acpi_exdump_info acpi_ex_dump_event[2] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_event), NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"} }; static struct acpi_exdump_info acpi_ex_dump_method[9] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.info_flags), "Info Flags"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "Parameter Count"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.aml_start), "Aml Start"} }; static struct acpi_exdump_info acpi_ex_dump_mutex[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), 
NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"}, {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), "Acquire Depth"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"} }; static struct acpi_exdump_info acpi_ex_dump_region[7] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.space_id), "Space Id"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.flags), "Flags"}, {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(region.address), "Address"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(region.length), "Length"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.handler), "Handler"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.next), "Next"} }; static struct acpi_exdump_info acpi_ex_dump_power[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_power), NULL}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.system_level), "System Level"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.resource_order), "Resource Order"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.system_notify), "System Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.device_notify), "Device Notify"} }; static struct acpi_exdump_info acpi_ex_dump_processor[7] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"}, {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify), "System Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.device_notify), "Device Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.handler), "Handler"} }; static struct acpi_exdump_info acpi_ex_dump_thermal[4] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_thermal), NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.system_notify), "System 
Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.device_notify), "Device Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.handler), "Handler"} }; static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer_field), NULL}, {ACPI_EXD_FIELD, 0, NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer_field.buffer_obj), "Buffer Object"} }; static struct acpi_exdump_info acpi_ex_dump_region_field[3] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL}, {ACPI_EXD_FIELD, 0, NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"} }; static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_bank_field), NULL}, {ACPI_EXD_FIELD, 0, NULL}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(bank_field.value), "Value"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(bank_field.region_obj), "Region Object"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(bank_field.bank_obj), "Bank Object"} }; static struct acpi_exdump_info acpi_ex_dump_index_field[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_bank_field), NULL}, {ACPI_EXD_FIELD, 0, NULL}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(index_field.value), "Value"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.index_obj), "Index Object"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.data_obj), "Data Object"} }; static struct acpi_exdump_info acpi_ex_dump_reference[8] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_reference), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.class), "Class"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), "Value"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"}, {ACPI_EXD_REFERENCE, 0, NULL} }; static struct acpi_exdump_info 
acpi_ex_dump_address_handler[6] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_address_handler), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(address_space.space_id), "Space Id"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.next), "Next"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.region_list), "Region List"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.node), "Node"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"} }; static struct acpi_exdump_info acpi_ex_dump_notify[3] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"} }; /* Miscellaneous tables */ static struct acpi_exdump_info acpi_ex_dump_common[4] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_common), NULL}, {ACPI_EXD_TYPE, 0, NULL}, {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(common.reference_count), "Reference Count"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"} }; static struct acpi_exdump_info acpi_ex_dump_field_common[7] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_field_common), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.field_flags), "Field Flags"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.access_byte_width), "Access Byte Width"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.bit_length), "Bit Length"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.start_field_bit_offset), "Field Bit Offset"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.base_byte_offset), "Base Byte Offset"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"} }; static struct acpi_exdump_info acpi_ex_dump_node[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"}, {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"}, {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"}, {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"} 
}; /* Dispatch table, indexed by object type */ static struct acpi_exdump_info *acpi_ex_dump_info[] = { NULL, acpi_ex_dump_integer, acpi_ex_dump_string, acpi_ex_dump_buffer, acpi_ex_dump_package, NULL, acpi_ex_dump_device, acpi_ex_dump_event, acpi_ex_dump_method, acpi_ex_dump_mutex, acpi_ex_dump_region, acpi_ex_dump_power, acpi_ex_dump_processor, acpi_ex_dump_thermal, acpi_ex_dump_buffer_field, NULL, NULL, acpi_ex_dump_region_field, acpi_ex_dump_bank_field, acpi_ex_dump_index_field, acpi_ex_dump_reference, NULL, NULL, acpi_ex_dump_notify, acpi_ex_dump_address_handler, NULL, NULL, NULL }; /******************************************************************************* * * FUNCTION: acpi_ex_dump_object * * PARAMETERS: obj_desc - Descriptor to dump * Info - Info table corresponding to this object * type * * RETURN: None * * DESCRIPTION: Walk the info table for this object * ******************************************************************************/ static void acpi_ex_dump_object(union acpi_operand_object *obj_desc, struct acpi_exdump_info *info) { u8 *target; char *name; u8 count; if (!info) { acpi_os_printf ("ExDumpObject: Display not implemented for object type %s\n", acpi_ut_get_object_type_name(obj_desc)); return; } /* First table entry must contain the table length (# of table entries) */ count = info->offset; while (count) { target = ACPI_ADD_PTR(u8, obj_desc, info->offset); name = info->name; switch (info->opcode) { case ACPI_EXD_INIT: break; case ACPI_EXD_TYPE: acpi_ex_out_string("Type", acpi_ut_get_object_type_name (obj_desc)); break; case ACPI_EXD_UINT8: acpi_os_printf("%20s : %2.2X\n", name, *target); break; case ACPI_EXD_UINT16: acpi_os_printf("%20s : %4.4X\n", name, ACPI_GET16(target)); break; case ACPI_EXD_UINT32: acpi_os_printf("%20s : %8.8X\n", name, ACPI_GET32(target)); break; case ACPI_EXD_UINT64: acpi_os_printf("%20s : %8.8X%8.8X\n", "Value", ACPI_FORMAT_UINT64(ACPI_GET64(target))); break; case ACPI_EXD_POINTER: case ACPI_EXD_ADDRESS: 
acpi_ex_out_pointer(name, *ACPI_CAST_PTR(void *, target)); break; case ACPI_EXD_STRING: acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX); acpi_os_printf("\n"); break; case ACPI_EXD_BUFFER: ACPI_DUMP_BUFFER(obj_desc->buffer.pointer, obj_desc->buffer.length); break; case ACPI_EXD_PACKAGE: /* Dump the package contents */ acpi_os_printf("\nPackage Contents:\n"); acpi_ex_dump_package_obj(obj_desc, 0, 0); break; case ACPI_EXD_FIELD: acpi_ex_dump_object(obj_desc, acpi_ex_dump_field_common); break; case ACPI_EXD_REFERENCE: acpi_ex_out_string("Class Name", ACPI_CAST_PTR(char, acpi_ut_get_reference_name (obj_desc))); acpi_ex_dump_reference_obj(obj_desc); break; default: acpi_os_printf("**** Invalid table opcode [%X] ****\n", info->opcode); return; } info++; count--; } } /******************************************************************************* * * FUNCTION: acpi_ex_dump_operand * * PARAMETERS: *obj_desc - Pointer to entry to be dumped * Depth - Current nesting depth * * RETURN: None * * DESCRIPTION: Dump an operand object * ******************************************************************************/ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) { u32 length; u32 index; ACPI_FUNCTION_NAME(ex_dump_operand) if (!((ACPI_LV_EXEC & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) { return; } if (!obj_desc) { /* This could be a null element of a package */ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Null Object Descriptor\n")); return; } if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p Namespace Node: ", obj_desc)); ACPI_DUMP_ENTRY(obj_desc, ACPI_LV_EXEC); return; } if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p is not a node or operand object: [%s]\n", obj_desc, acpi_ut_get_descriptor_name(obj_desc))); ACPI_DUMP_BUFFER(obj_desc, sizeof(union acpi_operand_object)); return; } /* obj_desc is a valid object */ if (depth > 0) { 
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%*s[%u] %p ", depth, " ", depth, obj_desc)); } else { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p ", obj_desc)); } /* Decode object type */ switch (obj_desc->common.type) { case ACPI_TYPE_LOCAL_REFERENCE: acpi_os_printf("Reference: [%s] ", acpi_ut_get_reference_name(obj_desc)); switch (obj_desc->reference.class) { case ACPI_REFCLASS_DEBUG: acpi_os_printf("\n"); break; case ACPI_REFCLASS_INDEX: acpi_os_printf("%p\n", obj_desc->reference.object); break; case ACPI_REFCLASS_TABLE: acpi_os_printf("Table Index %X\n", obj_desc->reference.value); break; case ACPI_REFCLASS_REFOF: acpi_os_printf("%p [%s]\n", obj_desc->reference.object, acpi_ut_get_type_name(((union acpi_operand_object *) obj_desc-> reference. object)->common. type)); break; case ACPI_REFCLASS_NAME: acpi_os_printf("- [%4.4s]\n", obj_desc->reference.node->name.ascii); break; case ACPI_REFCLASS_ARG: case ACPI_REFCLASS_LOCAL: acpi_os_printf("%X\n", obj_desc->reference.value); break; default: /* Unknown reference class */ acpi_os_printf("%2.2X\n", obj_desc->reference.class); break; } break; case ACPI_TYPE_BUFFER: acpi_os_printf("Buffer length %.2X @ %p\n", obj_desc->buffer.length, obj_desc->buffer.pointer); /* Debug only -- dump the buffer contents */ if (obj_desc->buffer.pointer) { length = obj_desc->buffer.length; if (length > 128) { length = 128; } acpi_os_printf ("Buffer Contents: (displaying length 0x%.2X)\n", length); ACPI_DUMP_BUFFER(obj_desc->buffer.pointer, length); } break; case ACPI_TYPE_INTEGER: acpi_os_printf("Integer %8.8X%8.8X\n", ACPI_FORMAT_UINT64(obj_desc->integer.value)); break; case ACPI_TYPE_PACKAGE: acpi_os_printf("Package [Len %X] ElementArray %p\n", obj_desc->package.count, obj_desc->package.elements); /* * If elements exist, package element pointer is valid, * and debug_level exceeds 1, dump package's elements. 
*/ if (obj_desc->package.count && obj_desc->package.elements && acpi_dbg_level > 1) { for (index = 0; index < obj_desc->package.count; index++) { acpi_ex_dump_operand(obj_desc->package. elements[index], depth + 1); } } break; case ACPI_TYPE_REGION: acpi_os_printf("Region %s (%X)", acpi_ut_get_region_name(obj_desc->region. space_id), obj_desc->region.space_id); /* * If the address and length have not been evaluated, * don't print them. */ if (!(obj_desc->region.flags & AOPOBJ_DATA_VALID)) { acpi_os_printf("\n"); } else { acpi_os_printf(" base %8.8X%8.8X Length %X\n", ACPI_FORMAT_NATIVE_UINT(obj_desc->region. address), obj_desc->region.length); } break; case ACPI_TYPE_STRING: acpi_os_printf("String length %X @ %p ", obj_desc->string.length, obj_desc->string.pointer); acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX); acpi_os_printf("\n"); break; case ACPI_TYPE_LOCAL_BANK_FIELD: acpi_os_printf("BankField\n"); break; case ACPI_TYPE_LOCAL_REGION_FIELD: acpi_os_printf ("RegionField: Bits=%X AccWidth=%X Lock=%X Update=%X at " "byte=%X bit=%X of below:\n", obj_desc->field.bit_length, obj_desc->field.access_byte_width, obj_desc->field.field_flags & AML_FIELD_LOCK_RULE_MASK, obj_desc->field.field_flags & AML_FIELD_UPDATE_RULE_MASK, obj_desc->field.base_byte_offset, obj_desc->field.start_field_bit_offset); acpi_ex_dump_operand(obj_desc->field.region_obj, depth + 1); break; case ACPI_TYPE_LOCAL_INDEX_FIELD: acpi_os_printf("IndexField\n"); break; case ACPI_TYPE_BUFFER_FIELD: acpi_os_printf("BufferField: %X bits at byte %X bit %X of\n", obj_desc->buffer_field.bit_length, obj_desc->buffer_field.base_byte_offset, obj_desc->buffer_field.start_field_bit_offset); if (!obj_desc->buffer_field.buffer_obj) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "*NULL*\n")); } else if ((obj_desc->buffer_field.buffer_obj)->common.type != ACPI_TYPE_BUFFER) { acpi_os_printf("*not a Buffer*\n"); } else { acpi_ex_dump_operand(obj_desc->buffer_field.buffer_obj, depth + 1); } break; case 
ACPI_TYPE_EVENT: acpi_os_printf("Event\n"); break; case ACPI_TYPE_METHOD: acpi_os_printf("Method(%X) @ %p:%X\n", obj_desc->method.param_count, obj_desc->method.aml_start, obj_desc->method.aml_length); break; case ACPI_TYPE_MUTEX: acpi_os_printf("Mutex\n"); break; case ACPI_TYPE_DEVICE: acpi_os_printf("Device\n"); break; case ACPI_TYPE_POWER: acpi_os_printf("Power\n"); break; case ACPI_TYPE_PROCESSOR: acpi_os_printf("Processor\n"); break; case ACPI_TYPE_THERMAL: acpi_os_printf("Thermal\n"); break; default: /* Unknown Type */ acpi_os_printf("Unknown Type %X\n", obj_desc->common.type); break; } return; } /******************************************************************************* * * FUNCTION: acpi_ex_dump_operands * * PARAMETERS: Operands - A list of Operand objects * opcode_name - AML opcode name * num_operands - Operand count for this opcode * * DESCRIPTION: Dump the operands associated with the opcode * ******************************************************************************/ void acpi_ex_dump_operands(union acpi_operand_object **operands, const char *opcode_name, u32 num_operands) { ACPI_FUNCTION_NAME(ex_dump_operands); if (!opcode_name) { opcode_name = "UNKNOWN"; } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "**** Start operand dump for opcode [%s], %u operands\n", opcode_name, num_operands)); if (num_operands == 0) { num_operands = 1; } /* Dump the individual operands */ while (num_operands) { acpi_ex_dump_operand(*operands, 0); operands++; num_operands--; } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "**** End operand dump for [%s]\n", opcode_name)); return; } /******************************************************************************* * * FUNCTION: acpi_ex_out* functions * * PARAMETERS: Title - Descriptive text * Value - Value to be displayed * * DESCRIPTION: Object dump output formatting functions. These functions * reduce the number of format strings required and keeps them * all in one place for easy modification. 
* ******************************************************************************/ static void acpi_ex_out_string(char *title, char *value) { acpi_os_printf("%20s : %s\n", title, value); } static void acpi_ex_out_pointer(char *title, void *value) { acpi_os_printf("%20s : %p\n", title, value); } /******************************************************************************* * * FUNCTION: acpi_ex_dump_namespace_node * * PARAMETERS: Node - Descriptor to dump * Flags - Force display if TRUE * * DESCRIPTION: Dumps the members of the given.Node * ******************************************************************************/ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags) { ACPI_FUNCTION_ENTRY(); if (!flags) { if (!((ACPI_LV_OBJECTS & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) { return; } } acpi_os_printf("%20s : %4.4s\n", "Name", acpi_ut_get_node_name(node)); acpi_ex_out_string("Type", acpi_ut_get_type_name(node->type)); acpi_ex_out_pointer("Attached Object", acpi_ns_get_attached_object(node)); acpi_ex_out_pointer("Parent", node->parent); acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node), acpi_ex_dump_node); } /******************************************************************************* * * FUNCTION: acpi_ex_dump_reference_obj * * PARAMETERS: Object - Descriptor to dump * * DESCRIPTION: Dumps a reference object * ******************************************************************************/ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc) { struct acpi_buffer ret_buf; acpi_status status; ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER; if (obj_desc->reference.class == ACPI_REFCLASS_NAME) { acpi_os_printf(" %p ", obj_desc->reference.node); status = acpi_ns_handle_to_pathname(obj_desc->reference.node, &ret_buf); if (ACPI_FAILURE(status)) { acpi_os_printf(" Could not convert name to pathname\n"); } else { acpi_os_printf("%s\n", (char *)ret_buf.pointer); ACPI_FREE(ret_buf.pointer); } } else if 
(obj_desc->reference.object) { if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_OPERAND) { acpi_os_printf(" Target: %p", obj_desc->reference.object); if (obj_desc->reference.class == ACPI_REFCLASS_TABLE) { acpi_os_printf(" Table Index: %X\n", obj_desc->reference.value); } else { acpi_os_printf(" Target: %p [%s]\n", obj_desc->reference.object, acpi_ut_get_type_name(((union acpi_operand_object *) obj_desc-> reference. object)-> common. type)); } } else { acpi_os_printf(" Target: %p\n", obj_desc->reference.object); } } } /******************************************************************************* * * FUNCTION: acpi_ex_dump_package_obj * * PARAMETERS: obj_desc - Descriptor to dump * Level - Indentation Level * Index - Package index for this object * * DESCRIPTION: Dumps the elements of the package * ******************************************************************************/ static void acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc, u32 level, u32 index) { u32 i; /* Indentation and index output */ if (level > 0) { for (i = 0; i < level; i++) { acpi_os_printf(" "); } acpi_os_printf("[%.2d] ", index); } acpi_os_printf("%p ", obj_desc); /* Null package elements are allowed */ if (!obj_desc) { acpi_os_printf("[Null Object]\n"); return; } /* Packages may only contain a few object types */ switch (obj_desc->common.type) { case ACPI_TYPE_INTEGER: acpi_os_printf("[Integer] = %8.8X%8.8X\n", ACPI_FORMAT_UINT64(obj_desc->integer.value)); break; case ACPI_TYPE_STRING: acpi_os_printf("[String] Value: "); for (i = 0; i < obj_desc->string.length; i++) { acpi_os_printf("%c", obj_desc->string.pointer[i]); } acpi_os_printf("\n"); break; case ACPI_TYPE_BUFFER: acpi_os_printf("[Buffer] Length %.2X = ", obj_desc->buffer.length); if (obj_desc->buffer.length) { acpi_ut_dump_buffer(ACPI_CAST_PTR (u8, obj_desc->buffer.pointer), obj_desc->buffer.length, DB_DWORD_DISPLAY, _COMPONENT); } else { acpi_os_printf("\n"); } break; case ACPI_TYPE_PACKAGE: 
acpi_os_printf("[Package] Contains %u Elements:\n", obj_desc->package.count); for (i = 0; i < obj_desc->package.count; i++) { acpi_ex_dump_package_obj(obj_desc->package.elements[i], level + 1, i); } break; case ACPI_TYPE_LOCAL_REFERENCE: acpi_os_printf("[Object Reference] Type [%s] %2.2X", acpi_ut_get_reference_name(obj_desc), obj_desc->reference.class); acpi_ex_dump_reference_obj(obj_desc); break; default: acpi_os_printf("[Unknown Type] %X\n", obj_desc->common.type); break; } } /******************************************************************************* * * FUNCTION: acpi_ex_dump_object_descriptor * * PARAMETERS: obj_desc - Descriptor to dump * Flags - Force display if TRUE * * DESCRIPTION: Dumps the members of the object descriptor given. * ******************************************************************************/ void acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags) { ACPI_FUNCTION_TRACE(ex_dump_object_descriptor); if (!obj_desc) { return_VOID; } if (!flags) { if (!((ACPI_LV_OBJECTS & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) { return_VOID; } } if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) { acpi_ex_dump_namespace_node((struct acpi_namespace_node *) obj_desc, flags); acpi_os_printf("\nAttached Object (%p):\n", ((struct acpi_namespace_node *)obj_desc)-> object); acpi_ex_dump_object_descriptor(((struct acpi_namespace_node *) obj_desc)->object, flags); return_VOID; } if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) { acpi_os_printf ("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n", obj_desc, acpi_ut_get_descriptor_name(obj_desc)); return_VOID; } if (obj_desc->common.type > ACPI_TYPE_NS_NODE_MAX) { return_VOID; } /* Common Fields */ acpi_ex_dump_object(obj_desc, acpi_ex_dump_common); /* Object-specific fields */ acpi_ex_dump_object(obj_desc, acpi_ex_dump_info[obj_desc->common.type]); return_VOID; } #endif
gpl-2.0
ajs-sun/linux
net/x25/x25_proc.c
4502
6034
/* * X.25 Packet Layer release 002 * * This is ALPHA test software. This code may break your machine, * randomly fail to work with new releases, misbehave and/or generally * screw up. It might even work. * * This code REQUIRES 2.4 with seq_file support * * This module: * This module is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * History * 2002/10/06 Arnaldo Carvalho de Melo seq_file support */ #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/export.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/x25.h> #ifdef CONFIG_PROC_FS static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos) __acquires(x25_route_list_lock) { read_lock_bh(&x25_route_list_lock); return seq_list_start_head(&x25_route_list, *pos); } static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &x25_route_list, pos); } static void x25_seq_route_stop(struct seq_file *seq, void *v) __releases(x25_route_list_lock) { read_unlock_bh(&x25_route_list_lock); } static int x25_seq_route_show(struct seq_file *seq, void *v) { struct x25_route *rt = list_entry(v, struct x25_route, node); if (v == &x25_route_list) { seq_puts(seq, "Address Digits Device\n"); goto out; } rt = v; seq_printf(seq, "%-15s %-6d %-5s\n", rt->address.x25_addr, rt->sigdigits, rt->dev ? 
rt->dev->name : "???"); out: return 0; } static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos) __acquires(x25_list_lock) { read_lock_bh(&x25_list_lock); return seq_hlist_start_head(&x25_list, *pos); } static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &x25_list, pos); } static void x25_seq_socket_stop(struct seq_file *seq, void *v) __releases(x25_list_lock) { read_unlock_bh(&x25_list_lock); } static int x25_seq_socket_show(struct seq_file *seq, void *v) { struct sock *s; struct x25_sock *x25; struct net_device *dev; const char *devname; if (v == SEQ_START_TOKEN) { seq_printf(seq, "dest_addr src_addr dev lci st vs vr " "va t t2 t21 t22 t23 Snd-Q Rcv-Q inode\n"); goto out; } s = sk_entry(v); x25 = x25_sk(s); if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL) devname = "???"; else devname = x25->neighbour->dev->name; seq_printf(seq, "%-10s %-10s %-5s %3.3X %d %d %d %d %3lu %3lu " "%3lu %3lu %3lu %5d %5d %ld\n", !x25->dest_addr.x25_addr[0] ? "*" : x25->dest_addr.x25_addr, !x25->source_addr.x25_addr[0] ? "*" : x25->source_addr.x25_addr, devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr, x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ, x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ, sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), s->sk_socket ? 
SOCK_INODE(s->sk_socket)->i_ino : 0L); out: return 0; } static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos) __acquires(x25_forward_list_lock) { read_lock_bh(&x25_forward_list_lock); return seq_list_start_head(&x25_forward_list, *pos); } static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &x25_forward_list, pos); } static void x25_seq_forward_stop(struct seq_file *seq, void *v) __releases(x25_forward_list_lock) { read_unlock_bh(&x25_forward_list_lock); } static int x25_seq_forward_show(struct seq_file *seq, void *v) { struct x25_forward *f = list_entry(v, struct x25_forward, node); if (v == &x25_forward_list) { seq_printf(seq, "lci dev1 dev2\n"); goto out; } f = v; seq_printf(seq, "%d %-10s %-10s\n", f->lci, f->dev1->name, f->dev2->name); out: return 0; } static const struct seq_operations x25_seq_route_ops = { .start = x25_seq_route_start, .next = x25_seq_route_next, .stop = x25_seq_route_stop, .show = x25_seq_route_show, }; static const struct seq_operations x25_seq_socket_ops = { .start = x25_seq_socket_start, .next = x25_seq_socket_next, .stop = x25_seq_socket_stop, .show = x25_seq_socket_show, }; static const struct seq_operations x25_seq_forward_ops = { .start = x25_seq_forward_start, .next = x25_seq_forward_next, .stop = x25_seq_forward_stop, .show = x25_seq_forward_show, }; static int x25_seq_socket_open(struct inode *inode, struct file *file) { return seq_open(file, &x25_seq_socket_ops); } static int x25_seq_route_open(struct inode *inode, struct file *file) { return seq_open(file, &x25_seq_route_ops); } static int x25_seq_forward_open(struct inode *inode, struct file *file) { return seq_open(file, &x25_seq_forward_ops); } static const struct file_operations x25_seq_socket_fops = { .open = x25_seq_socket_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations x25_seq_route_fops = { .open = x25_seq_route_open, .read = seq_read, .llseek = 
seq_lseek, .release = seq_release, }; static const struct file_operations x25_seq_forward_fops = { .open = x25_seq_forward_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; int __init x25_proc_init(void) { if (!proc_mkdir("x25", init_net.proc_net)) return -ENOMEM; if (!proc_create("x25/route", S_IRUGO, init_net.proc_net, &x25_seq_route_fops)) goto out; if (!proc_create("x25/socket", S_IRUGO, init_net.proc_net, &x25_seq_socket_fops)) goto out; if (!proc_create("x25/forward", S_IRUGO, init_net.proc_net, &x25_seq_forward_fops)) goto out; return 0; out: remove_proc_subtree("x25", init_net.proc_net); return -ENOMEM; } void __exit x25_proc_exit(void) { remove_proc_subtree("x25", init_net.proc_net); } #else /* CONFIG_PROC_FS */ int __init x25_proc_init(void) { return 0; } void __exit x25_proc_exit(void) { } #endif /* CONFIG_PROC_FS */
gpl-2.0
yaymalaga/TaurusPrime_Kernel
drivers/s390/char/ctrlchar.c
8342
1685
/* * drivers/s390/char/ctrlchar.c * Unified handling of special chars. * * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com> * */ #include <linux/stddef.h> #include <asm/errno.h> #include <linux/sysrq.h> #include <linux/ctype.h> #include "ctrlchar.h" #ifdef CONFIG_MAGIC_SYSRQ static int ctrlchar_sysrq_key; static void ctrlchar_handle_sysrq(struct work_struct *work) { handle_sysrq(ctrlchar_sysrq_key); } static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); #endif /** * Check for special chars at start of input. * * @param buf Console input buffer. * @param len Length of valid data in buffer. * @param tty The tty struct for this console. * @return CTRLCHAR_NONE, if nothing matched, * CTRLCHAR_SYSRQ, if sysrq was encountered * otherwise char to be inserted logically or'ed * with CTRLCHAR_CTRL */ unsigned int ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty) { if ((len < 2) || (len > 3)) return CTRLCHAR_NONE; /* hat is 0xb1 in codepage 037 (US etc.) and thus */ /* converted to 0x5e in ascii ('^') */ if ((buf[0] != '^') && (buf[0] != '\252')) return CTRLCHAR_NONE; #ifdef CONFIG_MAGIC_SYSRQ /* racy */ if (len == 3 && buf[1] == '-') { ctrlchar_sysrq_key = buf[2]; schedule_work(&ctrlchar_work); return CTRLCHAR_SYSRQ; } #endif if (len != 2) return CTRLCHAR_NONE; switch (tolower(buf[1])) { case 'c': return INTR_CHAR(tty) | CTRLCHAR_CTRL; case 'd': return EOF_CHAR(tty) | CTRLCHAR_CTRL; case 'z': return SUSP_CHAR(tty) | CTRLCHAR_CTRL; } return CTRLCHAR_NONE; }
gpl-2.0
NoelMacwan/android_kernel_sony_u8500
arch/cris/arch-v32/mach-a3/arbiter.c
9878
17688
/* * Memory arbiter functions. Allocates bandwidth through the * arbiter and sets up arbiter breakpoints. * * The algorithm first assigns slots to the clients that has specified * bandwidth (e.g. ethernet) and then the remaining slots are divided * on all the active clients. * * Copyright (c) 2004-2007 Axis Communications AB. * * The artpec-3 has two arbiters. The memory hierarchy looks like this: * * * CPU DMAs * | | * | | * -------------- ------------------ * | foo arbiter|----| Internal memory| * -------------- ------------------ * | * -------------- * | L2 cache | * -------------- * | * h264 etc | * | | * | | * -------------- * | bar arbiter| * -------------- * | * --------- * | SDRAM | * --------- * */ #include <hwregs/reg_map.h> #include <hwregs/reg_rdwr.h> #include <hwregs/marb_foo_defs.h> #include <hwregs/marb_bar_defs.h> #include <arbiter.h> #include <hwregs/intr_vect.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <asm/io.h> #include <asm/irq_regs.h> #define D(x) struct crisv32_watch_entry { unsigned long instance; watch_callback *cb; unsigned long start; unsigned long end; int used; }; #define NUMBER_OF_BP 4 #define SDRAM_BANDWIDTH 400000000 #define INTMEM_BANDWIDTH 400000000 #define NBR_OF_SLOTS 64 #define NBR_OF_REGIONS 2 #define NBR_OF_CLIENTS 15 #define ARBITERS 2 #define UNASSIGNED 100 struct arbiter { unsigned long instance; int nbr_regions; int nbr_clients; int requested_slots[NBR_OF_REGIONS][NBR_OF_CLIENTS]; int active_clients[NBR_OF_REGIONS][NBR_OF_CLIENTS]; }; static struct crisv32_watch_entry watches[ARBITERS][NUMBER_OF_BP] = { { {regi_marb_foo_bp0}, {regi_marb_foo_bp1}, {regi_marb_foo_bp2}, {regi_marb_foo_bp3} }, { {regi_marb_bar_bp0}, {regi_marb_bar_bp1}, {regi_marb_bar_bp2}, {regi_marb_bar_bp3} } }; struct arbiter arbiters[ARBITERS] = { { /* L2 cache arbiter */ .instance = regi_marb_foo, .nbr_regions = 2, .nbr_clients = 15 }, { /* DDR2 arbiter */ 
.instance = regi_marb_bar, .nbr_regions = 1, .nbr_clients = 9 } }; static int max_bandwidth[NBR_OF_REGIONS] = {SDRAM_BANDWIDTH, INTMEM_BANDWIDTH}; DEFINE_SPINLOCK(arbiter_lock); static irqreturn_t crisv32_foo_arbiter_irq(int irq, void *dev_id); static irqreturn_t crisv32_bar_arbiter_irq(int irq, void *dev_id); /* * "I'm the arbiter, I know the score. * From square one I'll be watching all 64." * (memory arbiter slots, that is) * * Or in other words: * Program the memory arbiter slots for "region" according to what's * in requested_slots[] and active_clients[], while minimizing * latency. A caller may pass a non-zero positive amount for * "unused_slots", which must then be the unallocated, remaining * number of slots, free to hand out to any client. */ static void crisv32_arbiter_config(int arbiter, int region, int unused_slots) { int slot; int client; int interval = 0; /* * This vector corresponds to the hardware arbiter slots (see * the hardware documentation for semantics). We initialize * each slot with a suitable sentinel value outside the valid * range {0 .. NBR_OF_CLIENTS - 1} and replace them with * client indexes. Then it's fed to the hardware. */ s8 val[NBR_OF_SLOTS]; for (slot = 0; slot < NBR_OF_SLOTS; slot++) val[slot] = -1; for (client = 0; client < arbiters[arbiter].nbr_clients; client++) { int pos; /* Allocate the requested non-zero number of slots, but * also give clients with zero-requests one slot each * while stocks last. We do the latter here, in client * order. This makes sure zero-request clients are the * first to get to any spare slots, else those slots * could, when bandwidth is allocated close to the limit, * all be allocated to low-index non-zero-request clients * in the default-fill loop below. 
Another positive but * secondary effect is a somewhat better spread of the * zero-bandwidth clients in the vector, avoiding some of * the latency that could otherwise be caused by the * partitioning of non-zero-bandwidth clients at low * indexes and zero-bandwidth clients at high * indexes. (Note that this spreading can only affect the * unallocated bandwidth.) All the above only matters for * memory-intensive situations, of course. */ if (!arbiters[arbiter].requested_slots[region][client]) { /* * Skip inactive clients. Also skip zero-slot * allocations in this pass when there are no known * free slots. */ if (!arbiters[arbiter].active_clients[region][client] || unused_slots <= 0) continue; unused_slots--; /* Only allocate one slot for this client. */ interval = NBR_OF_SLOTS; } else interval = NBR_OF_SLOTS / arbiters[arbiter].requested_slots[region][client]; pos = 0; while (pos < NBR_OF_SLOTS) { if (val[pos] >= 0) pos++; else { val[pos] = client; pos += interval; } } } client = 0; for (slot = 0; slot < NBR_OF_SLOTS; slot++) { /* * Allocate remaining slots in round-robin * client-number order for active clients. For this * pass, we ignore requested bandwidth and previous * allocations. */ if (val[slot] < 0) { int first = client; while (!arbiters[arbiter].active_clients[region][client]) { client = (client + 1) % arbiters[arbiter].nbr_clients; if (client == first) break; } val[slot] = client; client = (client + 1) % arbiters[arbiter].nbr_clients; } if (arbiter == 0) { if (region == EXT_REGION) REG_WR_INT_VECT(marb_foo, regi_marb_foo, rw_l2_slots, slot, val[slot]); else if (region == INT_REGION) REG_WR_INT_VECT(marb_foo, regi_marb_foo, rw_intm_slots, slot, val[slot]); } else { REG_WR_INT_VECT(marb_bar, regi_marb_bar, rw_ddr2_slots, slot, val[slot]); } } } extern char _stext, _etext; static void crisv32_arbiter_init(void) { static int initialized; if (initialized) return; initialized = 1; /* * CPU caches are always set to active, but with zero * bandwidth allocated. 
It should be ok to allocate zero * bandwidth for the caches, because DMA for other channels * will supposedly finish, once their programmed amount is * done, and then the caches will get access according to the * "fixed scheme" for unclaimed slots. Though, if for some * use-case somewhere, there's a maximum CPU latency for * e.g. some interrupt, we have to start allocating specific * bandwidth for the CPU caches too. */ arbiters[0].active_clients[EXT_REGION][11] = 1; arbiters[0].active_clients[EXT_REGION][12] = 1; crisv32_arbiter_config(0, EXT_REGION, 0); crisv32_arbiter_config(0, INT_REGION, 0); crisv32_arbiter_config(1, EXT_REGION, 0); if (request_irq(MEMARB_FOO_INTR_VECT, crisv32_foo_arbiter_irq, IRQF_DISABLED, "arbiter", NULL)) printk(KERN_ERR "Couldn't allocate arbiter IRQ\n"); if (request_irq(MEMARB_BAR_INTR_VECT, crisv32_bar_arbiter_irq, IRQF_DISABLED, "arbiter", NULL)) printk(KERN_ERR "Couldn't allocate arbiter IRQ\n"); #ifndef CONFIG_ETRAX_KGDB /* Global watch for writes to kernel text segment. 
*/ crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext, MARB_CLIENTS(arbiter_all_clients, arbiter_bar_all_clients), arbiter_all_write, NULL); #endif /* Set up max burst sizes by default */ REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_rd_burst, 3); REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_wr_burst, 3); REG_WR_INT(marb_bar, regi_marb_bar, rw_ccd_burst, 3); REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_wr_burst, 3); REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_rd_burst, 3); REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_rd_burst, 3); REG_WR_INT(marb_bar, regi_marb_bar, rw_vout_burst, 3); REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_fifo_burst, 3); REG_WR_INT(marb_bar, regi_marb_bar, rw_l2cache_burst, 3); } int crisv32_arbiter_allocate_bandwidth(int client, int region, unsigned long bandwidth) { int i; int total_assigned = 0; int total_clients = 0; int req; int arbiter = 0; crisv32_arbiter_init(); if (client & 0xffff0000) { arbiter = 1; client >>= 16; } for (i = 0; i < arbiters[arbiter].nbr_clients; i++) { total_assigned += arbiters[arbiter].requested_slots[region][i]; total_clients += arbiters[arbiter].active_clients[region][i]; } /* Avoid division by 0 for 0-bandwidth requests. */ req = bandwidth == 0 ? 0 : NBR_OF_SLOTS / (max_bandwidth[region] / bandwidth); /* * We make sure that there are enough slots only for non-zero * requests. Requesting 0 bandwidth *may* allocate slots, * though if all bandwidth is allocated, such a client won't * get any and will have to rely on getting memory access * according to the fixed scheme that's the default when one * of the slot-allocated clients doesn't claim their slot. 
*/ if (total_assigned + req > NBR_OF_SLOTS) return -ENOMEM; arbiters[arbiter].active_clients[region][client] = 1; arbiters[arbiter].requested_slots[region][client] = req; crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned); /* Propagate allocation from foo to bar */ if (arbiter == 0) crisv32_arbiter_allocate_bandwidth(8 << 16, EXT_REGION, bandwidth); return 0; } /* * Main entry for bandwidth deallocation. * * Strictly speaking, for a somewhat constant set of clients where * each client gets a constant bandwidth and is just enabled or * disabled (somewhat dynamically), no action is necessary here to * avoid starvation for non-zero-allocation clients, as the allocated * slots will just be unused. However, handing out those unused slots * to active clients avoids needless latency if the "fixed scheme" * would give unclaimed slots to an eager low-index client. */ void crisv32_arbiter_deallocate_bandwidth(int client, int region) { int i; int total_assigned = 0; int arbiter = 0; if (client & 0xffff0000) arbiter = 1; arbiters[arbiter].requested_slots[region][client] = 0; arbiters[arbiter].active_clients[region][client] = 0; for (i = 0; i < arbiters[arbiter].nbr_clients; i++) total_assigned += arbiters[arbiter].requested_slots[region][i]; crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned); } int crisv32_arbiter_watch(unsigned long start, unsigned long size, unsigned long clients, unsigned long accesses, watch_callback *cb) { int i; int arbiter; int used[2]; int ret = 0; crisv32_arbiter_init(); if (start > 0x80000000) { printk(KERN_ERR "Arbiter: %lX doesn't look like a " "physical address", start); return -EFAULT; } spin_lock(&arbiter_lock); if (clients & 0xffff) used[0] = 1; if (clients & 0xffff0000) used[1] = 1; for (arbiter = 0; arbiter < ARBITERS; arbiter++) { if (!used[arbiter]) continue; for (i = 0; i < NUMBER_OF_BP; i++) { if (!watches[arbiter][i].used) { unsigned intr_mask; if (arbiter) intr_mask = REG_RD_INT(marb_bar, 
regi_marb_bar, rw_intr_mask); else intr_mask = REG_RD_INT(marb_foo, regi_marb_foo, rw_intr_mask); watches[arbiter][i].used = 1; watches[arbiter][i].start = start; watches[arbiter][i].end = start + size; watches[arbiter][i].cb = cb; ret |= (i + 1) << (arbiter + 8); if (arbiter) { REG_WR_INT(marb_bar_bp, watches[arbiter][i].instance, rw_first_addr, watches[arbiter][i].start); REG_WR_INT(marb_bar_bp, watches[arbiter][i].instance, rw_last_addr, watches[arbiter][i].end); REG_WR_INT(marb_bar_bp, watches[arbiter][i].instance, rw_op, accesses); REG_WR_INT(marb_bar_bp, watches[arbiter][i].instance, rw_clients, clients & 0xffff); } else { REG_WR_INT(marb_foo_bp, watches[arbiter][i].instance, rw_first_addr, watches[arbiter][i].start); REG_WR_INT(marb_foo_bp, watches[arbiter][i].instance, rw_last_addr, watches[arbiter][i].end); REG_WR_INT(marb_foo_bp, watches[arbiter][i].instance, rw_op, accesses); REG_WR_INT(marb_foo_bp, watches[arbiter][i].instance, rw_clients, clients >> 16); } if (i == 0) intr_mask |= 1; else if (i == 1) intr_mask |= 2; else if (i == 2) intr_mask |= 4; else if (i == 3) intr_mask |= 8; if (arbiter) REG_WR_INT(marb_bar, regi_marb_bar, rw_intr_mask, intr_mask); else REG_WR_INT(marb_foo, regi_marb_foo, rw_intr_mask, intr_mask); spin_unlock(&arbiter_lock); break; } } } spin_unlock(&arbiter_lock); if (ret) return ret; else return -ENOMEM; } int crisv32_arbiter_unwatch(int id) { int arbiter; int intr_mask; crisv32_arbiter_init(); spin_lock(&arbiter_lock); for (arbiter = 0; arbiter < ARBITERS; arbiter++) { int id2; if (arbiter) intr_mask = REG_RD_INT(marb_bar, regi_marb_bar, rw_intr_mask); else intr_mask = REG_RD_INT(marb_foo, regi_marb_foo, rw_intr_mask); id2 = (id & (0xff << (arbiter + 8))) >> (arbiter + 8); if (id2 == 0) continue; id2--; if ((id2 >= NUMBER_OF_BP) || (!watches[arbiter][id2].used)) { spin_unlock(&arbiter_lock); return -EINVAL; } memset(&watches[arbiter][id2], 0, sizeof(struct crisv32_watch_entry)); if (id2 == 0) intr_mask &= ~1; else if (id2 == 
1) intr_mask &= ~2; else if (id2 == 2) intr_mask &= ~4; else if (id2 == 3) intr_mask &= ~8; if (arbiter) REG_WR_INT(marb_bar, regi_marb_bar, rw_intr_mask, intr_mask); else REG_WR_INT(marb_foo, regi_marb_foo, rw_intr_mask, intr_mask); } spin_unlock(&arbiter_lock); return 0; } extern void show_registers(struct pt_regs *regs); static irqreturn_t crisv32_foo_arbiter_irq(int irq, void *dev_id) { reg_marb_foo_r_masked_intr masked_intr = REG_RD(marb_foo, regi_marb_foo, r_masked_intr); reg_marb_foo_bp_r_brk_clients r_clients; reg_marb_foo_bp_r_brk_addr r_addr; reg_marb_foo_bp_r_brk_op r_op; reg_marb_foo_bp_r_brk_first_client r_first; reg_marb_foo_bp_r_brk_size r_size; reg_marb_foo_bp_rw_ack ack = {0}; reg_marb_foo_rw_ack_intr ack_intr = { .bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1 }; struct crisv32_watch_entry *watch; unsigned arbiter = (unsigned)dev_id; masked_intr = REG_RD(marb_foo, regi_marb_foo, r_masked_intr); if (masked_intr.bp0) watch = &watches[arbiter][0]; else if (masked_intr.bp1) watch = &watches[arbiter][1]; else if (masked_intr.bp2) watch = &watches[arbiter][2]; else if (masked_intr.bp3) watch = &watches[arbiter][3]; else return IRQ_NONE; /* Retrieve all useful information and print it. 
*/ r_clients = REG_RD(marb_foo_bp, watch->instance, r_brk_clients); r_addr = REG_RD(marb_foo_bp, watch->instance, r_brk_addr); r_op = REG_RD(marb_foo_bp, watch->instance, r_brk_op); r_first = REG_RD(marb_foo_bp, watch->instance, r_brk_first_client); r_size = REG_RD(marb_foo_bp, watch->instance, r_brk_size); printk(KERN_DEBUG "Arbiter IRQ\n"); printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n", REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_clients, r_clients), REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_addr, r_addr), REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_op, r_op), REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_first_client, r_first), REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_size, r_size)); REG_WR(marb_foo_bp, watch->instance, rw_ack, ack); REG_WR(marb_foo, regi_marb_foo, rw_ack_intr, ack_intr); printk(KERN_DEBUG "IRQ occurred at %X\n", (unsigned)get_irq_regs()); if (watch->cb) watch->cb(); return IRQ_HANDLED; } static irqreturn_t crisv32_bar_arbiter_irq(int irq, void *dev_id) { reg_marb_bar_r_masked_intr masked_intr = REG_RD(marb_bar, regi_marb_bar, r_masked_intr); reg_marb_bar_bp_r_brk_clients r_clients; reg_marb_bar_bp_r_brk_addr r_addr; reg_marb_bar_bp_r_brk_op r_op; reg_marb_bar_bp_r_brk_first_client r_first; reg_marb_bar_bp_r_brk_size r_size; reg_marb_bar_bp_rw_ack ack = {0}; reg_marb_bar_rw_ack_intr ack_intr = { .bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1 }; struct crisv32_watch_entry *watch; unsigned arbiter = (unsigned)dev_id; masked_intr = REG_RD(marb_bar, regi_marb_bar, r_masked_intr); if (masked_intr.bp0) watch = &watches[arbiter][0]; else if (masked_intr.bp1) watch = &watches[arbiter][1]; else if (masked_intr.bp2) watch = &watches[arbiter][2]; else if (masked_intr.bp3) watch = &watches[arbiter][3]; else return IRQ_NONE; /* Retrieve all useful information and print it. 
*/ r_clients = REG_RD(marb_bar_bp, watch->instance, r_brk_clients); r_addr = REG_RD(marb_bar_bp, watch->instance, r_brk_addr); r_op = REG_RD(marb_bar_bp, watch->instance, r_brk_op); r_first = REG_RD(marb_bar_bp, watch->instance, r_brk_first_client); r_size = REG_RD(marb_bar_bp, watch->instance, r_brk_size); printk(KERN_DEBUG "Arbiter IRQ\n"); printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n", REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_clients, r_clients), REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_addr, r_addr), REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_op, r_op), REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_first_client, r_first), REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_size, r_size)); REG_WR(marb_bar_bp, watch->instance, rw_ack, ack); REG_WR(marb_bar, regi_marb_bar, rw_ack_intr, ack_intr); printk(KERN_DEBUG "IRQ occurred at %X\n", (unsigned)get_irq_regs()->erp); if (watch->cb) watch->cb(); return IRQ_HANDLED; }
gpl-2.0
FEDEVEL/tmp-imx6-tiny-rex-linux
security/security.c
151
36540
/* * Security plug functions * * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/capability.h> #include <linux/dcache.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/security.h> #include <linux/integrity.h> #include <linux/ima.h> #include <linux/evm.h> #include <linux/fsnotify.h> #include <linux/mman.h> #include <linux/mount.h> #include <linux/personality.h> #include <linux/backing-dev.h> #include <net/flow.h> #define MAX_LSM_EVM_XATTR 2 /* Boot-time LSM user choice */ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] = CONFIG_DEFAULT_SECURITY; static struct security_operations *security_ops; static struct security_operations default_security_ops = { .name = "default", }; static inline int __init verify(struct security_operations *ops) { /* verify the security_operations structure exists */ if (!ops) return -EINVAL; security_fixup_ops(ops); return 0; } static void __init do_security_initcalls(void) { initcall_t *call; call = __security_initcall_start; while (call < __security_initcall_end) { (*call) (); call++; } } /** * security_init - initializes the security framework * * This should be called early in the kernel initialization sequence. 
*/ int __init security_init(void) { printk(KERN_INFO "Security Framework initialized\n"); security_fixup_ops(&default_security_ops); security_ops = &default_security_ops; do_security_initcalls(); return 0; } void reset_security_ops(void) { security_ops = &default_security_ops; } /* Save user chosen LSM */ static int __init choose_lsm(char *str) { strncpy(chosen_lsm, str, SECURITY_NAME_MAX); return 1; } __setup("security=", choose_lsm); /** * security_module_enable - Load given security module on boot ? * @ops: a pointer to the struct security_operations that is to be checked. * * Each LSM must pass this method before registering its own operations * to avoid security registration races. This method may also be used * to check if your LSM is currently loaded during kernel initialization. * * Return true if: * -The passed LSM is the one chosen by user at boot time, * -or the passed LSM is configured as the default and the user did not * choose an alternate LSM at boot time. * Otherwise, return false. */ int __init security_module_enable(struct security_operations *ops) { return !strcmp(ops->name, chosen_lsm); } /** * register_security - registers a security framework with the kernel * @ops: a pointer to the struct security_options that is to be registered * * This function allows a security module to register itself with the * kernel security subsystem. Some rudimentary checking is done on the @ops * value passed to this function. You'll need to check first if your LSM * is allowed to register its @ops by calling security_module_enable(@ops). * * If there is already a security module registered with the kernel, * an error will be returned. Otherwise %0 is returned on success. 
*/ int __init register_security(struct security_operations *ops) { if (verify(ops)) { printk(KERN_DEBUG "%s could not verify " "security_operations structure.\n", __func__); return -EINVAL; } if (security_ops != &default_security_ops) return -EAGAIN; security_ops = ops; return 0; } /* Security operations */ int security_ptrace_access_check(struct task_struct *child, unsigned int mode) { #ifdef CONFIG_SECURITY_YAMA_STACKED int rc; rc = yama_ptrace_access_check(child, mode); if (rc) return rc; #endif return security_ops->ptrace_access_check(child, mode); } int security_ptrace_traceme(struct task_struct *parent) { #ifdef CONFIG_SECURITY_YAMA_STACKED int rc; rc = yama_ptrace_traceme(parent); if (rc) return rc; #endif return security_ops->ptrace_traceme(parent); } int security_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { return security_ops->capget(target, effective, inheritable, permitted); } int security_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted) { return security_ops->capset(new, old, effective, inheritable, permitted); } int security_capable(const struct cred *cred, struct user_namespace *ns, int cap) { return security_ops->capable(cred, ns, cap, SECURITY_CAP_AUDIT); } int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns, int cap) { return security_ops->capable(cred, ns, cap, SECURITY_CAP_NOAUDIT); } int security_quotactl(int cmds, int type, int id, struct super_block *sb) { return security_ops->quotactl(cmds, type, id, sb); } int security_quota_on(struct dentry *dentry) { return security_ops->quota_on(dentry); } int security_syslog(int type) { return security_ops->syslog(type); } int security_settime(const struct timespec *ts, const struct timezone *tz) { return security_ops->settime(ts, tz); } int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) { return 
security_ops->vm_enough_memory(mm, pages); } int security_bprm_set_creds(struct linux_binprm *bprm) { return security_ops->bprm_set_creds(bprm); } int security_bprm_check(struct linux_binprm *bprm) { int ret; ret = security_ops->bprm_check_security(bprm); if (ret) return ret; return ima_bprm_check(bprm); } void security_bprm_committing_creds(struct linux_binprm *bprm) { security_ops->bprm_committing_creds(bprm); } void security_bprm_committed_creds(struct linux_binprm *bprm) { security_ops->bprm_committed_creds(bprm); } int security_bprm_secureexec(struct linux_binprm *bprm) { return security_ops->bprm_secureexec(bprm); } int security_sb_alloc(struct super_block *sb) { return security_ops->sb_alloc_security(sb); } void security_sb_free(struct super_block *sb) { security_ops->sb_free_security(sb); } int security_sb_copy_data(char *orig, char *copy) { return security_ops->sb_copy_data(orig, copy); } EXPORT_SYMBOL(security_sb_copy_data); int security_sb_remount(struct super_block *sb, void *data) { return security_ops->sb_remount(sb, data); } int security_sb_kern_mount(struct super_block *sb, int flags, void *data) { return security_ops->sb_kern_mount(sb, flags, data); } int security_sb_show_options(struct seq_file *m, struct super_block *sb) { return security_ops->sb_show_options(m, sb); } int security_sb_statfs(struct dentry *dentry) { return security_ops->sb_statfs(dentry); } int security_sb_mount(const char *dev_name, struct path *path, const char *type, unsigned long flags, void *data) { return security_ops->sb_mount(dev_name, path, type, flags, data); } int security_sb_umount(struct vfsmount *mnt, int flags) { return security_ops->sb_umount(mnt, flags); } int security_sb_pivotroot(struct path *old_path, struct path *new_path) { return security_ops->sb_pivotroot(old_path, new_path); } int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts, unsigned long kern_flags, unsigned long *set_kern_flags) { return 
security_ops->sb_set_mnt_opts(sb, opts, kern_flags, set_kern_flags); } EXPORT_SYMBOL(security_sb_set_mnt_opts); int security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb) { return security_ops->sb_clone_mnt_opts(oldsb, newsb); } EXPORT_SYMBOL(security_sb_clone_mnt_opts); int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) { return security_ops->sb_parse_opts_str(options, opts); } EXPORT_SYMBOL(security_sb_parse_opts_str); int security_inode_alloc(struct inode *inode) { inode->i_security = NULL; return security_ops->inode_alloc_security(inode); } void security_inode_free(struct inode *inode) { integrity_inode_free(inode); security_ops->inode_free_security(inode); } int security_dentry_init_security(struct dentry *dentry, int mode, struct qstr *name, void **ctx, u32 *ctxlen) { return security_ops->dentry_init_security(dentry, mode, name, ctx, ctxlen); } EXPORT_SYMBOL(security_dentry_init_security); int security_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const initxattrs initxattrs, void *fs_data) { struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1]; struct xattr *lsm_xattr, *evm_xattr, *xattr; int ret; if (unlikely(IS_PRIVATE(inode))) return 0; if (!initxattrs) return security_ops->inode_init_security(inode, dir, qstr, NULL, NULL, NULL); memset(new_xattrs, 0, sizeof(new_xattrs)); lsm_xattr = new_xattrs; ret = security_ops->inode_init_security(inode, dir, qstr, &lsm_xattr->name, &lsm_xattr->value, &lsm_xattr->value_len); if (ret) goto out; evm_xattr = lsm_xattr + 1; ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr); if (ret) goto out; ret = initxattrs(inode, new_xattrs, fs_data); out: for (xattr = new_xattrs; xattr->value != NULL; xattr++) kfree(xattr->value); return (ret == -EOPNOTSUPP) ? 
0 : ret; } EXPORT_SYMBOL(security_inode_init_security); int security_old_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const char **name, void **value, size_t *len) { if (unlikely(IS_PRIVATE(inode))) return -EOPNOTSUPP; return security_ops->inode_init_security(inode, dir, qstr, name, value, len); } EXPORT_SYMBOL(security_old_inode_init_security); #ifdef CONFIG_SECURITY_PATH int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { if (unlikely(IS_PRIVATE(dir->dentry->d_inode))) return 0; return security_ops->path_mknod(dir, dentry, mode, dev); } EXPORT_SYMBOL(security_path_mknod); int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir->dentry->d_inode))) return 0; return security_ops->path_mkdir(dir, dentry, mode); } EXPORT_SYMBOL(security_path_mkdir); int security_path_rmdir(struct path *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(dir->dentry->d_inode))) return 0; return security_ops->path_rmdir(dir, dentry); } int security_path_unlink(struct path *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(dir->dentry->d_inode))) return 0; return security_ops->path_unlink(dir, dentry); } EXPORT_SYMBOL(security_path_unlink); int security_path_symlink(struct path *dir, struct dentry *dentry, const char *old_name) { if (unlikely(IS_PRIVATE(dir->dentry->d_inode))) return 0; return security_ops->path_symlink(dir, dentry, old_name); } int security_path_link(struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry) { if (unlikely(IS_PRIVATE(old_dentry->d_inode))) return 0; return security_ops->path_link(old_dentry, new_dir, new_dentry); } int security_path_rename(struct path *old_dir, struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry) { if (unlikely(IS_PRIVATE(old_dentry->d_inode) || (new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode)))) return 0; return security_ops->path_rename(old_dir, 
old_dentry, new_dir, new_dentry); } EXPORT_SYMBOL(security_path_rename); int security_path_truncate(struct path *path) { if (unlikely(IS_PRIVATE(path->dentry->d_inode))) return 0; return security_ops->path_truncate(path); } int security_path_chmod(struct path *path, umode_t mode) { if (unlikely(IS_PRIVATE(path->dentry->d_inode))) return 0; return security_ops->path_chmod(path, mode); } int security_path_chown(struct path *path, kuid_t uid, kgid_t gid) { if (unlikely(IS_PRIVATE(path->dentry->d_inode))) return 0; return security_ops->path_chown(path, uid, gid); } int security_path_chroot(struct path *path) { return security_ops->path_chroot(path); } #endif int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir))) return 0; return security_ops->inode_create(dir, dentry, mode); } EXPORT_SYMBOL_GPL(security_inode_create); int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { if (unlikely(IS_PRIVATE(old_dentry->d_inode))) return 0; return security_ops->inode_link(old_dentry, dir, new_dentry); } int security_inode_unlink(struct inode *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; return security_ops->inode_unlink(dir, dentry); } int security_inode_symlink(struct inode *dir, struct dentry *dentry, const char *old_name) { if (unlikely(IS_PRIVATE(dir))) return 0; return security_ops->inode_symlink(dir, dentry, old_name); } int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir))) return 0; return security_ops->inode_mkdir(dir, dentry, mode); } EXPORT_SYMBOL_GPL(security_inode_mkdir); int security_inode_rmdir(struct inode *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; return security_ops->inode_rmdir(dir, dentry); } int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { if (unlikely(IS_PRIVATE(dir))) return 0; 
return security_ops->inode_mknod(dir, dentry, mode, dev); } int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { if (unlikely(IS_PRIVATE(old_dentry->d_inode) || (new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode)))) return 0; return security_ops->inode_rename(old_dir, old_dentry, new_dir, new_dentry); } int security_inode_readlink(struct dentry *dentry) { if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; return security_ops->inode_readlink(dentry); } int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd) { if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; return security_ops->inode_follow_link(dentry, nd); } int security_inode_permission(struct inode *inode, int mask) { if (unlikely(IS_PRIVATE(inode))) return 0; return security_ops->inode_permission(inode, mask); } int security_inode_setattr(struct dentry *dentry, struct iattr *attr) { int ret; if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; ret = security_ops->inode_setattr(dentry, attr); if (ret) return ret; return evm_inode_setattr(dentry, attr); } EXPORT_SYMBOL_GPL(security_inode_setattr); int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) { if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; return security_ops->inode_getattr(mnt, dentry); } int security_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { int ret; if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; ret = security_ops->inode_setxattr(dentry, name, value, size, flags); if (ret) return ret; ret = ima_inode_setxattr(dentry, name, value, size); if (ret) return ret; return evm_inode_setxattr(dentry, name, value, size); } void security_inode_post_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { if (unlikely(IS_PRIVATE(dentry->d_inode))) return; security_ops->inode_post_setxattr(dentry, name, value, size, flags); 
evm_inode_post_setxattr(dentry, name, value, size); } int security_inode_getxattr(struct dentry *dentry, const char *name) { if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; return security_ops->inode_getxattr(dentry, name); } int security_inode_listxattr(struct dentry *dentry) { if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; return security_ops->inode_listxattr(dentry); } int security_inode_removexattr(struct dentry *dentry, const char *name) { int ret; if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; ret = security_ops->inode_removexattr(dentry, name); if (ret) return ret; ret = ima_inode_removexattr(dentry, name); if (ret) return ret; return evm_inode_removexattr(dentry, name); } int security_inode_need_killpriv(struct dentry *dentry) { return security_ops->inode_need_killpriv(dentry); } int security_inode_killpriv(struct dentry *dentry) { return security_ops->inode_killpriv(dentry); } int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc) { if (unlikely(IS_PRIVATE(inode))) return -EOPNOTSUPP; return security_ops->inode_getsecurity(inode, name, buffer, alloc); } int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) { if (unlikely(IS_PRIVATE(inode))) return -EOPNOTSUPP; return security_ops->inode_setsecurity(inode, name, value, size, flags); } int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) { if (unlikely(IS_PRIVATE(inode))) return 0; return security_ops->inode_listsecurity(inode, buffer, buffer_size); } EXPORT_SYMBOL(security_inode_listsecurity); void security_inode_getsecid(const struct inode *inode, u32 *secid) { security_ops->inode_getsecid(inode, secid); } int security_file_permission(struct file *file, int mask) { int ret; ret = security_ops->file_permission(file, mask); if (ret) return ret; return fsnotify_perm(file, mask); } int security_file_alloc(struct file *file) { return 
security_ops->file_alloc_security(file); } void security_file_free(struct file *file) { security_ops->file_free_security(file); } int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return security_ops->file_ioctl(file, cmd, arg); } static inline unsigned long mmap_prot(struct file *file, unsigned long prot) { /* * Does we have PROT_READ and does the application expect * it to imply PROT_EXEC? If not, nothing to talk about... */ if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ) return prot; if (!(current->personality & READ_IMPLIES_EXEC)) return prot; /* * if that's an anonymous mapping, let it. */ if (!file) return prot | PROT_EXEC; /* * ditto if it's not on noexec mount, except that on !MMU we need * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case */ if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) { #ifndef CONFIG_MMU unsigned long caps = 0; struct address_space *mapping = file->f_mapping; if (mapping && mapping->backing_dev_info) caps = mapping->backing_dev_info->capabilities; if (!(caps & BDI_CAP_EXEC_MAP)) return prot; #endif return prot | PROT_EXEC; } /* anything on noexec mount won't get PROT_EXEC */ return prot; } int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags) { int ret; ret = security_ops->mmap_file(file, prot, mmap_prot(file, prot), flags); if (ret) return ret; return ima_file_mmap(file, prot); } int security_mmap_addr(unsigned long addr) { return security_ops->mmap_addr(addr); } int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { return security_ops->file_mprotect(vma, reqprot, prot); } int security_file_lock(struct file *file, unsigned int cmd) { return security_ops->file_lock(file, cmd); } int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { return security_ops->file_fcntl(file, cmd, arg); } int security_file_set_fowner(struct file *file) { return security_ops->file_set_fowner(file); } int 
security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig) { return security_ops->file_send_sigiotask(tsk, fown, sig); } int security_file_receive(struct file *file) { return security_ops->file_receive(file); } int security_file_open(struct file *file, const struct cred *cred) { int ret; ret = security_ops->file_open(file, cred); if (ret) return ret; return fsnotify_perm(file, MAY_OPEN); } int security_task_create(unsigned long clone_flags) { return security_ops->task_create(clone_flags); } void security_task_free(struct task_struct *task) { #ifdef CONFIG_SECURITY_YAMA_STACKED yama_task_free(task); #endif security_ops->task_free(task); } int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) { return security_ops->cred_alloc_blank(cred, gfp); } void security_cred_free(struct cred *cred) { security_ops->cred_free(cred); } int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp) { return security_ops->cred_prepare(new, old, gfp); } void security_transfer_creds(struct cred *new, const struct cred *old) { security_ops->cred_transfer(new, old); } int security_kernel_act_as(struct cred *new, u32 secid) { return security_ops->kernel_act_as(new, secid); } int security_kernel_create_files_as(struct cred *new, struct inode *inode) { return security_ops->kernel_create_files_as(new, inode); } int security_kernel_module_request(char *kmod_name) { return security_ops->kernel_module_request(kmod_name); } int security_kernel_module_from_file(struct file *file) { int ret; ret = security_ops->kernel_module_from_file(file); if (ret) return ret; return ima_module_check(file); } int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags) { return security_ops->task_fix_setuid(new, old, flags); } int security_task_setpgid(struct task_struct *p, pid_t pgid) { return security_ops->task_setpgid(p, pgid); } int security_task_getpgid(struct task_struct *p) { return security_ops->task_getpgid(p); } int 
security_task_getsid(struct task_struct *p) { return security_ops->task_getsid(p); } void security_task_getsecid(struct task_struct *p, u32 *secid) { security_ops->task_getsecid(p, secid); } EXPORT_SYMBOL(security_task_getsecid); int security_task_setnice(struct task_struct *p, int nice) { return security_ops->task_setnice(p, nice); } int security_task_setioprio(struct task_struct *p, int ioprio) { return security_ops->task_setioprio(p, ioprio); } int security_task_getioprio(struct task_struct *p) { return security_ops->task_getioprio(p); } int security_task_setrlimit(struct task_struct *p, unsigned int resource, struct rlimit *new_rlim) { return security_ops->task_setrlimit(p, resource, new_rlim); } int security_task_setscheduler(struct task_struct *p) { return security_ops->task_setscheduler(p); } int security_task_getscheduler(struct task_struct *p) { return security_ops->task_getscheduler(p); } int security_task_movememory(struct task_struct *p) { return security_ops->task_movememory(p); } int security_task_kill(struct task_struct *p, struct siginfo *info, int sig, u32 secid) { return security_ops->task_kill(p, info, sig, secid); } int security_task_wait(struct task_struct *p) { return security_ops->task_wait(p); } int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { #ifdef CONFIG_SECURITY_YAMA_STACKED int rc; rc = yama_task_prctl(option, arg2, arg3, arg4, arg5); if (rc != -ENOSYS) return rc; #endif return security_ops->task_prctl(option, arg2, arg3, arg4, arg5); } void security_task_to_inode(struct task_struct *p, struct inode *inode) { security_ops->task_to_inode(p, inode); } int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag) { return security_ops->ipc_permission(ipcp, flag); } void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid) { security_ops->ipc_getsecid(ipcp, secid); } int security_msg_msg_alloc(struct msg_msg *msg) { return 
security_ops->msg_msg_alloc_security(msg); } void security_msg_msg_free(struct msg_msg *msg) { security_ops->msg_msg_free_security(msg); } int security_msg_queue_alloc(struct msg_queue *msq) { return security_ops->msg_queue_alloc_security(msq); } void security_msg_queue_free(struct msg_queue *msq) { security_ops->msg_queue_free_security(msq); } int security_msg_queue_associate(struct msg_queue *msq, int msqflg) { return security_ops->msg_queue_associate(msq, msqflg); } int security_msg_queue_msgctl(struct msg_queue *msq, int cmd) { return security_ops->msg_queue_msgctl(msq, cmd); } int security_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg, int msqflg) { return security_ops->msg_queue_msgsnd(msq, msg, msqflg); } int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode) { return security_ops->msg_queue_msgrcv(msq, msg, target, type, mode); } int security_shm_alloc(struct shmid_kernel *shp) { return security_ops->shm_alloc_security(shp); } void security_shm_free(struct shmid_kernel *shp) { security_ops->shm_free_security(shp); } int security_shm_associate(struct shmid_kernel *shp, int shmflg) { return security_ops->shm_associate(shp, shmflg); } int security_shm_shmctl(struct shmid_kernel *shp, int cmd) { return security_ops->shm_shmctl(shp, cmd); } int security_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg) { return security_ops->shm_shmat(shp, shmaddr, shmflg); } int security_sem_alloc(struct sem_array *sma) { return security_ops->sem_alloc_security(sma); } void security_sem_free(struct sem_array *sma) { security_ops->sem_free_security(sma); } int security_sem_associate(struct sem_array *sma, int semflg) { return security_ops->sem_associate(sma, semflg); } int security_sem_semctl(struct sem_array *sma, int cmd) { return security_ops->sem_semctl(sma, cmd); } int security_sem_semop(struct sem_array *sma, struct sembuf *sops, unsigned nsops, int alter) { return 
security_ops->sem_semop(sma, sops, nsops, alter); } void security_d_instantiate(struct dentry *dentry, struct inode *inode) { if (unlikely(inode && IS_PRIVATE(inode))) return; security_ops->d_instantiate(dentry, inode); } EXPORT_SYMBOL(security_d_instantiate); int security_getprocattr(struct task_struct *p, char *name, char **value) { return security_ops->getprocattr(p, name, value); } int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size) { return security_ops->setprocattr(p, name, value, size); } int security_netlink_send(struct sock *sk, struct sk_buff *skb) { return security_ops->netlink_send(sk, skb); } int security_ismaclabel(const char *name) { return security_ops->ismaclabel(name); } EXPORT_SYMBOL(security_ismaclabel); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) { return security_ops->secid_to_secctx(secid, secdata, seclen); } EXPORT_SYMBOL(security_secid_to_secctx); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) { return security_ops->secctx_to_secid(secdata, seclen, secid); } EXPORT_SYMBOL(security_secctx_to_secid); void security_release_secctx(char *secdata, u32 seclen) { security_ops->release_secctx(secdata, seclen); } EXPORT_SYMBOL(security_release_secctx); int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) { return security_ops->inode_notifysecctx(inode, ctx, ctxlen); } EXPORT_SYMBOL(security_inode_notifysecctx); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) { return security_ops->inode_setsecctx(dentry, ctx, ctxlen); } EXPORT_SYMBOL(security_inode_setsecctx); int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) { return security_ops->inode_getsecctx(inode, ctx, ctxlen); } EXPORT_SYMBOL(security_inode_getsecctx); #ifdef CONFIG_SECURITY_NETWORK int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { return security_ops->unix_stream_connect(sock, other, 
newsk); } EXPORT_SYMBOL(security_unix_stream_connect); int security_unix_may_send(struct socket *sock, struct socket *other) { return security_ops->unix_may_send(sock, other); } EXPORT_SYMBOL(security_unix_may_send); int security_socket_create(int family, int type, int protocol, int kern) { return security_ops->socket_create(family, type, protocol, kern); } int security_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern) { return security_ops->socket_post_create(sock, family, type, protocol, kern); } int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) { return security_ops->socket_bind(sock, address, addrlen); } int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen) { return security_ops->socket_connect(sock, address, addrlen); } int security_socket_listen(struct socket *sock, int backlog) { return security_ops->socket_listen(sock, backlog); } int security_socket_accept(struct socket *sock, struct socket *newsock) { return security_ops->socket_accept(sock, newsock); } int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size) { return security_ops->socket_sendmsg(sock, msg, size); } int security_socket_recvmsg(struct socket *sock, struct msghdr *msg, int size, int flags) { return security_ops->socket_recvmsg(sock, msg, size, flags); } int security_socket_getsockname(struct socket *sock) { return security_ops->socket_getsockname(sock); } int security_socket_getpeername(struct socket *sock) { return security_ops->socket_getpeername(sock); } int security_socket_getsockopt(struct socket *sock, int level, int optname) { return security_ops->socket_getsockopt(sock, level, optname); } int security_socket_setsockopt(struct socket *sock, int level, int optname) { return security_ops->socket_setsockopt(sock, level, optname); } int security_socket_shutdown(struct socket *sock, int how) { return security_ops->socket_shutdown(sock, how); } int 
security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { return security_ops->socket_sock_rcv_skb(sk, skb); } EXPORT_SYMBOL(security_sock_rcv_skb); int security_socket_getpeersec_stream(struct socket *sock, char __user *optval, int __user *optlen, unsigned len) { return security_ops->socket_getpeersec_stream(sock, optval, optlen, len); } int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { return security_ops->socket_getpeersec_dgram(sock, skb, secid); } EXPORT_SYMBOL(security_socket_getpeersec_dgram); int security_sk_alloc(struct sock *sk, int family, gfp_t priority) { return security_ops->sk_alloc_security(sk, family, priority); } void security_sk_free(struct sock *sk) { security_ops->sk_free_security(sk); } void security_sk_clone(const struct sock *sk, struct sock *newsk) { security_ops->sk_clone_security(sk, newsk); } EXPORT_SYMBOL(security_sk_clone); void security_sk_classify_flow(struct sock *sk, struct flowi *fl) { security_ops->sk_getsecid(sk, &fl->flowi_secid); } EXPORT_SYMBOL(security_sk_classify_flow); void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) { security_ops->req_classify_flow(req, fl); } EXPORT_SYMBOL(security_req_classify_flow); void security_sock_graft(struct sock *sk, struct socket *parent) { security_ops->sock_graft(sk, parent); } EXPORT_SYMBOL(security_sock_graft); int security_inet_conn_request(struct sock *sk, struct sk_buff *skb, struct request_sock *req) { return security_ops->inet_conn_request(sk, skb, req); } EXPORT_SYMBOL(security_inet_conn_request); void security_inet_csk_clone(struct sock *newsk, const struct request_sock *req) { security_ops->inet_csk_clone(newsk, req); } void security_inet_conn_established(struct sock *sk, struct sk_buff *skb) { security_ops->inet_conn_established(sk, skb); } int security_secmark_relabel_packet(u32 secid) { return security_ops->secmark_relabel_packet(secid); } EXPORT_SYMBOL(security_secmark_relabel_packet); void 
security_secmark_refcount_inc(void) { security_ops->secmark_refcount_inc(); } EXPORT_SYMBOL(security_secmark_refcount_inc); void security_secmark_refcount_dec(void) { security_ops->secmark_refcount_dec(); } EXPORT_SYMBOL(security_secmark_refcount_dec); int security_tun_dev_alloc_security(void **security) { return security_ops->tun_dev_alloc_security(security); } EXPORT_SYMBOL(security_tun_dev_alloc_security); void security_tun_dev_free_security(void *security) { security_ops->tun_dev_free_security(security); } EXPORT_SYMBOL(security_tun_dev_free_security); int security_tun_dev_create(void) { return security_ops->tun_dev_create(); } EXPORT_SYMBOL(security_tun_dev_create); int security_tun_dev_attach_queue(void *security) { return security_ops->tun_dev_attach_queue(security); } EXPORT_SYMBOL(security_tun_dev_attach_queue); int security_tun_dev_attach(struct sock *sk, void *security) { return security_ops->tun_dev_attach(sk, security); } EXPORT_SYMBOL(security_tun_dev_attach); int security_tun_dev_open(void *security) { return security_ops->tun_dev_open(security); } EXPORT_SYMBOL(security_tun_dev_open); void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) { security_ops->skb_owned_by(skb, sk); } #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp) { return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp); } EXPORT_SYMBOL(security_xfrm_policy_alloc); int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp) { return security_ops->xfrm_policy_clone_security(old_ctx, new_ctxp); } void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx) { security_ops->xfrm_policy_free_security(ctx); } EXPORT_SYMBOL(security_xfrm_policy_free); int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) { return security_ops->xfrm_policy_delete_security(ctx); } int security_xfrm_state_alloc(struct xfrm_state 
*x, struct xfrm_user_sec_ctx *sec_ctx) { return security_ops->xfrm_state_alloc(x, sec_ctx); } EXPORT_SYMBOL(security_xfrm_state_alloc); int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) { return security_ops->xfrm_state_alloc_acquire(x, polsec, secid); } int security_xfrm_state_delete(struct xfrm_state *x) { return security_ops->xfrm_state_delete_security(x); } EXPORT_SYMBOL(security_xfrm_state_delete); void security_xfrm_state_free(struct xfrm_state *x) { security_ops->xfrm_state_free_security(x); } int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) { return security_ops->xfrm_policy_lookup(ctx, fl_secid, dir); } int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi *fl) { return security_ops->xfrm_state_pol_flow_match(x, xp, fl); } int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) { return security_ops->xfrm_decode_session(skb, secid, 1); } void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) { int rc = security_ops->xfrm_decode_session(skb, &fl->flowi_secid, 0); BUG_ON(rc); } EXPORT_SYMBOL(security_skb_classify_flow); #endif /* CONFIG_SECURITY_NETWORK_XFRM */ #ifdef CONFIG_KEYS int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags) { return security_ops->key_alloc(key, cred, flags); } void security_key_free(struct key *key) { security_ops->key_free(key); } int security_key_permission(key_ref_t key_ref, const struct cred *cred, key_perm_t perm) { return security_ops->key_permission(key_ref, cred, perm); } int security_key_getsecurity(struct key *key, char **_buffer) { return security_ops->key_getsecurity(key, _buffer); } #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule) { return security_ops->audit_rule_init(field, op, rulestr, lsmrule); } int security_audit_rule_known(struct audit_krule *krule) { 
return security_ops->audit_rule_known(krule); } void security_audit_rule_free(void *lsmrule) { security_ops->audit_rule_free(lsmrule); } int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule, struct audit_context *actx) { return security_ops->audit_rule_match(secid, field, op, lsmrule, actx); } #endif /* CONFIG_AUDIT */
gpl-2.0
xiaolvmu/flounder-kernel
drivers/mfd/tlv320aic3xxx-irq.c
151
6413
/* * tlv320aic3262-irq.c -- Interrupt controller support for * TI OMAP44XX TLV320AIC3262 * * Author: Mukund Navada <navada@ti.com> * Mehar Bajwa <mehar.bajwa@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/mfd/core.h> #include <linux/interrupt.h> #include <linux/mfd/tlv320aic3xxx-core.h> #include <linux/mfd/tlv320aic3262-registers.h> #include <linux/delay.h> struct aic3262_irq_data { int mask; int status; }; static struct aic3262_irq_data aic3262_irqs[] = { { .mask = AIC3262_HEADSET_IN_MASK, .status = AIC3262_HEADSET_PLUG_UNPLUG_INT, }, { .mask = AIC3262_BUTTON_PRESS_MASK, .status = AIC3262_BUTTON_PRESS_INT, }, { .mask = AIC3262_DAC_DRC_THRES_MASK, .status = AIC3262_LEFT_DRC_THRES_INT | AIC3262_RIGHT_DRC_THRES_INT, }, { .mask = AIC3262_AGC_NOISE_MASK, .status = AIC3262_LEFT_AGC_NOISE_INT | AIC3262_RIGHT_AGC_NOISE_INT, }, { .mask = AIC3262_OVER_CURRENT_MASK, .status = AIC3262_LEFT_OUTPUT_DRIVER_OVERCURRENT_INT | AIC3262_RIGHT_OUTPUT_DRIVER_OVERCURRENT_INT, }, { .mask = AIC3262_OVERFLOW_MASK, .status = AIC3262_LEFT_DAC_OVERFLOW_INT | AIC3262_RIGHT_DAC_OVERFLOW_INT | AIC3262_MINIDSP_D_BARREL_SHIFT_OVERFLOW_INT | AIC3262_LEFT_ADC_OVERFLOW_INT | AIC3262_RIGHT_ADC_OVERFLOW_INT | AIC3262_MINIDSP_D_BARREL_SHIFT_OVERFLOW_INT, }, { .mask = AIC3262_SPK_OVERCURRENT_MASK, 
.status = AIC3262_SPK_OVER_CURRENT_INT, }, }; struct aic3262_gpio_data { }; static inline struct aic3262_irq_data *irq_to_aic3262_irq(struct aic3xxx *aic3262, int irq) { return &aic3262_irqs[irq - aic3262->irq_base]; } static void aic3262_irq_lock(struct irq_data *data) { struct aic3xxx *aic3262 = irq_data_get_irq_chip_data(data); mutex_lock(&aic3262->irq_lock); } static void aic3262_irq_sync_unlock(struct irq_data *data) { struct aic3xxx *aic3262 = irq_data_get_irq_chip_data(data); /* write back to hardware any change in irq mask */ if (aic3262->irq_masks_cur != aic3262->irq_masks_cache) { aic3262->irq_masks_cache = aic3262->irq_masks_cur; aic3xxx_reg_write(aic3262, AIC3262_INT1_CNTL, aic3262->irq_masks_cur); } mutex_unlock(&aic3262->irq_lock); } static void aic3262_irq_unmask(struct irq_data *data) { struct aic3xxx *aic3262 = irq_data_get_irq_chip_data(data); struct aic3262_irq_data *irq_data = irq_to_aic3262_irq(aic3262, data->irq); aic3262->irq_masks_cur |= irq_data->mask; } static void aic3262_irq_mask(struct irq_data *data) { struct aic3xxx *aic3262 = irq_data_get_irq_chip_data(data); struct aic3262_irq_data *irq_data = irq_to_aic3262_irq(aic3262, data->irq); aic3262->irq_masks_cur &= ~irq_data->mask; } static struct irq_chip aic3262_irq_chip = { .name = "tlv320aic3262", .irq_bus_lock = aic3262_irq_lock, .irq_bus_sync_unlock = aic3262_irq_sync_unlock, .irq_mask = aic3262_irq_mask, .irq_unmask = aic3262_irq_unmask, }; static irqreturn_t aic3262_irq_thread(int irq, void *data) { struct aic3xxx *aic3262 = data; u8 status[4]; /* Reading sticky bit registers acknowledges the interrupt to the device */ aic3xxx_bulk_read(aic3262, AIC3262_INT_STICKY_FLAG1, 4, status); /* report */ if (status[2] & aic3262_irqs[AIC3262_IRQ_HEADSET_DETECT].status) handle_nested_irq(aic3262->irq_base); if (status[2] & aic3262_irqs[AIC3262_IRQ_BUTTON_PRESS].status) handle_nested_irq(aic3262->irq_base + 1); if (status[2] & aic3262_irqs[AIC3262_IRQ_DAC_DRC].status) 
handle_nested_irq(aic3262->irq_base + 2); if (status[3] & aic3262_irqs[AIC3262_IRQ_AGC_NOISE].status) handle_nested_irq(aic3262->irq_base + 3); if (status[2] & aic3262_irqs[AIC3262_IRQ_OVER_CURRENT].status) handle_nested_irq(aic3262->irq_base + 4); if (status[0] & aic3262_irqs[AIC3262_IRQ_OVERFLOW_EVENT].status) handle_nested_irq(aic3262->irq_base + 5); if (status[3] & aic3262_irqs[AIC3262_IRQ_SPEAKER_OVER_TEMP].status) handle_nested_irq(aic3262->irq_base + 6); /* ack unmasked irqs */ /* No need to acknowledge the interrupt on AIC3262 */ return IRQ_HANDLED; } int aic3xxx_irq_init(struct aic3xxx *aic3262) { int cur_irq, ret; mutex_init(&aic3262->irq_lock); /* mask the individual interrupt sources */ aic3262->irq_masks_cur = 0x0; aic3262->irq_masks_cache = 0x0; aic3xxx_reg_write(aic3262, AIC3262_INT1_CNTL, 0x0); if (!aic3262->irq) { dev_warn(aic3262->dev, "no interrupt specified, no interrupts\n"); aic3262->irq_base = 0; return 0; } if (!aic3262->irq_base) { dev_err(aic3262->dev, "no interrupt base specified, no interrupts\n"); return 0; } /* Register them with genirq */ for (cur_irq = aic3262->irq_base; cur_irq < aic3262->irq_base + ARRAY_SIZE(aic3262_irqs); cur_irq++) { irq_set_chip_data(cur_irq, aic3262); irq_set_chip_and_handler(cur_irq, &aic3262_irq_chip, handle_edge_irq); irq_set_nested_thread(cur_irq, 1); /* ARM needs us to explicitly flag the IRQ as valid * and will set them noprobe when we do so. 
*/ #ifdef CONFIG_ARM set_irq_flags(cur_irq, IRQF_VALID); #else set_irq_noprobe(cur_irq); #endif } ret = request_threaded_irq(aic3262->irq, NULL, aic3262_irq_thread, IRQF_TRIGGER_RISING, "tlv320aic3262", aic3262); if (ret < 0) { dev_err(aic3262->dev, "failed to request IRQ %d: %d\n", aic3262->irq, ret); return ret; } return 0; } EXPORT_SYMBOL(aic3xxx_irq_init); void aic3xxx_irq_exit(struct aic3xxx *aic3262) { if (aic3262->irq) free_irq(aic3262->irq, aic3262); } EXPORT_SYMBOL(aic3xxx_irq_exit); MODULE_AUTHOR("Mukund navada <navada@ti.com>"); MODULE_AUTHOR("Mehar Bajwa <mehar.bajwa@ti.com>"); MODULE_DESCRIPTION ("Interrupt controller support for TI OMAP44XX TLV320AIC3262"); MODULE_LICENSE("GPL");
gpl-2.0
Talustus/i9100-uboot
board/amcc/walnut/walnut.c
151
3288
/*
 * (C) Copyright 2000-2005
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <asm/processor.h>
#include <spd_sdram.h>

/* FPGA board-control register; bit 0 selects CTS/RTS for UART1. */
#define FPGA_BRDC	0xF0300004

/*
 * Early board setup: program the Universal Interrupt Controller for
 * the Walnut/Sycamore board and route UART1 flow control through the
 * FPGA.  Always returns 0.
 *
 * Interrupt map:
 *   IRQ 0-15  405GP internally generated; active high; level sensitive
 *   IRQ 16    405GP internally generated; active low; level sensitive
 *   IRQ 17-24 RESERVED
 *   IRQ 25 (EXT IRQ 0) FPGA; active high; level sensitive
 *   IRQ 26 (EXT IRQ 1) SMI; active high; level sensitive
 *   IRQ 27 (EXT IRQ 2) Not Used
 *   IRQ 28 (EXT IRQ 3) PCI SLOT 3; active low; level sensitive
 *   IRQ 29 (EXT IRQ 4) PCI SLOT 2; active low; level sensitive
 *   IRQ 30 (EXT IRQ 5) PCI SLOT 1; active low; level sensitive
 *   IRQ 31 (EXT IRQ 6) PCI SLOT 0; active low; level sensitive
 *
 * Walnut note: IRQ 25 (FPGA) is shared by the mouse, keyboard, IRDA
 * and external expansion; the FPGA must be read to find which device
 * raised it.
 */
int board_early_init_f(void)
{
	mtdcr(UIC0SR, 0xFFFFFFFF);	/* clear all ints */
	mtdcr(UIC0ER, 0x00000000);	/* disable all ints */
	mtdcr(UIC0CR, 0x00000020);	/* set all but FPGA SMI to be non-critical */
	mtdcr(UIC0PR, 0xFFFFFFE0);	/* set int polarities */
	mtdcr(UIC0TR, 0x10000000);	/* set int trigger levels */
	mtdcr(UIC0VCR, 0x00000001);	/* set vect base=0,INT0 highest priority */
	mtdcr(UIC0SR, 0xFFFFFFFF);	/* clear all ints */

	/* set UART1 control to select CTS/RTS */
	*(volatile char *)(FPGA_BRDC) |= 0x1;

	return 0;
}

/*
 * Check Board Identity: print the board name (Sycamore vs. Walnut,
 * distinguished by the processor version register) plus the serial
 * number from the environment, if one is set.
 */
int checkboard(void)
{
	char serial[64];
	int len = getenv_f("serial#", serial, sizeof(serial));

	if (get_pvr() == PVR_405GPR_RB)
		puts("Board: Sycamore - AMCC PPC405GPr Evaluation Board");
	else
		puts("Board: Walnut - AMCC PPC405GP Evaluation Board");

	if (len > 0) {
		puts(", serial# ");
		puts(serial);
	}
	putc('\n');

	return 0;
}

/*
 * initdram(int board_type) reads EEPROM via I2c.  EEPROM contains all of
 * the necessary info for SDRAM controller configuration.
 */
phys_size_t initdram(int board_type)
{
	return spd_sdram();
}
gpl-2.0
ReaperXL2/Overkill_v4_extended
drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
151
31694
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__ #include <linux/module.h> #include "msm_sd.h" #include "msm_actuator.h" #include "msm_cci.h" DEFINE_MSM_MUTEX(msm_actuator_mutex); /*#define MSM_ACUTUATOR_DEBUG*/ #undef CDBG #ifdef MSM_ACUTUATOR_DEBUG #define CDBG(fmt, args...) pr_err(fmt, ##args) #else #define CDBG(fmt, args...) pr_debug(fmt, ##args) #endif static struct msm_actuator msm_vcm_actuator_table; static struct msm_actuator msm_piezo_actuator_table; static struct i2c_driver msm_actuator_i2c_driver; static struct msm_actuator *actuators[] = { &msm_vcm_actuator_table, &msm_piezo_actuator_table, }; static int32_t msm_actuator_piezo_set_default_focus( struct msm_actuator_ctrl_t *a_ctrl, struct msm_actuator_move_params_t *move_params) { int32_t rc = 0; struct msm_camera_i2c_reg_setting reg_setting; CDBG("Enter\n"); if (a_ctrl->curr_step_pos != 0) { a_ctrl->i2c_tbl_index = 0; a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl, a_ctrl->initial_code, 0, 0); a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl, a_ctrl->initial_code, 0, 0); reg_setting.reg_setting = a_ctrl->i2c_reg_tbl; reg_setting.data_type = a_ctrl->i2c_data_type; reg_setting.size = a_ctrl->i2c_tbl_index; rc = a_ctrl->i2c_client.i2c_func_tbl-> i2c_write_table_w_microdelay( &a_ctrl->i2c_client, &reg_setting); if (rc < 0) { pr_err("%s: i2c write error:%d\n", __func__, rc); return rc; } a_ctrl->i2c_tbl_index = 0; a_ctrl->curr_step_pos = 0; } CDBG("Exit\n"); return rc; } static void 
msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl, int16_t next_lens_position, uint32_t hw_params, uint16_t delay) { struct msm_actuator_reg_params_t *write_arr = a_ctrl->reg_tbl; uint32_t hw_dword = hw_params; uint16_t i2c_byte1 = 0, i2c_byte2 = 0; uint16_t value = 0; uint32_t size = a_ctrl->reg_tbl_size, i = 0; struct msm_camera_i2c_reg_array *i2c_tbl = a_ctrl->i2c_reg_tbl; CDBG("Enter\n"); for (i = 0; i < size; i++) { /* check that the index into i2c_tbl cannot grow larger that the allocated size of i2c_tbl */ if ((a_ctrl->total_steps + 1) < (a_ctrl->i2c_tbl_index)) { break; } if (write_arr[i].reg_write_type == MSM_ACTUATOR_WRITE_DAC) { value = (next_lens_position << write_arr[i].data_shift) | ((hw_dword & write_arr[i].hw_mask) >> write_arr[i].hw_shift); if (write_arr[i].reg_addr != 0xFFFF) { i2c_byte1 = write_arr[i].reg_addr; i2c_byte2 = value; if (size != (i+1)) { i2c_byte2 = value & 0xFF; CDBG("byte1:0x%x, byte2:0x%x\n", i2c_byte1, i2c_byte2); i2c_tbl[a_ctrl->i2c_tbl_index]. reg_addr = i2c_byte1; i2c_tbl[a_ctrl->i2c_tbl_index]. reg_data = i2c_byte2; i2c_tbl[a_ctrl->i2c_tbl_index]. 
delay = 0; a_ctrl->i2c_tbl_index++; i++; i2c_byte1 = write_arr[i].reg_addr; i2c_byte2 = (value & 0xFF00) >> 8; } } else { i2c_byte1 = (value & 0xFF00) >> 8; i2c_byte2 = value & 0xFF; } } else { i2c_byte1 = write_arr[i].reg_addr; i2c_byte2 = (hw_dword & write_arr[i].hw_mask) >> write_arr[i].hw_shift; } CDBG("i2c_byte1:0x%x, i2c_byte2:0x%x\n", i2c_byte1, i2c_byte2); i2c_tbl[a_ctrl->i2c_tbl_index].reg_addr = i2c_byte1; i2c_tbl[a_ctrl->i2c_tbl_index].reg_data = i2c_byte2; i2c_tbl[a_ctrl->i2c_tbl_index].delay = delay; a_ctrl->i2c_tbl_index++; } CDBG("Exit\n"); } static int32_t msm_actuator_init_focus(struct msm_actuator_ctrl_t *a_ctrl, uint16_t size, enum msm_actuator_data_type type, struct reg_settings_t *settings) { int32_t rc = -EFAULT; int32_t i = 0; CDBG("Enter\n"); for (i = 0; i < size; i++) { switch (type) { case MSM_ACTUATOR_BYTE_DATA: rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write( &a_ctrl->i2c_client, settings[i].reg_addr, settings[i].reg_data, MSM_CAMERA_I2C_BYTE_DATA); break; case MSM_ACTUATOR_WORD_DATA: rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write( &a_ctrl->i2c_client, settings[i].reg_addr, settings[i].reg_data, MSM_CAMERA_I2C_WORD_DATA); break; default: pr_err("Unsupport data type: %d\n", type); break; } if (rc < 0) break; } a_ctrl->curr_step_pos = 0; CDBG("Exit\n"); return rc; } static void msm_actuator_write_focus( struct msm_actuator_ctrl_t *a_ctrl, uint16_t curr_lens_pos, struct damping_params_t *damping_params, int8_t sign_direction, int16_t code_boundary) { int16_t next_lens_pos = 0; uint16_t damping_code_step = 0; uint16_t wait_time = 0; CDBG("Enter\n"); damping_code_step = damping_params->damping_step; wait_time = damping_params->damping_delay; /* Write code based on damping_code_step in a loop */ for (next_lens_pos = curr_lens_pos + (sign_direction * damping_code_step); (sign_direction * next_lens_pos) <= (sign_direction * code_boundary); next_lens_pos = (next_lens_pos + (sign_direction * damping_code_step))) { 
a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl, next_lens_pos, damping_params->hw_params, wait_time); curr_lens_pos = next_lens_pos; } if (curr_lens_pos != code_boundary) { a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl, code_boundary, damping_params->hw_params, wait_time); } CDBG("Exit\n"); } static int32_t msm_actuator_piezo_move_focus( struct msm_actuator_ctrl_t *a_ctrl, struct msm_actuator_move_params_t *move_params) { int32_t dest_step_position = move_params->dest_step_pos; struct damping_params_t ringing_params_kernel; int32_t rc = 0; int32_t num_steps = move_params->num_steps; struct msm_camera_i2c_reg_setting reg_setting; CDBG("Enter\n"); if (copy_from_user(&ringing_params_kernel, &(move_params->ringing_params[0]), sizeof(struct damping_params_t))) { pr_err("copy_from_user failed\n"); return -EFAULT; } if (num_steps == 0) return rc; a_ctrl->i2c_tbl_index = 0; a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl, (num_steps * a_ctrl->region_params[0].code_per_step), ringing_params_kernel.hw_params, 0); reg_setting.reg_setting = a_ctrl->i2c_reg_tbl; reg_setting.data_type = a_ctrl->i2c_data_type; reg_setting.size = a_ctrl->i2c_tbl_index; rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write_table_w_microdelay( &a_ctrl->i2c_client, &reg_setting); if (rc < 0) { pr_err("i2c write error:%d\n", rc); return rc; } a_ctrl->i2c_tbl_index = 0; a_ctrl->curr_step_pos = dest_step_position; CDBG("Exit\n"); return rc; } static int32_t msm_actuator_move_focus( struct msm_actuator_ctrl_t *a_ctrl, struct msm_actuator_move_params_t *move_params) { int32_t rc = 0; struct damping_params_t ringing_params_kernel; int8_t sign_dir = move_params->sign_dir; uint16_t step_boundary = 0; uint16_t target_step_pos = 0; uint16_t target_lens_pos = 0; int16_t dest_step_pos = move_params->dest_step_pos; uint16_t curr_lens_pos = 0; int dir = move_params->dir; int32_t num_steps = move_params->num_steps; struct msm_camera_i2c_reg_setting reg_setting; curr_lens_pos = 
a_ctrl->step_position_table[a_ctrl->curr_step_pos]; move_params->curr_lens_pos = curr_lens_pos; if (copy_from_user(&ringing_params_kernel, &(move_params->ringing_params[a_ctrl->curr_region_index]), sizeof(struct damping_params_t))) { pr_err("copy_from_user failed\n"); return -EFAULT; } CDBG("called, dir %d, num_steps %d\n", dir, num_steps); if (dest_step_pos == a_ctrl->curr_step_pos) return rc; if ((sign_dir > MSM_ACTUATOR_MOVE_SIGNED_NEAR) || (sign_dir < MSM_ACTUATOR_MOVE_SIGNED_FAR)) { pr_err("Invalid sign_dir = %d\n", sign_dir); return -EFAULT; } if ((dir > MOVE_FAR) || (dir < MOVE_NEAR)) { pr_err("Invalid direction = %d\n", dir); return -EFAULT; } if (dest_step_pos > a_ctrl->total_steps) { pr_err("Step pos greater than total steps = %d\n", dest_step_pos); return -EFAULT; } a_ctrl->i2c_tbl_index = 0; CDBG("curr_step_pos =%d dest_step_pos =%d curr_lens_pos=%d\n", a_ctrl->curr_step_pos, dest_step_pos, curr_lens_pos); while (a_ctrl->curr_step_pos != dest_step_pos) { step_boundary = a_ctrl->region_params[a_ctrl->curr_region_index]. 
step_bound[dir]; if ((dest_step_pos * sign_dir) <= (step_boundary * sign_dir)) { target_step_pos = dest_step_pos; target_lens_pos = a_ctrl->step_position_table[target_step_pos]; a_ctrl->func_tbl->actuator_write_focus(a_ctrl, curr_lens_pos, &ringing_params_kernel, sign_dir, target_lens_pos); curr_lens_pos = target_lens_pos; } else { target_step_pos = step_boundary; target_lens_pos = a_ctrl->step_position_table[target_step_pos]; a_ctrl->func_tbl->actuator_write_focus(a_ctrl, curr_lens_pos, &ringing_params_kernel, sign_dir, target_lens_pos); curr_lens_pos = target_lens_pos; a_ctrl->curr_region_index += sign_dir; } a_ctrl->curr_step_pos = target_step_pos; } move_params->curr_lens_pos = curr_lens_pos; reg_setting.reg_setting = a_ctrl->i2c_reg_tbl; reg_setting.data_type = a_ctrl->i2c_data_type; reg_setting.size = a_ctrl->i2c_tbl_index; rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write_table_w_microdelay( &a_ctrl->i2c_client, &reg_setting); if (rc < 0) { pr_err("i2c write error:%d\n", rc); return rc; } a_ctrl->i2c_tbl_index = 0; CDBG("Exit\n"); return rc; } static int32_t msm_actuator_init_step_table(struct msm_actuator_ctrl_t *a_ctrl, struct msm_actuator_set_info_t *set_info) { int16_t code_per_step = 0; int16_t cur_code = 0; int16_t step_index = 0, region_index = 0; uint16_t step_boundary = 0; uint32_t max_code_size = 1; uint16_t data_size = set_info->actuator_params.data_size; CDBG("Enter\n"); for (; data_size > 0; data_size--) max_code_size *= 2; kfree(a_ctrl->step_position_table); a_ctrl->step_position_table = NULL; if (set_info->af_tuning_params.total_steps > MAX_ACTUATOR_AF_TOTAL_STEPS) { pr_err("Max actuator totalsteps exceeded = %d\n", set_info->af_tuning_params.total_steps); return -EFAULT; } /* Fill step position table */ a_ctrl->step_position_table = kmalloc(sizeof(uint16_t) * (set_info->af_tuning_params.total_steps + 1), GFP_KERNEL); if (a_ctrl->step_position_table == NULL) return -ENOMEM; cur_code = set_info->af_tuning_params.initial_code; 
a_ctrl->step_position_table[step_index++] = cur_code; for (region_index = 0; region_index < a_ctrl->region_size; region_index++) { code_per_step = a_ctrl->region_params[region_index].code_per_step; step_boundary = a_ctrl->region_params[region_index]. step_bound[MOVE_NEAR]; for (; step_index <= step_boundary; step_index++) { cur_code += code_per_step; if (cur_code < max_code_size) a_ctrl->step_position_table[step_index] = cur_code; else { for (; step_index < set_info->af_tuning_params.total_steps; step_index++) a_ctrl-> step_position_table[ step_index] = max_code_size; } } } CDBG("Exit\n"); return 0; } static int32_t msm_actuator_set_default_focus( struct msm_actuator_ctrl_t *a_ctrl, struct msm_actuator_move_params_t *move_params) { int32_t rc = 0; CDBG("Enter\n"); if (a_ctrl->curr_step_pos != 0) rc = a_ctrl->func_tbl->actuator_move_focus(a_ctrl, move_params); CDBG("Exit\n"); return rc; } static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl) { int32_t rc = 0; CDBG("Enter\n"); if (a_ctrl->vcm_enable) { rc = gpio_direction_output(a_ctrl->vcm_pwd, 0); if (!rc) gpio_free(a_ctrl->vcm_pwd); } kfree(a_ctrl->step_position_table); a_ctrl->step_position_table = NULL; kfree(a_ctrl->i2c_reg_tbl); a_ctrl->i2c_reg_tbl = NULL; a_ctrl->i2c_tbl_index = 0; CDBG("Exit\n"); return rc; } static int32_t msm_actuator_set_position( struct msm_actuator_ctrl_t *a_ctrl, struct msm_actuator_set_position_t *set_pos) { int32_t rc = 0; int32_t index; uint16_t next_lens_position; uint16_t delay; uint32_t hw_params = 0; struct msm_camera_i2c_reg_setting reg_setting; CDBG("%s Enter %d\n", __func__, __LINE__); if (set_pos->number_of_steps == 0) return rc; a_ctrl->i2c_tbl_index = 0; for (index = 0; index < set_pos->number_of_steps; index++) { next_lens_position = set_pos->pos[index]; delay = set_pos->delay[index]; a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl, next_lens_position, hw_params, delay); reg_setting.reg_setting = a_ctrl->i2c_reg_tbl; reg_setting.size = 
a_ctrl->i2c_tbl_index; reg_setting.data_type = a_ctrl->i2c_data_type; rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write_table_w_microdelay( &a_ctrl->i2c_client, &reg_setting); if (rc < 0) { pr_err("%s Failed I2C write Line %d\n", __func__, __LINE__); return rc; } a_ctrl->i2c_tbl_index = 0; } CDBG("%s exit %d\n", __func__, __LINE__); return rc; } static int32_t msm_actuator_init(struct msm_actuator_ctrl_t *a_ctrl, struct msm_actuator_set_info_t *set_info) { struct reg_settings_t *init_settings = NULL; int32_t rc = -EFAULT; uint16_t i = 0; struct msm_camera_cci_client *cci_client = NULL; CDBG("Enter\n"); for (i = 0; i < ARRAY_SIZE(actuators); i++) { if (set_info->actuator_params.act_type == actuators[i]->act_type) { a_ctrl->func_tbl = &actuators[i]->func_tbl; rc = 0; } } if (rc < 0) { pr_err("Actuator function table not found\n"); return rc; } if (set_info->af_tuning_params.total_steps > MAX_ACTUATOR_AF_TOTAL_STEPS) { pr_err("Max actuator totalsteps exceeded = %d\n", set_info->af_tuning_params.total_steps); return -EFAULT; } if (set_info->af_tuning_params.region_size > MAX_ACTUATOR_REGION) { pr_err("MAX_ACTUATOR_REGION is exceeded.\n"); return -EFAULT; } a_ctrl->region_size = set_info->af_tuning_params.region_size; a_ctrl->pwd_step = set_info->af_tuning_params.pwd_step; a_ctrl->total_steps = set_info->af_tuning_params.total_steps; if (copy_from_user(&a_ctrl->region_params, (void *)set_info->af_tuning_params.region_params, a_ctrl->region_size * sizeof(struct region_params_t))) return -EFAULT; if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE) { cci_client = a_ctrl->i2c_client.cci_client; cci_client->sid = set_info->actuator_params.i2c_addr >> 1; cci_client->retries = 3; cci_client->id_map = 0; cci_client->cci_i2c_master = a_ctrl->cci_master; } else { a_ctrl->i2c_client.client->addr = set_info->actuator_params.i2c_addr; } a_ctrl->i2c_data_type = set_info->actuator_params.i2c_data_type; a_ctrl->i2c_client.addr_type = set_info->actuator_params.i2c_addr_type; if 
(set_info->actuator_params.reg_tbl_size <= MAX_ACTUATOR_REG_TBL_SIZE) { a_ctrl->reg_tbl_size = set_info->actuator_params.reg_tbl_size; } else { a_ctrl->reg_tbl_size = 0; pr_err("MAX_ACTUATOR_REG_TBL_SIZE is exceeded.\n"); return -EFAULT; } kfree(a_ctrl->i2c_reg_tbl); a_ctrl->i2c_reg_tbl = kmalloc(sizeof(struct msm_camera_i2c_reg_array) * (set_info->af_tuning_params.total_steps + 1), GFP_KERNEL); if (!a_ctrl->i2c_reg_tbl) { pr_err("kmalloc fail\n"); return -ENOMEM; } if (copy_from_user(&a_ctrl->reg_tbl, (void *)set_info->actuator_params.reg_tbl_params, a_ctrl->reg_tbl_size * sizeof(struct msm_actuator_reg_params_t))) { kfree(a_ctrl->i2c_reg_tbl); return -EFAULT; } if (set_info->actuator_params.init_setting_size && set_info->actuator_params.init_setting_size <= MAX_ACTUATOR_REG_TBL_SIZE) { if (a_ctrl->func_tbl->actuator_init_focus) { init_settings = kmalloc(sizeof(struct reg_settings_t) * (set_info->actuator_params.init_setting_size), GFP_KERNEL); if (init_settings == NULL) { kfree(a_ctrl->i2c_reg_tbl); pr_err("Error allocating memory for init_settings\n"); return -EFAULT; } if (copy_from_user(init_settings, (void *)set_info->actuator_params.init_settings, set_info->actuator_params.init_setting_size * sizeof(struct reg_settings_t))) { kfree(init_settings); kfree(a_ctrl->i2c_reg_tbl); pr_err("Error copying init_settings\n"); return -EFAULT; } rc = a_ctrl->func_tbl->actuator_init_focus(a_ctrl, set_info->actuator_params.init_setting_size, a_ctrl->i2c_data_type, init_settings); kfree(init_settings); if (rc < 0) { kfree(a_ctrl->i2c_reg_tbl); pr_err("Error actuator_init_focus\n"); return -EFAULT; } } } a_ctrl->initial_code = set_info->af_tuning_params.initial_code; if (a_ctrl->func_tbl->actuator_init_step_table) rc = a_ctrl->func_tbl-> actuator_init_step_table(a_ctrl, set_info); a_ctrl->curr_step_pos = 0; a_ctrl->curr_region_index = 0; CDBG("Exit\n"); return rc; } static int32_t msm_actuator_direct_i2c_write( struct msm_actuator_ctrl_t *a_ctrl, struct 
msm_actuator_i2c_table *i2c_table) { int32_t rc = -1; int32_t i = 0; if (NULL == i2c_table || NULL == a_ctrl) { pr_err("%s: NULL pointer: i2c_table:%p, a_ctrl:%p\n", __func__, i2c_table, a_ctrl); return rc; } if (i2c_table->size > MSM_ACTUATOR_I2C_MAX_TABLE_SIZE) { pr_err("%s: i2c table size exceeds the maximum allowed size.\n", __func__); pr_err("%s: size:%d, max size:%d\n", __func__, i2c_table->size, MSM_ACTUATOR_I2C_MAX_TABLE_SIZE); return rc; } for (i = 0; i < i2c_table->size; i++) { uint16_t addr = i2c_table->data[i].addr; uint8_t data = i2c_table->data[i].value; uint32_t wait_time = i2c_table->data[i].wait_time; if (a_ctrl->i2c_client.i2c_func_tbl->i2c_write) { rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write( &a_ctrl->i2c_client, addr, data, MSM_CAMERA_I2C_BYTE_DATA ); if (rc < 0) { pr_err("%s: msm_camera_i2c_write failed.\n", __func__); break; } } else { pr_err("%s(%d): i2c_write is null", __func__, __LINE__); } if (wait_time) usleep_range(wait_time, wait_time + 1000); } return rc; } static int32_t msm_actuator_direct_i2c_read( struct msm_actuator_ctrl_t *a_ctrl, struct msm_actuator_i2c_read_config *actuator_i2c_read_config) { int32_t rc = -EINVAL; uint8_t *data_read = NULL; if (NULL == actuator_i2c_read_config || NULL == a_ctrl) { pr_err("%s: NULL pointer: i2c_read_config %p, a_ctrl %p\n", __func__, actuator_i2c_read_config, a_ctrl); goto exit; } data_read = kmalloc(sizeof(uint8_t)*actuator_i2c_read_config->data_size, GFP_KERNEL); if (data_read == NULL) { pr_err("%s: Unable to allocate memory!\n", __func__); rc = -ENOMEM; goto exit; } if (a_ctrl->i2c_client.i2c_func_tbl->i2c_read_seq) { rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_read_seq( &a_ctrl->i2c_client, actuator_i2c_read_config->reg_addr, data_read, actuator_i2c_read_config->data_size); if (rc < 0) { pr_err("%s: Unable to read seq (%d)\n", __func__, rc); rc = -EIO; goto exit; } } if (copy_to_user(actuator_i2c_read_config->data, data_read, sizeof(uint8_t) * actuator_i2c_read_config->data_size)) { 
pr_err("%s: Unable to copy to user space!\n", __func__); rc = -EFAULT; goto exit; } exit: kfree(data_read); return rc; } static int32_t msm_actuator_config(struct msm_actuator_ctrl_t *a_ctrl, void __user *argp) { struct msm_actuator_cfg_data *cdata = (struct msm_actuator_cfg_data *)argp; int32_t rc = 0; mutex_lock(a_ctrl->actuator_mutex); CDBG("Enter\n"); CDBG("%s type %d\n", __func__, cdata->cfgtype); switch (cdata->cfgtype) { case CFG_GET_ACTUATOR_INFO: cdata->is_af_supported = 1; cdata->cfg.cam_name = a_ctrl->cam_name; break; case CFG_SET_ACTUATOR_INFO: rc = msm_actuator_init(a_ctrl, &cdata->cfg.set_info); if (rc < 0) pr_err("init table failed %d\n", rc); break; case CFG_SET_DEFAULT_FOCUS: rc = a_ctrl->func_tbl->actuator_set_default_focus(a_ctrl, &cdata->cfg.move); if (rc < 0) pr_err("move focus failed %d\n", rc); break; case CFG_MOVE_FOCUS: rc = a_ctrl->func_tbl->actuator_move_focus(a_ctrl, &cdata->cfg.move); if (rc < 0) pr_err("move focus failed %d\n", rc); break; case CFG_SET_POSITION: rc = a_ctrl->func_tbl->actuator_set_position(a_ctrl, &cdata->cfg.setpos); if (rc < 0) pr_err("actuator_set_position failed %d\n", rc); break; case CFG_DIRECT_I2C_WRITE: rc = msm_actuator_direct_i2c_write(a_ctrl, &cdata->cfg.i2c_table); if (rc < 0) pr_err("%s CFG_DIRECT_I2C_WRITE failed %d\n", __func__, rc); break; case CFG_DIRECT_I2C_READ: rc = msm_actuator_direct_i2c_read(a_ctrl, &cdata->cfg.actuator_i2c_read_config); if (rc < 0) pr_err("%s CFG_DIRECT_I2C_READ failed %d\n", __func__, rc); break; default: break; } mutex_unlock(a_ctrl->actuator_mutex); CDBG("Exit\n"); return rc; } static int32_t msm_actuator_get_subdev_id(struct msm_actuator_ctrl_t *a_ctrl, void *arg) { uint32_t *subdev_id = (uint32_t *)arg; CDBG("Enter\n"); if (!subdev_id) { pr_err("failed\n"); return -EINVAL; } if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE) *subdev_id = a_ctrl->pdev->id; else *subdev_id = a_ctrl->subdev_id; CDBG("subdev_id %d\n", *subdev_id); CDBG("Exit\n"); return 0; } static 
struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = { .i2c_read = msm_camera_cci_i2c_read, .i2c_read_seq = msm_camera_cci_i2c_read_seq, .i2c_write = msm_camera_cci_i2c_write, .i2c_write_table = msm_camera_cci_i2c_write_table, .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table, .i2c_write_table_w_microdelay = msm_camera_cci_i2c_write_table_w_microdelay, .i2c_util = msm_sensor_cci_i2c_util, }; static struct msm_camera_i2c_fn_t msm_sensor_qup_func_tbl = { .i2c_read = msm_camera_qup_i2c_read, .i2c_read_seq = msm_camera_qup_i2c_read_seq, .i2c_write = msm_camera_qup_i2c_write, .i2c_write_table = msm_camera_qup_i2c_write_table, .i2c_write_seq_table = msm_camera_qup_i2c_write_seq_table, .i2c_write_table_w_microdelay = msm_camera_qup_i2c_write_table_w_microdelay, }; static int msm_actuator_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { int rc = 0; struct msm_actuator_ctrl_t *a_ctrl = v4l2_get_subdevdata(sd); CDBG("Enter\n"); if (!a_ctrl) { pr_err("failed\n"); return -EINVAL; } /* CCI initialization occurs after sensor power up */ CDBG("%s - Skipping call to initialize CCI\n", __func__); CDBG("Exit\n"); return rc; } static int msm_actuator_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { int rc = 0; struct msm_actuator_ctrl_t *a_ctrl = v4l2_get_subdevdata(sd); CDBG("Enter\n"); if (!a_ctrl) { pr_err("failed\n"); return -EINVAL; } /* CCI release occurs before sensor power down */ CDBG("%s - Skipping call to release CCI\n", __func__); kfree(a_ctrl->i2c_reg_tbl); a_ctrl->i2c_reg_tbl = NULL; CDBG("Exit\n"); return rc; } static const struct v4l2_subdev_internal_ops msm_actuator_internal_ops = { .open = msm_actuator_open, .close = msm_actuator_close, }; static long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { struct msm_actuator_ctrl_t *a_ctrl = v4l2_get_subdevdata(sd); void __user *argp = (void __user *)arg; CDBG("Enter\n"); CDBG("%s:%d a_ctrl %p argp %p\n", __func__, __LINE__, a_ctrl, argp); switch (cmd) { case 
VIDIOC_MSM_SENSOR_GET_SUBDEV_ID: return msm_actuator_get_subdev_id(a_ctrl, argp); case VIDIOC_MSM_ACTUATOR_CFG: return msm_actuator_config(a_ctrl, argp); case MSM_SD_SHUTDOWN: msm_actuator_close(sd, NULL); return 0; default: return -ENOIOCTLCMD; } } static int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl) { int rc = 0; CDBG("%s called\n", __func__); CDBG("vcm info: %x %x\n", a_ctrl->vcm_pwd, a_ctrl->vcm_enable); if (a_ctrl->vcm_enable) { rc = gpio_request(a_ctrl->vcm_pwd, "msm_actuator"); if (!rc) { CDBG("Enable VCM PWD\n"); gpio_direction_output(a_ctrl->vcm_pwd, 1); } } CDBG("Exit\n"); return rc; } static int32_t msm_actuator_power(struct v4l2_subdev *sd, int on) { int rc = 0; struct msm_actuator_ctrl_t *a_ctrl = v4l2_get_subdevdata(sd); CDBG("Enter\n"); mutex_lock(a_ctrl->actuator_mutex); if (on) rc = msm_actuator_power_up(a_ctrl); else rc = msm_actuator_power_down(a_ctrl); mutex_unlock(a_ctrl->actuator_mutex); CDBG("Exit\n"); return rc; } static struct v4l2_subdev_core_ops msm_actuator_subdev_core_ops = { .ioctl = msm_actuator_subdev_ioctl, .s_power = msm_actuator_power, }; static struct v4l2_subdev_ops msm_actuator_subdev_ops = { .core = &msm_actuator_subdev_core_ops, }; static const struct i2c_device_id msm_actuator_i2c_id[] = { {"qcom,actuator", (kernel_ulong_t)NULL}, { } }; static int32_t msm_actuator_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; struct msm_actuator_ctrl_t *act_ctrl_t = NULL; CDBG("Enter\n"); if (client == NULL) { pr_err("msm_actuator_i2c_probe: client is null\n"); rc = -EINVAL; goto probe_failure; } act_ctrl_t = kzalloc(sizeof(struct msm_actuator_ctrl_t), GFP_KERNEL); if (!act_ctrl_t) { pr_err("%s:%d failed no memory\n", __func__, __LINE__); return -ENOMEM; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { pr_err("i2c_check_functionality failed\n"); goto probe_failure; } CDBG("client = %x\n", (unsigned int) client); rc = of_property_read_u32(client->dev.of_node, 
"cell-index", &act_ctrl_t->subdev_id); CDBG("cell-index %d, rc %d\n", act_ctrl_t->subdev_id, rc); if (rc < 0) { pr_err("failed rc %d\n", rc); return rc; } act_ctrl_t->i2c_driver = &msm_actuator_i2c_driver; act_ctrl_t->i2c_client.client = client; act_ctrl_t->curr_step_pos = 0, act_ctrl_t->curr_region_index = 0, /* Set device type as I2C */ act_ctrl_t->act_device_type = MSM_CAMERA_I2C_DEVICE; act_ctrl_t->i2c_client.i2c_func_tbl = &msm_sensor_qup_func_tbl; act_ctrl_t->act_v4l2_subdev_ops = &msm_actuator_subdev_ops; act_ctrl_t->actuator_mutex = &msm_actuator_mutex; act_ctrl_t->cam_name = act_ctrl_t->subdev_id; CDBG("act_ctrl_t->cam_name: %d", act_ctrl_t->cam_name); /* Assign name for sub device */ snprintf(act_ctrl_t->msm_sd.sd.name, sizeof(act_ctrl_t->msm_sd.sd.name), "%s", act_ctrl_t->i2c_driver->driver.name); /* Initialize sub device */ v4l2_i2c_subdev_init(&act_ctrl_t->msm_sd.sd, act_ctrl_t->i2c_client.client, act_ctrl_t->act_v4l2_subdev_ops); v4l2_set_subdevdata(&act_ctrl_t->msm_sd.sd, act_ctrl_t); act_ctrl_t->msm_sd.sd.internal_ops = &msm_actuator_internal_ops; act_ctrl_t->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; media_entity_init(&act_ctrl_t->msm_sd.sd.entity, 0, NULL, 0); act_ctrl_t->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV; act_ctrl_t->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_ACTUATOR; act_ctrl_t->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2; msm_sd_register(&act_ctrl_t->msm_sd); pr_info("msm_actuator_i2c_probe: succeeded\n"); CDBG("Exit\n"); probe_failure: return rc; } static int32_t msm_actuator_platform_probe(struct platform_device *pdev) { int32_t rc = 0; struct msm_camera_cci_client *cci_client = NULL; struct msm_actuator_ctrl_t *msm_actuator_t = NULL; CDBG("Enter\n"); if (!pdev->dev.of_node) { pr_err("of_node NULL\n"); return -EINVAL; } msm_actuator_t = kzalloc(sizeof(struct msm_actuator_ctrl_t), GFP_KERNEL); if (!msm_actuator_t) { pr_err("%s:%d failed no memory\n", __func__, __LINE__); return -ENOMEM; } rc = 
of_property_read_u32((&pdev->dev)->of_node, "cell-index", &pdev->id); CDBG("cell-index %d, rc %d\n", pdev->id, rc); if (rc < 0) { kfree(msm_actuator_t); pr_err("failed rc %d\n", rc); return rc; } rc = of_property_read_u32((&pdev->dev)->of_node, "qcom,cci-master", &msm_actuator_t->cci_master); CDBG("qcom,cci-master %d, rc %d\n", msm_actuator_t->cci_master, rc); if (rc < 0) { kfree(msm_actuator_t); pr_err("failed rc %d\n", rc); return rc; } msm_actuator_t->act_v4l2_subdev_ops = &msm_actuator_subdev_ops; msm_actuator_t->actuator_mutex = &msm_actuator_mutex; msm_actuator_t->cam_name = pdev->id; /* Set platform device handle */ msm_actuator_t->pdev = pdev; /* Set device type as platform device */ msm_actuator_t->act_device_type = MSM_CAMERA_PLATFORM_DEVICE; msm_actuator_t->i2c_client.i2c_func_tbl = &msm_sensor_cci_func_tbl; msm_actuator_t->i2c_client.cci_client = kzalloc(sizeof( struct msm_camera_cci_client), GFP_KERNEL); if (!msm_actuator_t->i2c_client.cci_client) { kfree(msm_actuator_t); pr_err("failed no memory\n"); return -ENOMEM; } cci_client = msm_actuator_t->i2c_client.cci_client; cci_client->cci_subdev = msm_cci_get_subdev(); cci_client->cci_i2c_master = MASTER_MAX; v4l2_subdev_init(&msm_actuator_t->msm_sd.sd, msm_actuator_t->act_v4l2_subdev_ops); v4l2_set_subdevdata(&msm_actuator_t->msm_sd.sd, msm_actuator_t); msm_actuator_t->msm_sd.sd.internal_ops = &msm_actuator_internal_ops; msm_actuator_t->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(msm_actuator_t->msm_sd.sd.name, ARRAY_SIZE(msm_actuator_t->msm_sd.sd.name), "msm_actuator"); media_entity_init(&msm_actuator_t->msm_sd.sd.entity, 0, NULL, 0); msm_actuator_t->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV; msm_actuator_t->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_ACTUATOR; msm_actuator_t->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2; msm_sd_register(&msm_actuator_t->msm_sd); CDBG("Exit\n"); return rc; } static const struct of_device_id msm_actuator_i2c_dt_match[] = { {.compatible = 
"qcom,actuator"}, {} }; MODULE_DEVICE_TABLE(of, msm_actuator_i2c_dt_match); static struct i2c_driver msm_actuator_i2c_driver = { .id_table = msm_actuator_i2c_id, .probe = msm_actuator_i2c_probe, .remove = __exit_p(msm_actuator_i2c_remove), .driver = { .name = "qcom,actuator", .owner = THIS_MODULE, .of_match_table = msm_actuator_i2c_dt_match, }, }; static const struct of_device_id msm_actuator_dt_match[] = { {.compatible = "qcom,actuator", .data = NULL}, {} }; MODULE_DEVICE_TABLE(of, msm_actuator_dt_match); static struct platform_driver msm_actuator_platform_driver = { .driver = { .name = "qcom,actuator", .owner = THIS_MODULE, .of_match_table = msm_actuator_dt_match, }, }; static int __init msm_actuator_init_module(void) { int32_t rc = 0; CDBG("Enter\n"); rc = platform_driver_probe(&msm_actuator_platform_driver, msm_actuator_platform_probe); if (!rc) return rc; CDBG("%s:%d rc %d\n", __func__, __LINE__, rc); return i2c_add_driver(&msm_actuator_i2c_driver); } static struct msm_actuator msm_vcm_actuator_table = { .act_type = ACTUATOR_VCM, .func_tbl = { .actuator_init_step_table = msm_actuator_init_step_table, .actuator_move_focus = msm_actuator_move_focus, .actuator_write_focus = msm_actuator_write_focus, .actuator_set_default_focus = msm_actuator_set_default_focus, .actuator_init_focus = msm_actuator_init_focus, .actuator_parse_i2c_params = msm_actuator_parse_i2c_params, .actuator_set_position = msm_actuator_set_position, }, }; static struct msm_actuator msm_piezo_actuator_table = { .act_type = ACTUATOR_PIEZO, .func_tbl = { .actuator_init_step_table = NULL, .actuator_move_focus = msm_actuator_piezo_move_focus, .actuator_write_focus = NULL, .actuator_set_default_focus = msm_actuator_piezo_set_default_focus, .actuator_init_focus = msm_actuator_init_focus, .actuator_parse_i2c_params = msm_actuator_parse_i2c_params, }, }; module_init(msm_actuator_init_module); MODULE_DESCRIPTION("MSM ACTUATOR"); MODULE_LICENSE("GPL v2");
gpl-2.0
suse110/linux-1
arch/powerpc/platforms/44x/iss4xx.c
1431
4340
/*
 * PPC476 board specific routines
 *
 * Copyright 2010 Torez Smith, IBM Corporation.
 *
 * Based on earlier code:
 *    Matt Porter <mporter@kernel.crashing.org>
 *    Copyright 2002-2005 MontaVista Software Inc.
 *
 *    Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *    Copyright (c) 2003-2005 Zultys Technologies
 *
 *    Rewritten and ported to the merged powerpc tree:
 *    Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/ppc4xx.h>
#include <asm/mpic.h>
#include <asm/mmu.h>

/* Bus types under which platform devices are registered. */
static const struct of_device_id iss4xx_of_bus[] __initconst = {
	{ .compatible = "ibm,plb4", },
	{ .compatible = "ibm,plb6", },
	{ .compatible = "ibm,opb", },
	{ .compatible = "ibm,ebc", },
	{},
};

/* Register on-chip/bus devices and any device-tree RTC. */
static int __init iss4xx_device_probe(void)
{
	of_platform_bus_probe(NULL, iss4xx_of_bus, NULL);
	of_instantiate_rtc();

	return 0;
}
machine_device_initcall(iss4xx, iss4xx_device_probe);

/* We can have either UICs or MPICs */
static void __init iss4xx_init_irq(void)
{
	struct device_node *np;

	/* Find top level interrupt controller: the node that has an
	 * interrupt-controller property but no "interrupts" of its own
	 * (i.e. is not cascaded from another controller). */
	for_each_node_with_property(np, "interrupt-controller") {
		if (of_get_property(np, "interrupts", NULL) == NULL)
			break;
	}
	if (np == NULL)
		panic("Can't find top level interrupt controller");

	/* Check type and do appropriate initialization */
	if (of_device_is_compatible(np, "ibm,uic")) {
		uic_init_tree();
		ppc_md.get_irq = uic_get_irq;
#ifdef CONFIG_MPIC
	} else if (of_device_is_compatible(np, "chrp,open-pic")) {
		/* The MPIC driver will get everything it needs from the
		 * device-tree, just pass 0 to all arguments
		 */
		struct mpic *mpic = mpic_alloc(np, 0, MPIC_NO_RESET, 0, 0,
				" MPIC     ");
		BUG_ON(mpic == NULL);
		mpic_init(mpic);
		ppc_md.get_irq = mpic_get_irq;
#endif
	} else
		panic("Unrecognized top level interrupt controller");
}

#ifdef CONFIG_SMP
/* Per-CPU MPIC setup run on each secondary as it comes online. */
static void smp_iss4xx_setup_cpu(int cpu)
{
	mpic_setup_this_cpu();
}

/* Release a secondary CPU via the spin-table boot protocol. */
static int smp_iss4xx_kick_cpu(int cpu)
{
	struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
	const u64 *spin_table_addr_prop;
	u32 *spin_table;
	extern void start_secondary_47x(void);

	BUG_ON(cpunode == NULL);

	/* Assume spin table. We could test for the enable-method in
	 * the device-tree but currently there's little point as it's
	 * our only supported method
	 */
	spin_table_addr_prop = of_get_property(cpunode, "cpu-release-addr",
					       NULL);
	if (spin_table_addr_prop == NULL) {
		pr_err("CPU%d: Can't start, missing cpu-release-addr !\n",
		       cpu);
		return -ENOENT;
	}

	/* Assume it's mapped as part of the linear mapping. This is a bit
	 * fishy but will work fine for now
	 */
	spin_table = (u32 *)__va(*spin_table_addr_prop);
	pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);

	/* Write the CPU id first, barrier, then publish the release
	 * address last: the secondary spins until word 1 goes non-zero,
	 * so the ordering of these stores matters. */
	spin_table[3] = cpu;
	smp_wmb();
	spin_table[1] = __pa(start_secondary_47x);
	mb();

	return 0;
}

static struct smp_ops_t iss_smp_ops = {
	.probe		= smp_mpic_probe,
	.message_pass	= smp_mpic_message_pass,
	.setup_cpu	= smp_iss4xx_setup_cpu,
	.kick_cpu	= smp_iss4xx_kick_cpu,
	.give_timebase	= smp_generic_give_timebase,
	.take_timebase	= smp_generic_take_timebase,
};

/* SMP is only wired up on 47x-class MMUs. */
static void __init iss4xx_smp_init(void)
{
	if (mmu_has_feature(MMU_FTR_TYPE_47x))
		smp_ops = &iss_smp_ops;
}

#else /* CONFIG_SMP */
static void __init iss4xx_smp_init(void) { }
#endif /* CONFIG_SMP */

static void __init iss4xx_setup_arch(void)
{
	iss4xx_smp_init();
}

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */
static int __init iss4xx_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx"))
		return 0;

	return 1;
}

define_machine(iss4xx) {
	.name			= "ISS-4xx",
	.probe			= iss4xx_probe,
	.progress		= udbg_progress,
	.init_IRQ		= iss4xx_init_irq,
	.setup_arch		= iss4xx_setup_arch,
	.restart		= ppc4xx_reset_system,
	.calibrate_decr		= generic_calibrate_decr,
};
gpl-2.0
akhilnarang/ThugLife_bullhead
arch/arm/mach-omap2/pm24xx.c
2199
8853
/*
 * OMAP2 Power Management Routines
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Copyright (C) 2006-2008 Nokia Corporation
 *
 * Written by:
 * Richard Woodruff <r-woodruff2@ti.com>
 * Tony Lindgren
 * Juha Yrjola
 * Amit Kucheria <amit.kucheria@nokia.com>
 * Igor Stoppa <igor.stoppa@nokia.com>
 *
 * Based on pm.c for omap1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/gpio.h>
#include <linux/platform_data/gpio-omap.h>

#include <asm/fncpy.h>

#include <asm/mach/time.h>
#include <asm/mach/irq.h>
#include <asm/mach-types.h>
#include <asm/system_misc.h>

#include <linux/omap-dma.h>

#include "soc.h"
#include "common.h"
#include "clock.h"
#include "prm2xxx.h"
#include "prm-regbits-24xx.h"
#include "cm2xxx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
#include "sram.h"
#include "pm.h"
#include "control.h"
#include "powerdomain.h"
#include "clockdomain.h"

/* SRAM-resident suspend routine, copied in by omap2_pm_init(). */
static void (*omap2_sram_suspend)(u32 dllctrl, void __iomem *sdrc_dlla_ctrl,
				  void __iomem *sdrc_power);

static struct powerdomain *mpu_pwrdm, *core_pwrdm;
static struct clockdomain *dsp_clkdm, *mpu_clkdm, *wkup_clkdm, *gfx_clkdm;
static struct clk *osc_ck, *emul_ck;

/* Return 1 if any CORE-domain functional clock is still enabled. */
static int omap2_fclks_active(void)
{
	u32 f1, f2;

	f1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
	f2 = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);

	return (f1 | f2) ? 1 : 0;
}

/*
 * Put both CORE and MPU powerdomains into retention via the SRAM
 * suspend routine.  Statement order here is the contract: wake events
 * are cleared before programming next-power-states, and restored state
 * is cleaned up after resume.
 */
static int omap2_enter_full_retention(void)
{
	u32 l;

	/* There is 1 reference hold for all children of the oscillator
	 * clock, the following will remove it. If no one else uses the
	 * oscillator itself it will be disabled if/when we enter retention
	 * mode.
	 */
	clk_disable(osc_ck);

	/* Clear old wake-up events */
	/* REVISIT: These write to reserved bits? */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
	omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);

	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Workaround to kill USB */
	l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
	omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);

	omap2_gpio_prepare_for_idle(0);

	/* One last check for pending IRQs to avoid extra latency due
	 * to sleeping unnecessarily. */
	if (omap_irq_pending())
		goto no_sleep;

	/* Jump to SRAM suspend code */
	omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_POWER));

no_sleep:
	omap2_gpio_resume_after_idle();

	clk_enable(osc_ck);

	/* clear CORE wake-up events */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);

	/* wakeup domain events - bit 1: GPT1, bit5 GPIO */
	omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);

	/* MPU domain wake events */
	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x01)
		omap2_prm_write_mod_reg(0x01, OCP_MOD,
					OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x20)
		omap2_prm_write_mod_reg(0x20, OCP_MOD,
					OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	/* Mask future PRCM-to-MPU interrupts */
	omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_ON);

	return 0;
}

static int sti_console_enabled;

/* Return 1 only if no peripheral that can't wake the MPU is active. */
static int omap2_allow_mpu_retention(void)
{
	u32 l;

	/* Check for MMC, UART2, UART1, McSPI2, McSPI1 and DSS1. */
	l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
	if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK |
		 OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_MCSPI2_MASK |
		 OMAP24XX_EN_MCSPI1_MASK | OMAP24XX_EN_DSS1_MASK))
		return 0;
	/* Check for UART3. */
	l = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
	if (l & OMAP24XX_EN_UART3_MASK)
		return 0;
	if (sti_console_enabled)
		return 0;

	return 1;
}

/* Enter MPU retention (or at least WFI) for the idle path. */
static void omap2_enter_mpu_retention(void)
{
	const int zero = 0;

	/* The peripherals seem not to be able to wake up the MPU when
	 * it is in retention mode. */
	if (omap2_allow_mpu_retention()) {
		/* REVISIT: These write to reserved bits? */
		omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
		omap2_prm_write_mod_reg(0xffffffff, CORE_MOD,
					OMAP24XX_PM_WKST2);
		omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);

		/* Try to enter MPU retention */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);
	} else {
		/* Block MPU retention */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
	}

	/* WFI */
	asm("mcr p15, 0, %0, c7, c0, 4" : : "r" (zero) : "memory", "cc");

	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
}

/* Full-chip retention is only safe with no fclks, osc_ck off, no DMA. */
static int omap2_can_sleep(void)
{
	if (omap2_fclks_active())
		return 0;
	if (__clk_is_enabled(osc_ck))
		return 0;
	if (omap_dma_running())
		return 0;

	return 1;
}

/* cpuidle-style hook: pick MPU-only or full-chip retention. */
static void omap2_pm_idle(void)
{
	if (!omap2_can_sleep()) {
		if (omap_irq_pending())
			return;
		omap2_enter_mpu_retention();
		return;
	}

	if (omap_irq_pending())
		return;

	omap2_enter_full_retention();
}

/* One-time PRCM programming: retention states, wake deps, wake events. */
static void __init prcm_setup_regs(void)
{
	int i, num_mem_banks;
	struct powerdomain *pwrdm;

	/*
	 * Enable autoidle
	 * XXX This should be handled by hwmod code or PRCM init code
	 */
	omap2_prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
				OMAP2_PRCM_SYSCONFIG_OFFSET);

	/*
	 * Set CORE powerdomain memory banks to retain their contents
	 * during RETENTION
	 */
	num_mem_banks = pwrdm_get_mem_bank_count(core_pwrdm);
	for (i = 0; i < num_mem_banks; i++)
		pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET);

	pwrdm_set_logic_retst(core_pwrdm, PWRDM_POWER_RET);

	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Force-power down DSP, GFX powerdomains */
	pwrdm = clkdm_get_pwrdm(dsp_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);

	pwrdm = clkdm_get_pwrdm(gfx_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);

	/* Enable hardware-supervised idle for all clkdms */
	clkdm_for_each(omap_pm_clkdms_setup, NULL);
	clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);

#ifdef CONFIG_SUSPEND
	omap_pm_suspend = omap2_enter_full_retention;
#endif

	/* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
	 * stabilisation */
	omap2_prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
				OMAP2_PRCM_CLKSSETUP_OFFSET);

	/* Configure automatic voltage transition */
	omap2_prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
				OMAP2_PRCM_VOLTSETUP_OFFSET);
	omap2_prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
				(0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
				OMAP24XX_MEMRETCTRL_MASK |
				(0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
				(0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
				OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);

	/* Enable wake-up events */
	omap2_prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
				WKUP_MOD, PM_WKEN);
}

/*
 * Driver init: look up power/clock domains, grab clocks, program the
 * PRCM and copy the SRAM suspend routine into place.
 */
int __init omap2_pm_init(void)
{
	u32 l;

	printk(KERN_INFO "Power Management for OMAP2 initializing\n");
	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
	printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);

	/* Look up important powerdomains */

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (!mpu_pwrdm)
		pr_err("PM: mpu_pwrdm not found\n");

	core_pwrdm = pwrdm_lookup("core_pwrdm");
	if (!core_pwrdm)
		pr_err("PM: core_pwrdm not found\n");

	/* Look up important clockdomains */

	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	if (!mpu_clkdm)
		pr_err("PM: mpu_clkdm not found\n");

	wkup_clkdm = clkdm_lookup("wkup_clkdm");
	if (!wkup_clkdm)
		pr_err("PM: wkup_clkdm not found\n");

	dsp_clkdm = clkdm_lookup("dsp_clkdm");
	if (!dsp_clkdm)
		pr_err("PM: dsp_clkdm not found\n");

	gfx_clkdm = clkdm_lookup("gfx_clkdm");
	if (!gfx_clkdm)
		pr_err("PM: gfx_clkdm not found\n");

	osc_ck = clk_get(NULL, "osc_ck");
	if (IS_ERR(osc_ck)) {
		printk(KERN_ERR "could not get osc_ck\n");
		return -ENODEV;
	}

	if (cpu_is_omap242x()) {
		emul_ck = clk_get(NULL, "emul_ck");
		if (IS_ERR(emul_ck)) {
			printk(KERN_ERR "could not get emul_ck\n");
			clk_put(osc_ck);
			return -ENODEV;
		}
	}

	prcm_setup_regs();

	/*
	 * We copy the assembler sleep/wakeup routines to SRAM.
	 * These routines need to be in SRAM as that's the only
	 * memory the MPU can see when it wakes up after the entire
	 * chip enters idle.
	 */
	omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
					    omap24xx_cpu_suspend_sz);

	arm_pm_idle = omap2_pm_idle;

	return 0;
}
gpl-2.0
shane87/android_kernel_lge_g3
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
3735
98135
/* * omap_hwmod_3xxx_data.c - hardware modules present on the OMAP3xxx chips * * Copyright (C) 2009-2011 Nokia Corporation * Paul Walmsley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The data in this file should be completely autogeneratable from * the TI hardware database or other technical documentation. * * XXX these should be marked initdata for multi-OMAP kernels */ #include <plat/omap_hwmod.h> #include <mach/irqs.h> #include <plat/cpu.h> #include <plat/dma.h> #include <plat/serial.h> #include <plat/l3_3xxx.h> #include <plat/l4_3xxx.h> #include <plat/i2c.h> #include <plat/gpio.h> #include <plat/mmc.h> #include <plat/mcbsp.h> #include <plat/mcspi.h> #include <plat/dmtimer.h> #include "omap_hwmod_common_data.h" #include "smartreflex.h" #include "prm-regbits-34xx.h" #include "cm-regbits-34xx.h" #include "wd_timer.h" #include <mach/am35xx.h> /* * OMAP3xxx hardware module integration data * * ALl of the data in this section should be autogeneratable from the * TI hardware database or other technical documentation. Data that * is driver-specific or driver-kernel integration-specific belongs * elsewhere. 
*/ static struct omap_hwmod omap3xxx_mpu_hwmod; static struct omap_hwmod omap3xxx_iva_hwmod; static struct omap_hwmod omap3xxx_l3_main_hwmod; static struct omap_hwmod omap3xxx_l4_core_hwmod; static struct omap_hwmod omap3xxx_l4_per_hwmod; static struct omap_hwmod omap3xxx_wd_timer2_hwmod; static struct omap_hwmod omap3430es1_dss_core_hwmod; static struct omap_hwmod omap3xxx_dss_core_hwmod; static struct omap_hwmod omap3xxx_dss_dispc_hwmod; static struct omap_hwmod omap3xxx_dss_dsi1_hwmod; static struct omap_hwmod omap3xxx_dss_rfbi_hwmod; static struct omap_hwmod omap3xxx_dss_venc_hwmod; static struct omap_hwmod omap3xxx_i2c1_hwmod; static struct omap_hwmod omap3xxx_i2c2_hwmod; static struct omap_hwmod omap3xxx_i2c3_hwmod; static struct omap_hwmod omap3xxx_gpio1_hwmod; static struct omap_hwmod omap3xxx_gpio2_hwmod; static struct omap_hwmod omap3xxx_gpio3_hwmod; static struct omap_hwmod omap3xxx_gpio4_hwmod; static struct omap_hwmod omap3xxx_gpio5_hwmod; static struct omap_hwmod omap3xxx_gpio6_hwmod; static struct omap_hwmod omap34xx_sr1_hwmod; static struct omap_hwmod omap34xx_sr2_hwmod; static struct omap_hwmod omap34xx_mcspi1; static struct omap_hwmod omap34xx_mcspi2; static struct omap_hwmod omap34xx_mcspi3; static struct omap_hwmod omap34xx_mcspi4; static struct omap_hwmod omap3xxx_mmc1_hwmod; static struct omap_hwmod omap3xxx_mmc2_hwmod; static struct omap_hwmod omap3xxx_mmc3_hwmod; static struct omap_hwmod am35xx_usbhsotg_hwmod; static struct omap_hwmod omap3xxx_dma_system_hwmod; static struct omap_hwmod omap3xxx_mcbsp1_hwmod; static struct omap_hwmod omap3xxx_mcbsp2_hwmod; static struct omap_hwmod omap3xxx_mcbsp3_hwmod; static struct omap_hwmod omap3xxx_mcbsp4_hwmod; static struct omap_hwmod omap3xxx_mcbsp5_hwmod; static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod; static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod; static struct omap_hwmod omap3xxx_usb_host_hs_hwmod; static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod; /* L3 -> L4_CORE interface */ 
static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = { .master = &omap3xxx_l3_main_hwmod, .slave = &omap3xxx_l4_core_hwmod, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L3 -> L4_PER interface */ static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_per = { .master = &omap3xxx_l3_main_hwmod, .slave = &omap3xxx_l4_per_hwmod, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L3 taret configuration and error log registers */ static struct omap_hwmod_irq_info omap3xxx_l3_main_irqs[] = { { .irq = INT_34XX_L3_DBG_IRQ }, { .irq = INT_34XX_L3_APP_IRQ }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap3xxx_l3_main_addrs[] = { { .pa_start = 0x68000000, .pa_end = 0x6800ffff, .flags = ADDR_TYPE_RT, }, { } }; /* MPU -> L3 interface */ static struct omap_hwmod_ocp_if omap3xxx_mpu__l3_main = { .master = &omap3xxx_mpu_hwmod, .slave = &omap3xxx_l3_main_hwmod, .addr = omap3xxx_l3_main_addrs, .user = OCP_USER_MPU, }; /* Slave interfaces on the L3 interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l3_main_slaves[] = { &omap3xxx_mpu__l3_main, }; /* DSS -> l3 */ static struct omap_hwmod_ocp_if omap3xxx_dss__l3 = { .master = &omap3xxx_dss_core_hwmod, .slave = &omap3xxx_l3_main_hwmod, .fw = { .omap2 = { .l3_perm_bit = OMAP3_L3_CORE_FW_INIT_ID_DSS, .flags = OMAP_FIREWALL_L3, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* Master interfaces on the L3 interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l3_main_masters[] = { &omap3xxx_l3_main__l4_core, &omap3xxx_l3_main__l4_per, }; /* L3 */ static struct omap_hwmod omap3xxx_l3_main_hwmod = { .name = "l3_main", .class = &l3_hwmod_class, .mpu_irqs = omap3xxx_l3_main_irqs, .masters = omap3xxx_l3_main_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_l3_main_masters), .slaves = omap3xxx_l3_main_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_l3_main_slaves), .flags = HWMOD_NO_IDLEST, }; static struct omap_hwmod omap3xxx_l4_wkup_hwmod; static struct omap_hwmod omap3xxx_uart1_hwmod; static struct omap_hwmod omap3xxx_uart2_hwmod; static 
struct omap_hwmod omap3xxx_uart3_hwmod; static struct omap_hwmod omap3xxx_uart4_hwmod; static struct omap_hwmod am35xx_uart4_hwmod; static struct omap_hwmod omap3xxx_usbhsotg_hwmod; /* l3_core -> usbhsotg interface */ static struct omap_hwmod_ocp_if omap3xxx_usbhsotg__l3 = { .master = &omap3xxx_usbhsotg_hwmod, .slave = &omap3xxx_l3_main_hwmod, .clk = "core_l3_ick", .user = OCP_USER_MPU, }; /* l3_core -> am35xx_usbhsotg interface */ static struct omap_hwmod_ocp_if am35xx_usbhsotg__l3 = { .master = &am35xx_usbhsotg_hwmod, .slave = &omap3xxx_l3_main_hwmod, .clk = "core_l3_ick", .user = OCP_USER_MPU, }; /* L4_CORE -> L4_WKUP interface */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_l4_wkup_hwmod, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 CORE -> MMC1 interface */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_mmc1_hwmod, .clk = "mmchs1_ick", .addr = omap2430_mmc1_addr_space, .user = OCP_USER_MPU | OCP_USER_SDMA, .flags = OMAP_FIREWALL_L4 }; /* L4 CORE -> MMC2 interface */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_mmc2_hwmod, .clk = "mmchs2_ick", .addr = omap2430_mmc2_addr_space, .user = OCP_USER_MPU | OCP_USER_SDMA, .flags = OMAP_FIREWALL_L4 }; /* L4 CORE -> MMC3 interface */ static struct omap_hwmod_addr_space omap3xxx_mmc3_addr_space[] = { { .pa_start = 0x480ad000, .pa_end = 0x480ad1ff, .flags = ADDR_TYPE_RT, }, { } }; static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc3 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_mmc3_hwmod, .clk = "mmchs3_ick", .addr = omap3xxx_mmc3_addr_space, .user = OCP_USER_MPU | OCP_USER_SDMA, .flags = OMAP_FIREWALL_L4 }; /* L4 CORE -> UART1 interface */ static struct omap_hwmod_addr_space omap3xxx_uart1_addr_space[] = { { .pa_start = OMAP3_UART1_BASE, .pa_end = OMAP3_UART1_BASE + SZ_8K - 1, .flags = ADDR_MAP_ON_INIT | 
ADDR_TYPE_RT, }, { } }; static struct omap_hwmod_ocp_if omap3_l4_core__uart1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_uart1_hwmod, .clk = "uart1_ick", .addr = omap3xxx_uart1_addr_space, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 CORE -> UART2 interface */ static struct omap_hwmod_addr_space omap3xxx_uart2_addr_space[] = { { .pa_start = OMAP3_UART2_BASE, .pa_end = OMAP3_UART2_BASE + SZ_1K - 1, .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT, }, { } }; static struct omap_hwmod_ocp_if omap3_l4_core__uart2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_uart2_hwmod, .clk = "uart2_ick", .addr = omap3xxx_uart2_addr_space, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 PER -> UART3 interface */ static struct omap_hwmod_addr_space omap3xxx_uart3_addr_space[] = { { .pa_start = OMAP3_UART3_BASE, .pa_end = OMAP3_UART3_BASE + SZ_1K - 1, .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT, }, { } }; static struct omap_hwmod_ocp_if omap3_l4_per__uart3 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_uart3_hwmod, .clk = "uart3_ick", .addr = omap3xxx_uart3_addr_space, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 PER -> UART4 interface */ static struct omap_hwmod_addr_space omap3xxx_uart4_addr_space[] = { { .pa_start = OMAP3_UART4_BASE, .pa_end = OMAP3_UART4_BASE + SZ_1K - 1, .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT, }, { } }; static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_uart4_hwmod, .clk = "uart4_ick", .addr = omap3xxx_uart4_addr_space, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* AM35xx: L4 CORE -> UART4 interface */ static struct omap_hwmod_addr_space am35xx_uart4_addr_space[] = { { .pa_start = OMAP3_UART4_AM35XX_BASE, .pa_end = OMAP3_UART4_AM35XX_BASE + SZ_1K - 1, .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT, }, }; static struct omap_hwmod_ocp_if am35xx_l4_core__uart4 = { .master = &omap3xxx_l4_core_hwmod, .slave = &am35xx_uart4_hwmod, .clk = "uart4_ick", .addr = am35xx_uart4_addr_space, .user = 
OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 CORE -> I2C1 interface */ static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_i2c1_hwmod, .clk = "i2c1_ick", .addr = omap2_i2c1_addr_space, .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_I2C1_REGION, .l4_prot_group = 7, .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 CORE -> I2C2 interface */ static struct omap_hwmod_ocp_if omap3_l4_core__i2c2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_i2c2_hwmod, .clk = "i2c2_ick", .addr = omap2_i2c2_addr_space, .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_I2C2_REGION, .l4_prot_group = 7, .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* L4 CORE -> I2C3 interface */ static struct omap_hwmod_addr_space omap3xxx_i2c3_addr_space[] = { { .pa_start = 0x48060000, .pa_end = 0x48060000 + SZ_128 - 1, .flags = ADDR_TYPE_RT, }, { } }; static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_i2c3_hwmod, .clk = "i2c3_ick", .addr = omap3xxx_i2c3_addr_space, .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_I2C3_REGION, .l4_prot_group = 7, .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_irq_info omap3_smartreflex_mpu_irqs[] = { { .irq = 18}, { .irq = -1 } }; static struct omap_hwmod_irq_info omap3_smartreflex_core_irqs[] = { { .irq = 19}, { .irq = -1 } }; /* L4 CORE -> SR1 interface */ static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = { { .pa_start = OMAP34XX_SR1_BASE, .pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1, .flags = ADDR_TYPE_RT, }, { } }; static struct omap_hwmod_ocp_if omap3_l4_core__sr1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_sr1_hwmod, .clk = "sr_l4_ick", .addr = omap3_sr1_addr_space, .user = OCP_USER_MPU, }; /* L4 CORE -> SR1 interface */ static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = { { .pa_start = 
OMAP34XX_SR2_BASE, .pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1, .flags = ADDR_TYPE_RT, }, { } }; static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_sr2_hwmod, .clk = "sr_l4_ick", .addr = omap3_sr2_addr_space, .user = OCP_USER_MPU, }; /* * usbhsotg interface data */ static struct omap_hwmod_addr_space omap3xxx_usbhsotg_addrs[] = { { .pa_start = OMAP34XX_HSUSB_OTG_BASE, .pa_end = OMAP34XX_HSUSB_OTG_BASE + SZ_4K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_core -> usbhsotg */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__usbhsotg = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_usbhsotg_hwmod, .clk = "l4_ick", .addr = omap3xxx_usbhsotg_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_masters[] = { &omap3xxx_usbhsotg__l3, }; static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_slaves[] = { &omap3xxx_l4_core__usbhsotg, }; static struct omap_hwmod_addr_space am35xx_usbhsotg_addrs[] = { { .pa_start = AM35XX_IPSS_USBOTGSS_BASE, .pa_end = AM35XX_IPSS_USBOTGSS_BASE + SZ_4K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_core -> usbhsotg */ static struct omap_hwmod_ocp_if am35xx_l4_core__usbhsotg = { .master = &omap3xxx_l4_core_hwmod, .slave = &am35xx_usbhsotg_hwmod, .clk = "l4_ick", .addr = am35xx_usbhsotg_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_ocp_if *am35xx_usbhsotg_masters[] = { &am35xx_usbhsotg__l3, }; static struct omap_hwmod_ocp_if *am35xx_usbhsotg_slaves[] = { &am35xx_l4_core__usbhsotg, }; /* Slave interfaces on the L4_CORE interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l4_core_slaves[] = { &omap3xxx_l3_main__l4_core, }; /* L4 CORE */ static struct omap_hwmod omap3xxx_l4_core_hwmod = { .name = "l4_core", .class = &l4_hwmod_class, .slaves = omap3xxx_l4_core_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_l4_core_slaves), .flags = HWMOD_NO_IDLEST, }; /* Slave interfaces on the L4_PER interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l4_per_slaves[] 
= { &omap3xxx_l3_main__l4_per, }; /* L4 PER */ static struct omap_hwmod omap3xxx_l4_per_hwmod = { .name = "l4_per", .class = &l4_hwmod_class, .slaves = omap3xxx_l4_per_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_l4_per_slaves), .flags = HWMOD_NO_IDLEST, }; /* Slave interfaces on the L4_WKUP interconnect */ static struct omap_hwmod_ocp_if *omap3xxx_l4_wkup_slaves[] = { &omap3xxx_l4_core__l4_wkup, }; /* L4 WKUP */ static struct omap_hwmod omap3xxx_l4_wkup_hwmod = { .name = "l4_wkup", .class = &l4_hwmod_class, .slaves = omap3xxx_l4_wkup_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_l4_wkup_slaves), .flags = HWMOD_NO_IDLEST, }; /* Master interfaces on the MPU device */ static struct omap_hwmod_ocp_if *omap3xxx_mpu_masters[] = { &omap3xxx_mpu__l3_main, }; /* MPU */ static struct omap_hwmod omap3xxx_mpu_hwmod = { .name = "mpu", .class = &mpu_hwmod_class, .main_clk = "arm_fck", .masters = omap3xxx_mpu_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_mpu_masters), }; /* * IVA2_2 interface data */ /* IVA2 <- L3 interface */ static struct omap_hwmod_ocp_if omap3xxx_l3__iva = { .master = &omap3xxx_l3_main_hwmod, .slave = &omap3xxx_iva_hwmod, .clk = "iva2_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_ocp_if *omap3xxx_iva_masters[] = { &omap3xxx_l3__iva, }; /* * IVA2 (IVA2) */ static struct omap_hwmod omap3xxx_iva_hwmod = { .name = "iva", .class = &iva_hwmod_class, .masters = omap3xxx_iva_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_iva_masters), }; /* timer class */ static struct omap_hwmod_class_sysconfig omap3xxx_timer_1ms_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_timer_1ms_hwmod_class = { .name = "timer", .sysc = &omap3xxx_timer_1ms_sysc, .rev = 
OMAP_TIMER_IP_VERSION_1, }; static struct omap_hwmod_class_sysconfig omap3xxx_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_timer_hwmod_class = { .name = "timer", .sysc = &omap3xxx_timer_sysc, .rev = OMAP_TIMER_IP_VERSION_1, }; /* secure timers dev attribute */ static struct omap_timer_capability_dev_attr capability_secure_dev_attr = { .timer_capability = OMAP_TIMER_SECURE, }; /* always-on timers dev attribute */ static struct omap_timer_capability_dev_attr capability_alwon_dev_attr = { .timer_capability = OMAP_TIMER_ALWON, }; /* pwm timers dev attribute */ static struct omap_timer_capability_dev_attr capability_pwm_dev_attr = { .timer_capability = OMAP_TIMER_HAS_PWM, }; /* timer1 */ static struct omap_hwmod omap3xxx_timer1_hwmod; static struct omap_hwmod_addr_space omap3xxx_timer1_addrs[] = { { .pa_start = 0x48318000, .pa_end = 0x48318000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_wkup -> timer1 */ static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__timer1 = { .master = &omap3xxx_l4_wkup_hwmod, .slave = &omap3xxx_timer1_hwmod, .clk = "gpt1_ick", .addr = omap3xxx_timer1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer1 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer1_slaves[] = { &omap3xxx_l4_wkup__timer1, }; /* timer1 hwmod */ static struct omap_hwmod omap3xxx_timer1_hwmod = { .name = "timer1", .mpu_irqs = omap2_timer1_mpu_irqs, .main_clk = "gpt1_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT1_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT1_SHIFT, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap3xxx_timer1_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer1_slaves), .class = 
&omap3xxx_timer_1ms_hwmod_class, }; /* timer2 */ static struct omap_hwmod omap3xxx_timer2_hwmod; static struct omap_hwmod_addr_space omap3xxx_timer2_addrs[] = { { .pa_start = 0x49032000, .pa_end = 0x49032000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer2 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer2 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer2_hwmod, .clk = "gpt2_ick", .addr = omap3xxx_timer2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer2 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer2_slaves[] = { &omap3xxx_l4_per__timer2, }; /* timer2 hwmod */ static struct omap_hwmod omap3xxx_timer2_hwmod = { .name = "timer2", .mpu_irqs = omap2_timer2_mpu_irqs, .main_clk = "gpt2_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT2_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT2_SHIFT, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap3xxx_timer2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer2_slaves), .class = &omap3xxx_timer_1ms_hwmod_class, }; /* timer3 */ static struct omap_hwmod omap3xxx_timer3_hwmod; static struct omap_hwmod_addr_space omap3xxx_timer3_addrs[] = { { .pa_start = 0x49034000, .pa_end = 0x49034000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer3 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer3 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer3_hwmod, .clk = "gpt3_ick", .addr = omap3xxx_timer3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer3 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer3_slaves[] = { &omap3xxx_l4_per__timer3, }; /* timer3 hwmod */ static struct omap_hwmod omap3xxx_timer3_hwmod = { .name = "timer3", .mpu_irqs = omap2_timer3_mpu_irqs, .main_clk = "gpt3_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT3_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT3_SHIFT, }, 
}, .dev_attr = &capability_alwon_dev_attr, .slaves = omap3xxx_timer3_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer3_slaves), .class = &omap3xxx_timer_hwmod_class, }; /* timer4 */ static struct omap_hwmod omap3xxx_timer4_hwmod; static struct omap_hwmod_addr_space omap3xxx_timer4_addrs[] = { { .pa_start = 0x49036000, .pa_end = 0x49036000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer4 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer4 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer4_hwmod, .clk = "gpt4_ick", .addr = omap3xxx_timer4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer4 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer4_slaves[] = { &omap3xxx_l4_per__timer4, }; /* timer4 hwmod */ static struct omap_hwmod omap3xxx_timer4_hwmod = { .name = "timer4", .mpu_irqs = omap2_timer4_mpu_irqs, .main_clk = "gpt4_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT4_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT4_SHIFT, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap3xxx_timer4_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer4_slaves), .class = &omap3xxx_timer_hwmod_class, }; /* timer5 */ static struct omap_hwmod omap3xxx_timer5_hwmod; static struct omap_hwmod_addr_space omap3xxx_timer5_addrs[] = { { .pa_start = 0x49038000, .pa_end = 0x49038000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer5 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer5 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer5_hwmod, .clk = "gpt5_ick", .addr = omap3xxx_timer5_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer5 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer5_slaves[] = { &omap3xxx_l4_per__timer5, }; /* timer5 hwmod */ static struct omap_hwmod omap3xxx_timer5_hwmod = { .name = "timer5", .mpu_irqs = omap2_timer5_mpu_irqs, .main_clk = "gpt5_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, 
.module_bit = OMAP3430_EN_GPT5_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT5_SHIFT, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap3xxx_timer5_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer5_slaves), .class = &omap3xxx_timer_hwmod_class, }; /* timer6 */ static struct omap_hwmod omap3xxx_timer6_hwmod; static struct omap_hwmod_addr_space omap3xxx_timer6_addrs[] = { { .pa_start = 0x4903A000, .pa_end = 0x4903A000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer6 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer6 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer6_hwmod, .clk = "gpt6_ick", .addr = omap3xxx_timer6_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer6 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer6_slaves[] = { &omap3xxx_l4_per__timer6, }; /* timer6 hwmod */ static struct omap_hwmod omap3xxx_timer6_hwmod = { .name = "timer6", .mpu_irqs = omap2_timer6_mpu_irqs, .main_clk = "gpt6_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT6_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT6_SHIFT, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap3xxx_timer6_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer6_slaves), .class = &omap3xxx_timer_hwmod_class, }; /* timer7 */ static struct omap_hwmod omap3xxx_timer7_hwmod; static struct omap_hwmod_addr_space omap3xxx_timer7_addrs[] = { { .pa_start = 0x4903C000, .pa_end = 0x4903C000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer7 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer7 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer7_hwmod, .clk = "gpt7_ick", .addr = omap3xxx_timer7_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer7 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer7_slaves[] = { &omap3xxx_l4_per__timer7, }; /* timer7 hwmod */ static struct omap_hwmod 
omap3xxx_timer7_hwmod = { .name = "timer7", .mpu_irqs = omap2_timer7_mpu_irqs, .main_clk = "gpt7_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT7_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT7_SHIFT, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap3xxx_timer7_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer7_slaves), .class = &omap3xxx_timer_hwmod_class, }; /* timer8 */ static struct omap_hwmod omap3xxx_timer8_hwmod; static struct omap_hwmod_addr_space omap3xxx_timer8_addrs[] = { { .pa_start = 0x4903E000, .pa_end = 0x4903E000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer8 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer8 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer8_hwmod, .clk = "gpt8_ick", .addr = omap3xxx_timer8_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer8 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer8_slaves[] = { &omap3xxx_l4_per__timer8, }; /* timer8 hwmod */ static struct omap_hwmod omap3xxx_timer8_hwmod = { .name = "timer8", .mpu_irqs = omap2_timer8_mpu_irqs, .main_clk = "gpt8_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT8_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT8_SHIFT, }, }, .dev_attr = &capability_pwm_dev_attr, .slaves = omap3xxx_timer8_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer8_slaves), .class = &omap3xxx_timer_hwmod_class, }; /* timer9 */ static struct omap_hwmod omap3xxx_timer9_hwmod; static struct omap_hwmod_addr_space omap3xxx_timer9_addrs[] = { { .pa_start = 0x49040000, .pa_end = 0x49040000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer9 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer9 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_timer9_hwmod, .clk = "gpt9_ick", .addr = omap3xxx_timer9_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer9 slave port */ static 
struct omap_hwmod_ocp_if *omap3xxx_timer9_slaves[] = { &omap3xxx_l4_per__timer9, }; /* timer9 hwmod */ static struct omap_hwmod omap3xxx_timer9_hwmod = { .name = "timer9", .mpu_irqs = omap2_timer9_mpu_irqs, .main_clk = "gpt9_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT9_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT9_SHIFT, }, }, .dev_attr = &capability_pwm_dev_attr, .slaves = omap3xxx_timer9_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer9_slaves), .class = &omap3xxx_timer_hwmod_class, }; /* timer10 */ static struct omap_hwmod omap3xxx_timer10_hwmod; /* l4_core -> timer10 */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer10 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_timer10_hwmod, .clk = "gpt10_ick", .addr = omap2_timer10_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer10 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer10_slaves[] = { &omap3xxx_l4_core__timer10, }; /* timer10 hwmod */ static struct omap_hwmod omap3xxx_timer10_hwmod = { .name = "timer10", .mpu_irqs = omap2_timer10_mpu_irqs, .main_clk = "gpt10_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT10_SHIFT, .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT10_SHIFT, }, }, .dev_attr = &capability_pwm_dev_attr, .slaves = omap3xxx_timer10_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer10_slaves), .class = &omap3xxx_timer_1ms_hwmod_class, }; /* timer11 */ static struct omap_hwmod omap3xxx_timer11_hwmod; /* l4_core -> timer11 */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer11 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_timer11_hwmod, .clk = "gpt11_ick", .addr = omap2_timer11_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer11 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer11_slaves[] = { &omap3xxx_l4_core__timer11, }; /* timer11 hwmod */ static struct omap_hwmod omap3xxx_timer11_hwmod = { 
.name = "timer11", .mpu_irqs = omap2_timer11_mpu_irqs, .main_clk = "gpt11_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT11_SHIFT, .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT11_SHIFT, }, }, .dev_attr = &capability_pwm_dev_attr, .slaves = omap3xxx_timer11_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer11_slaves), .class = &omap3xxx_timer_hwmod_class, }; /* timer12*/ static struct omap_hwmod omap3xxx_timer12_hwmod; static struct omap_hwmod_irq_info omap3xxx_timer12_mpu_irqs[] = { { .irq = 95, }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap3xxx_timer12_addrs[] = { { .pa_start = 0x48304000, .pa_end = 0x48304000 + SZ_1K - 1, .flags = ADDR_TYPE_RT }, { } }; /* l4_core -> timer12 */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer12 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_timer12_hwmod, .clk = "gpt12_ick", .addr = omap3xxx_timer12_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer12 slave port */ static struct omap_hwmod_ocp_if *omap3xxx_timer12_slaves[] = { &omap3xxx_l4_core__timer12, }; /* timer12 hwmod */ static struct omap_hwmod omap3xxx_timer12_hwmod = { .name = "timer12", .mpu_irqs = omap3xxx_timer12_mpu_irqs, .main_clk = "gpt12_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPT12_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPT12_SHIFT, }, }, .dev_attr = &capability_secure_dev_attr, .slaves = omap3xxx_timer12_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_timer12_slaves), .class = &omap3xxx_timer_hwmod_class, }; /* l4_wkup -> wd_timer2 */ static struct omap_hwmod_addr_space omap3xxx_wd_timer2_addrs[] = { { .pa_start = 0x48314000, .pa_end = 0x4831407f, .flags = ADDR_TYPE_RT }, { } }; static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = { .master = &omap3xxx_l4_wkup_hwmod, .slave = &omap3xxx_wd_timer2_hwmod, .clk = "wdt2_ick", .addr = omap3xxx_wd_timer2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, 
}; /* * 'wd_timer' class * 32-bit watchdog upward counter that generates a pulse on the reset pin on * overflow condition */ static struct omap_hwmod_class_sysconfig omap3xxx_wd_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; /* I2C common */ static struct omap_hwmod_class_sysconfig i2c_sysc = { .rev_offs = 0x00, .sysc_offs = 0x20, .syss_offs = 0x10, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .clockact = CLOCKACT_TEST_ICLK, .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_wd_timer_hwmod_class = { .name = "wd_timer", .sysc = &omap3xxx_wd_timer_sysc, .pre_shutdown = &omap2_wd_timer_disable }; /* wd_timer2 */ static struct omap_hwmod_ocp_if *omap3xxx_wd_timer2_slaves[] = { &omap3xxx_l4_wkup__wd_timer2, }; static struct omap_hwmod omap3xxx_wd_timer2_hwmod = { .name = "wd_timer2", .class = &omap3xxx_wd_timer_hwmod_class, .main_clk = "wdt2_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_WDT2_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_WDT2_SHIFT, }, }, .slaves = omap3xxx_wd_timer2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_wd_timer2_slaves), /* * XXX: Use software supervised mode, HW supervised smartidle seems to * block CORE power domain idle transitions. Maybe a HW bug in wdt2? 
*/ .flags = HWMOD_SWSUP_SIDLE, }; /* UART1 */ static struct omap_hwmod_ocp_if *omap3xxx_uart1_slaves[] = { &omap3_l4_core__uart1, }; static struct omap_hwmod omap3xxx_uart1_hwmod = { .name = "uart1", .mpu_irqs = omap2_uart1_mpu_irqs, .sdma_reqs = omap2_uart1_sdma_reqs, .main_clk = "uart1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_UART1_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_UART1_SHIFT, }, }, .slaves = omap3xxx_uart1_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_uart1_slaves), .class = &omap2_uart_class, }; /* UART2 */ static struct omap_hwmod_ocp_if *omap3xxx_uart2_slaves[] = { &omap3_l4_core__uart2, }; static struct omap_hwmod omap3xxx_uart2_hwmod = { .name = "uart2", .mpu_irqs = omap2_uart2_mpu_irqs, .sdma_reqs = omap2_uart2_sdma_reqs, .main_clk = "uart2_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_UART2_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_UART2_SHIFT, }, }, .slaves = omap3xxx_uart2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_uart2_slaves), .class = &omap2_uart_class, }; /* UART3 */ static struct omap_hwmod_ocp_if *omap3xxx_uart3_slaves[] = { &omap3_l4_per__uart3, }; static struct omap_hwmod omap3xxx_uart3_hwmod = { .name = "uart3", .mpu_irqs = omap2_uart3_mpu_irqs, .sdma_reqs = omap2_uart3_sdma_reqs, .main_clk = "uart3_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_UART3_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_UART3_SHIFT, }, }, .slaves = omap3xxx_uart3_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_uart3_slaves), .class = &omap2_uart_class, }; /* UART4 */ static struct omap_hwmod_irq_info uart4_mpu_irqs[] = { { .irq = INT_36XX_UART4_IRQ, }, { .irq = -1 } }; static struct omap_hwmod_dma_info uart4_sdma_reqs[] = { { .name = "rx", .dma_req = OMAP36XX_DMA_UART4_RX, }, { .name = "tx", .dma_req = OMAP36XX_DMA_UART4_TX, }, { .dma_req = -1 } }; static struct 
omap_hwmod_ocp_if *omap3xxx_uart4_slaves[] = { &omap3_l4_per__uart4, }; static struct omap_hwmod omap3xxx_uart4_hwmod = { .name = "uart4", .mpu_irqs = uart4_mpu_irqs, .sdma_reqs = uart4_sdma_reqs, .main_clk = "uart4_fck", .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, .prcm_reg_id = 1, .module_bit = OMAP3630_EN_UART4_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3630_EN_UART4_SHIFT, }, }, .slaves = omap3xxx_uart4_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_uart4_slaves), .class = &omap2_uart_class, }; static struct omap_hwmod_irq_info am35xx_uart4_mpu_irqs[] = { { .irq = INT_35XX_UART4_IRQ, }, }; static struct omap_hwmod_dma_info am35xx_uart4_sdma_reqs[] = { { .name = "rx", .dma_req = AM35XX_DMA_UART4_RX, }, { .name = "tx", .dma_req = AM35XX_DMA_UART4_TX, }, }; static struct omap_hwmod_ocp_if *am35xx_uart4_slaves[] = { &am35xx_l4_core__uart4, }; static struct omap_hwmod am35xx_uart4_hwmod = { .name = "uart4", .mpu_irqs = am35xx_uart4_mpu_irqs, .sdma_reqs = am35xx_uart4_sdma_reqs, .main_clk = "uart4_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_UART4_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_UART4_SHIFT, }, }, .slaves = am35xx_uart4_slaves, .slaves_cnt = ARRAY_SIZE(am35xx_uart4_slaves), .class = &omap2_uart_class, }; static struct omap_hwmod_class i2c_class = { .name = "i2c", .sysc = &i2c_sysc, .rev = OMAP_I2C_IP_VERSION_1, .reset = &omap_i2c_reset, }; static struct omap_hwmod_dma_info omap3xxx_dss_sdma_chs[] = { { .name = "dispc", .dma_req = 5 }, { .name = "dsi1", .dma_req = 74 }, { .dma_req = -1 } }; /* dss */ /* dss master ports */ static struct omap_hwmod_ocp_if *omap3xxx_dss_masters[] = { &omap3xxx_dss__l3, }; /* l4_core -> dss */ static struct omap_hwmod_ocp_if omap3430es1_l4_core__dss = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3430es1_dss_core_hwmod, .clk = "dss_ick", .addr = omap2_dss_addrs, .fw = { .omap2 = { .l4_fw_region = OMAP3ES1_L4_CORE_FW_DSS_CORE_REGION, 
.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP, .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_dss_core_hwmod, .clk = "dss_ick", .addr = omap2_dss_addrs, .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_DSS_CORE_REGION, .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP, .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dss slave ports */ static struct omap_hwmod_ocp_if *omap3430es1_dss_slaves[] = { &omap3430es1_l4_core__dss, }; static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = { &omap3xxx_l4_core__dss, }; static struct omap_hwmod_opt_clk dss_opt_clks[] = { /* * The DSS HW needs all DSS clocks enabled during reset. The dss_core * driver does not use these clocks. */ { .role = "sys_clk", .clk = "dss2_alwon_fck" }, { .role = "tv_clk", .clk = "dss_tv_fck" }, /* required only on OMAP3430 */ { .role = "tv_dac_clk", .clk = "dss_96m_fck" }, }; static struct omap_hwmod omap3430es1_dss_core_hwmod = { .name = "dss_core", .class = &omap2_dss_hwmod_class, .main_clk = "dss1_alwon_fck", /* instead of dss_fck */ .sdma_reqs = omap3xxx_dss_sdma_chs, .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_DSS1_SHIFT, .module_offs = OMAP3430_DSS_MOD, .idlest_reg_id = 1, .idlest_stdby_bit = OMAP3430ES1_ST_DSS_SHIFT, }, }, .opt_clks = dss_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks), .slaves = omap3430es1_dss_slaves, .slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves), .masters = omap3xxx_dss_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters), .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET, }; static struct omap_hwmod omap3xxx_dss_core_hwmod = { .name = "dss_core", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .class = &omap2_dss_hwmod_class, .main_clk = "dss1_alwon_fck", /* instead of dss_fck */ .sdma_reqs = omap3xxx_dss_sdma_chs, .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = 
OMAP3430_EN_DSS1_SHIFT, .module_offs = OMAP3430_DSS_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT, .idlest_stdby_bit = OMAP3430ES2_ST_DSS_STDBY_SHIFT, }, }, .opt_clks = dss_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks), .slaves = omap3xxx_dss_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_slaves), .masters = omap3xxx_dss_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters), }; /* * 'dispc' class * display controller */ static struct omap_hwmod_class_sysconfig omap3_dispc_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3_dispc_hwmod_class = { .name = "dispc", .sysc = &omap3_dispc_sysc, }; /* l4_core -> dss_dispc */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_dss_dispc_hwmod, .clk = "dss_ick", .addr = omap2_dss_dispc_addrs, .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_DSS_DISPC_REGION, .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP, .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dss_dispc slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = { &omap3xxx_l4_core__dss_dispc, }; static struct omap_hwmod omap3xxx_dss_dispc_hwmod = { .name = "dss_dispc", .class = &omap3_dispc_hwmod_class, .mpu_irqs = omap2_dispc_irqs, .main_clk = "dss1_alwon_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_DSS1_SHIFT, .module_offs = OMAP3430_DSS_MOD, }, }, .slaves = omap3xxx_dss_dispc_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves), .flags = HWMOD_NO_IDLEST, .dev_attr = &omap2_3_dss_dispc_dev_attr }; /* * 'dsi' class * display serial interface controller */ static struct 
/*
 * NOTE(review): DSI / RFBI / VENC hwmod data. This line finishes the
 * 'dsi' class started above, then defines: the DSI1 IRQ table (IRQ 25,
 * -1 terminated), the dss_dsi1 MMIO window (0x4804FC00-0x4804FFFF,
 * ADDR_TYPE_RT), the firewalled l4_core->dss_dsi1 interface, the dsi1
 * optional sys_clk, and the dss_dsi1 hwmod (HWMOD_NO_IDLEST). The
 * l4_core->dss_rfbi interface follows; the rfbi opt-clock array is cut
 * at the line break and continues on the next line.
 */
omap_hwmod_class omap3xxx_dsi_hwmod_class = { .name = "dsi", }; static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = { { .irq = 25 }, { .irq = -1 } }; /* dss_dsi1 */ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = { { .pa_start = 0x4804FC00, .pa_end = 0x4804FFFF, .flags = ADDR_TYPE_RT }, { } }; /* l4_core -> dss_dsi1 */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_dss_dsi1_hwmod, .clk = "dss_ick", .addr = omap3xxx_dss_dsi1_addrs, .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_DSS_DSI_REGION, .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP, .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dss_dsi1 slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = { &omap3xxx_l4_core__dss_dsi1, }; static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = { { .role = "sys_clk", .clk = "dss2_alwon_fck" }, }; static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = { .name = "dss_dsi1", .class = &omap3xxx_dsi_hwmod_class, .mpu_irqs = omap3xxx_dsi1_irqs, .main_clk = "dss1_alwon_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_DSS1_SHIFT, .module_offs = OMAP3430_DSS_MOD, }, }, .opt_clks = dss_dsi1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks), .slaves = omap3xxx_dss_dsi1_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves), .flags = HWMOD_NO_IDLEST, }; /* l4_core -> dss_rfbi */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_rfbi = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_dss_rfbi_hwmod, .clk = "dss_ick", .addr = omap2_dss_rfbi_addrs, .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_DSS_RFBI_REGION, .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP , .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dss_rfbi slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = { &omap3xxx_l4_core__dss_rfbi, }; static struct omap_hwmod_opt_clk 
/*
 * Continuation: rfbi optional interface clock, the dss_rfbi and
 * dss_venc hwmods (both HWMOD_NO_IDLEST; venc runs from dss_tv_fck and
 * needs tv_dac_clk on 3430 only), then the I2C1 device attributes
 * (8-byte FIFO, errata i207 workaround) and slave-port array. The i2c1
 * hwmod's .flags value is cut at the line break and continues on the
 * next line.
 */
dss_rfbi_opt_clks[] = { { .role = "ick", .clk = "dss_ick" }, }; static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = { .name = "dss_rfbi", .class = &omap2_rfbi_hwmod_class, .main_clk = "dss1_alwon_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_DSS1_SHIFT, .module_offs = OMAP3430_DSS_MOD, }, }, .opt_clks = dss_rfbi_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks), .slaves = omap3xxx_dss_rfbi_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves), .flags = HWMOD_NO_IDLEST, }; /* l4_core -> dss_venc */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_dss_venc_hwmod, .clk = "dss_ick", .addr = omap2_dss_venc_addrs, .fw = { .omap2 = { .l4_fw_region = OMAP3_L4_CORE_FW_DSS_VENC_REGION, .l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP, .flags = OMAP_FIREWALL_L4, } }, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dss_venc slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = { &omap3xxx_l4_core__dss_venc, }; static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = { /* required only on OMAP3430 */ { .role = "tv_dac_clk", .clk = "dss_96m_fck" }, }; static struct omap_hwmod omap3xxx_dss_venc_hwmod = { .name = "dss_venc", .class = &omap2_venc_hwmod_class, .main_clk = "dss_tv_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_DSS1_SHIFT, .module_offs = OMAP3430_DSS_MOD, }, }, .opt_clks = dss_venc_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_venc_opt_clks), .slaves = omap3xxx_dss_venc_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves), .flags = HWMOD_NO_IDLEST, }; /* I2C1 */ static struct omap_i2c_dev_attr i2c1_dev_attr = { .fifo_depth = 8, /* bytes */ .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | OMAP_I2C_FLAG_BUS_SHIFT_2, }; static struct omap_hwmod_ocp_if *omap3xxx_i2c1_slaves[] = { &omap3_l4_core__i2c1, }; static struct omap_hwmod omap3xxx_i2c1_hwmod = { .name = "i2c1", .flags = HWMOD_16BIT_REG | 
/*
 * NOTE(review): I2C hwmod data. Completes the i2c1 hwmod (16-bit
 * registers, default CLOCKACT), then defines i2c2 (8-byte FIFO) and
 * i2c3 (64-byte FIFO, with its own IRQ and TX/RX sDMA request tables —
 * the shallower-FIFO i2c1/2 reuse shared omap2_* tables instead). All
 * three apply the errata-i207 / reset-regs-post-idle flags. The i2c3
 * hwmod's .prcm initializer is cut at the line break.
 */
HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap2_i2c1_mpu_irqs, .sdma_reqs = omap2_i2c1_sdma_reqs, .main_clk = "i2c1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_I2C1_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_I2C1_SHIFT, }, }, .slaves = omap3xxx_i2c1_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_i2c1_slaves), .class = &i2c_class, .dev_attr = &i2c1_dev_attr, }; /* I2C2 */ static struct omap_i2c_dev_attr i2c2_dev_attr = { .fifo_depth = 8, /* bytes */ .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | OMAP_I2C_FLAG_BUS_SHIFT_2, }; static struct omap_hwmod_ocp_if *omap3xxx_i2c2_slaves[] = { &omap3_l4_core__i2c2, }; static struct omap_hwmod omap3xxx_i2c2_hwmod = { .name = "i2c2", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap2_i2c2_mpu_irqs, .sdma_reqs = omap2_i2c2_sdma_reqs, .main_clk = "i2c2_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_I2C2_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_I2C2_SHIFT, }, }, .slaves = omap3xxx_i2c2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_i2c2_slaves), .class = &i2c_class, .dev_attr = &i2c2_dev_attr, }; /* I2C3 */ static struct omap_i2c_dev_attr i2c3_dev_attr = { .fifo_depth = 64, /* bytes */ .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | OMAP_I2C_FLAG_BUS_SHIFT_2, }; static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = { { .irq = INT_34XX_I2C3_IRQ, }, { .irq = -1 } }; static struct omap_hwmod_dma_info i2c3_sdma_reqs[] = { { .name = "tx", .dma_req = OMAP34XX_DMA_I2C3_TX }, { .name = "rx", .dma_req = OMAP34XX_DMA_I2C3_RX }, { .dma_req = -1 } }; static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = { &omap3_l4_core__i2c3, }; static struct omap_hwmod omap3xxx_i2c3_hwmod = { .name = "i2c3", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = i2c3_mpu_irqs, .sdma_reqs = i2c3_sdma_reqs, .main_clk = "i2c3_fck", .prcm = { 
/*
 * Continuation: i2c3 PRCM bits, then the GPIO bank MMIO windows and
 * OCP interfaces. Note gpio1 hangs off l4_wkup (0x48310000, wakeup
 * domain); gpio2-6 hang off l4_per (0x49050000/0x49052000/0x49054000/
 * 0x49056000/0x49058000, each 0x200 bytes). The l4_per->gpio5
 * interface is cut at the line break.
 */
.omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_I2C3_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_I2C3_SHIFT, }, }, .slaves = omap3xxx_i2c3_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_i2c3_slaves), .class = &i2c_class, .dev_attr = &i2c3_dev_attr, }; /* l4_wkup -> gpio1 */ static struct omap_hwmod_addr_space omap3xxx_gpio1_addrs[] = { { .pa_start = 0x48310000, .pa_end = 0x483101ff, .flags = ADDR_TYPE_RT }, { } }; static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__gpio1 = { .master = &omap3xxx_l4_wkup_hwmod, .slave = &omap3xxx_gpio1_hwmod, .addr = omap3xxx_gpio1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_per -> gpio2 */ static struct omap_hwmod_addr_space omap3xxx_gpio2_addrs[] = { { .pa_start = 0x49050000, .pa_end = 0x490501ff, .flags = ADDR_TYPE_RT }, { } }; static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio2 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_gpio2_hwmod, .addr = omap3xxx_gpio2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_per -> gpio3 */ static struct omap_hwmod_addr_space omap3xxx_gpio3_addrs[] = { { .pa_start = 0x49052000, .pa_end = 0x490521ff, .flags = ADDR_TYPE_RT }, { } }; static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio3 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_gpio3_hwmod, .addr = omap3xxx_gpio3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_per -> gpio4 */ static struct omap_hwmod_addr_space omap3xxx_gpio4_addrs[] = { { .pa_start = 0x49054000, .pa_end = 0x490541ff, .flags = ADDR_TYPE_RT }, { } }; static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio4 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_gpio4_hwmod, .addr = omap3xxx_gpio4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_per -> gpio5 */ static struct omap_hwmod_addr_space omap3xxx_gpio5_addrs[] = { { .pa_start = 0x49056000, .pa_end = 0x490561ff, .flags = ADDR_TYPE_RT }, { } }; static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio5 = { .master = &omap3xxx_l4_per_hwmod, 
/*
 * NOTE(review): GPIO hwmod data. Completes the l4_per->gpio5 interface,
 * adds l4_per->gpio6, then the shared 'gpio' sysconfig class (rev 1)
 * and device attributes (32-bit banks, debounce clock present). gpio1
 * follows: it lives in WKUP_MOD and gates its optional "dbclk" debounce
 * clock during reset via HWMOD_CONTROL_OPT_CLKS_IN_RESET. The gpio2
 * slave-port array is cut at the line break.
 */
.slave = &omap3xxx_gpio5_hwmod, .addr = omap3xxx_gpio5_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_per -> gpio6 */ static struct omap_hwmod_addr_space omap3xxx_gpio6_addrs[] = { { .pa_start = 0x49058000, .pa_end = 0x490581ff, .flags = ADDR_TYPE_RT }, { } }; static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio6 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_gpio6_hwmod, .addr = omap3xxx_gpio6_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* * 'gpio' class * general purpose io module */ static struct omap_hwmod_class_sysconfig omap3xxx_gpio_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_gpio_hwmod_class = { .name = "gpio", .sysc = &omap3xxx_gpio_sysc, .rev = 1, }; /* gpio_dev_attr*/ static struct omap_gpio_dev_attr gpio_dev_attr = { .bank_width = 32, .dbck_flag = true, }; /* gpio1 */ static struct omap_hwmod_opt_clk gpio1_opt_clks[] = { { .role = "dbclk", .clk = "gpio1_dbck", }, }; static struct omap_hwmod_ocp_if *omap3xxx_gpio1_slaves[] = { &omap3xxx_l4_wkup__gpio1, }; static struct omap_hwmod omap3xxx_gpio1_hwmod = { .name = "gpio1", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap2_gpio1_irqs, .main_clk = "gpio1_ick", .opt_clks = gpio1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks), .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPIO1_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO1_SHIFT, }, }, .slaves = omap3xxx_gpio1_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_gpio1_slaves), .class = &omap3xxx_gpio_hwmod_class, .dev_attr = &gpio_dev_attr, }; /* gpio2 */ static struct omap_hwmod_opt_clk gpio2_opt_clks[] = { { .role = "dbclk", .clk = "gpio2_dbck", }, }; static struct omap_hwmod_ocp_if 
/*
 * gpio2, gpio3 and gpio4 hwmods: same shape as gpio1 but in
 * OMAP3430_PER_MOD with their own EN/ST PRCM bits and per-bank dbck
 * optional clocks. gpio4's .idlest_idle_bit is cut at the line break.
 */
*omap3xxx_gpio2_slaves[] = { &omap3xxx_l4_per__gpio2, }; static struct omap_hwmod omap3xxx_gpio2_hwmod = { .name = "gpio2", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap2_gpio2_irqs, .main_clk = "gpio2_ick", .opt_clks = gpio2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks), .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPIO2_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO2_SHIFT, }, }, .slaves = omap3xxx_gpio2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_gpio2_slaves), .class = &omap3xxx_gpio_hwmod_class, .dev_attr = &gpio_dev_attr, }; /* gpio3 */ static struct omap_hwmod_opt_clk gpio3_opt_clks[] = { { .role = "dbclk", .clk = "gpio3_dbck", }, }; static struct omap_hwmod_ocp_if *omap3xxx_gpio3_slaves[] = { &omap3xxx_l4_per__gpio3, }; static struct omap_hwmod omap3xxx_gpio3_hwmod = { .name = "gpio3", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap2_gpio3_irqs, .main_clk = "gpio3_ick", .opt_clks = gpio3_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks), .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPIO3_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO3_SHIFT, }, }, .slaves = omap3xxx_gpio3_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_gpio3_slaves), .class = &omap3xxx_gpio_hwmod_class, .dev_attr = &gpio_dev_attr, }; /* gpio4 */ static struct omap_hwmod_opt_clk gpio4_opt_clks[] = { { .role = "dbclk", .clk = "gpio4_dbck", }, }; static struct omap_hwmod_ocp_if *omap3xxx_gpio4_slaves[] = { &omap3xxx_l4_per__gpio4, }; static struct omap_hwmod omap3xxx_gpio4_hwmod = { .name = "gpio4", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap2_gpio4_irqs, .main_clk = "gpio4_ick", .opt_clks = gpio4_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks), .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPIO4_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = 
/*
 * gpio4 tail, then gpio5 (IRQ 33) and gpio6 (IRQ 34) — these two
 * declare local IRQ tables instead of reusing shared omap2_* ones.
 * The dma_system->L3 interface declaration begins at the end and
 * continues on the next line.
 */
OMAP3430_ST_GPIO4_SHIFT, }, }, .slaves = omap3xxx_gpio4_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_gpio4_slaves), .class = &omap3xxx_gpio_hwmod_class, .dev_attr = &gpio_dev_attr, }; /* gpio5 */ static struct omap_hwmod_irq_info omap3xxx_gpio5_irqs[] = { { .irq = 33 }, /* INT_34XX_GPIO_BANK5 */ { .irq = -1 } }; static struct omap_hwmod_opt_clk gpio5_opt_clks[] = { { .role = "dbclk", .clk = "gpio5_dbck", }, }; static struct omap_hwmod_ocp_if *omap3xxx_gpio5_slaves[] = { &omap3xxx_l4_per__gpio5, }; static struct omap_hwmod omap3xxx_gpio5_hwmod = { .name = "gpio5", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio5_irqs, .main_clk = "gpio5_ick", .opt_clks = gpio5_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks), .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPIO5_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO5_SHIFT, }, }, .slaves = omap3xxx_gpio5_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_gpio5_slaves), .class = &omap3xxx_gpio_hwmod_class, .dev_attr = &gpio_dev_attr, }; /* gpio6 */ static struct omap_hwmod_irq_info omap3xxx_gpio6_irqs[] = { { .irq = 34 }, /* INT_34XX_GPIO_BANK6 */ { .irq = -1 } }; static struct omap_hwmod_opt_clk gpio6_opt_clks[] = { { .role = "dbclk", .clk = "gpio6_dbck", }, }; static struct omap_hwmod_ocp_if *omap3xxx_gpio6_slaves[] = { &omap3xxx_l4_per__gpio6, }; static struct omap_hwmod omap3xxx_gpio6_hwmod = { .name = "gpio6", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio6_irqs, .main_clk = "gpio6_ick", .opt_clks = gpio6_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio6_opt_clks), .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_GPIO6_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_GPIO6_SHIFT, }, }, .slaves = omap3xxx_gpio6_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_gpio6_slaves), .class = &omap3xxx_gpio_hwmod_class, .dev_attr = &gpio_dev_attr, }; /* dma_system -> L3 */ static struct 
/*
 * NOTE(review): sDMA system hwmod. Defines the dma_system->L3 master
 * interface, the DMA device attributes (32 logical channels), the 'dma'
 * sysconfig class, the 0x48056000 MMIO window, master/slave port
 * arrays, and the dma hwmod itself. NOTE(review): .module_bit is set to
 * OMAP3430_ST_SDMA_SHIFT (a status bit, not an EN_ bit) — the hwmod is
 * HWMOD_NO_IDLEST so this matches upstream data of this era; confirm
 * against mainline before "fixing". The .slaves_cnt initializer is cut
 * at the line break.
 */
omap_hwmod_ocp_if omap3xxx_dma_system__l3 = { .master = &omap3xxx_dma_system_hwmod, .slave = &omap3xxx_l3_main_hwmod, .clk = "core_l3_ick", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dma attributes */ static struct omap_dma_dev_attr dma_dev_attr = { .dev_caps = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY | IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY, .lch_count = 32, }; static struct omap_hwmod_class_sysconfig omap3xxx_dma_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x002c, .syss_offs = 0x0028, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_dma_hwmod_class = { .name = "dma", .sysc = &omap3xxx_dma_sysc, }; /* dma_system */ static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = { { .pa_start = 0x48056000, .pa_end = 0x48056fff, .flags = ADDR_TYPE_RT }, { } }; /* dma_system master ports */ static struct omap_hwmod_ocp_if *omap3xxx_dma_system_masters[] = { &omap3xxx_dma_system__l3, }; /* l4_cfg -> dma_system */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dma_system = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_dma_system_hwmod, .clk = "core_l4_ick", .addr = omap3xxx_dma_system_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dma_system slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_dma_system_slaves[] = { &omap3xxx_l4_core__dma_system, }; static struct omap_hwmod omap3xxx_dma_system_hwmod = { .name = "dma", .class = &omap3xxx_dma_hwmod_class, .mpu_irqs = omap2_dma_system_irqs, .main_clk = "core_l3_ick", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_ST_SDMA_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_SDMA_SHIFT, }, }, .slaves = omap3xxx_dma_system_slaves, .slaves_cnt = 
/*
 * dma hwmod tail, then the 'mcbsp' sysconfig class (type-3 McBSP,
 * CLOCKACTIVITY kept at 0x2) and mcbsp1: IRQ/TX/RX interrupts 16/59/60,
 * MMIO at 0x48074000 on l4_core, CORE_MOD PRCM bits. The mcbsp2 IRQ
 * table is cut at the line break (its "tx" entry continues on the next
 * line).
 */
ARRAY_SIZE(omap3xxx_dma_system_slaves), .masters = omap3xxx_dma_system_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_dma_system_masters), .dev_attr = &dma_dev_attr, .flags = HWMOD_NO_IDLEST, }; /* * 'mcbsp' class * multi channel buffered serial port controller */ static struct omap_hwmod_class_sysconfig omap3xxx_mcbsp_sysc = { .sysc_offs = 0x008c, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, .clockact = 0x2, }; static struct omap_hwmod_class omap3xxx_mcbsp_hwmod_class = { .name = "mcbsp", .sysc = &omap3xxx_mcbsp_sysc, .rev = MCBSP_CONFIG_TYPE3, }; /* mcbsp1 */ static struct omap_hwmod_irq_info omap3xxx_mcbsp1_irqs[] = { { .name = "irq", .irq = 16 }, { .name = "tx", .irq = 59 }, { .name = "rx", .irq = 60 }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap3xxx_mcbsp1_addrs[] = { { .name = "mpu", .pa_start = 0x48074000, .pa_end = 0x480740ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_core -> mcbsp1 */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_mcbsp1_hwmod, .clk = "mcbsp1_ick", .addr = omap3xxx_mcbsp1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mcbsp1 slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_mcbsp1_slaves[] = { &omap3xxx_l4_core__mcbsp1, }; static struct omap_hwmod omap3xxx_mcbsp1_hwmod = { .name = "mcbsp1", .class = &omap3xxx_mcbsp_hwmod_class, .mpu_irqs = omap3xxx_mcbsp1_irqs, .sdma_reqs = omap2_mcbsp1_sdma_reqs, .main_clk = "mcbsp1_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCBSP1_SHIFT, .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP1_SHIFT, }, }, .slaves = omap3xxx_mcbsp1_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp1_slaves), }; /* mcbsp2 */ static struct omap_hwmod_irq_info omap3xxx_mcbsp2_irqs[] = { { .name = "irq", .irq = 17 }, { .name = "tx", .irq = 62 
/*
 * NOTE(review): McBSP 2-5 and sidetone hwmod data. This line finishes
 * the mcbsp2 IRQ table (rx = 63), maps mcbsp2 at 0x49022000 on l4_per,
 * attaches the "mcbsp2_sidetone" dev_attr, and starts mcbsp3
 * (IRQ/TX/RX 22/89/90, 0x49024000, sidetone "mcbsp3_sidetone"). The
 * mcbsp3 hwmod struct itself is cut at the line break.
 */
}, { .name = "rx", .irq = 63 }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap3xxx_mcbsp2_addrs[] = { { .name = "mpu", .pa_start = 0x49022000, .pa_end = 0x490220ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mcbsp2 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_mcbsp2_hwmod, .clk = "mcbsp2_ick", .addr = omap3xxx_mcbsp2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mcbsp2 slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_mcbsp2_slaves[] = { &omap3xxx_l4_per__mcbsp2, }; static struct omap_mcbsp_dev_attr omap34xx_mcbsp2_dev_attr = { .sidetone = "mcbsp2_sidetone", }; static struct omap_hwmod omap3xxx_mcbsp2_hwmod = { .name = "mcbsp2", .class = &omap3xxx_mcbsp_hwmod_class, .mpu_irqs = omap3xxx_mcbsp2_irqs, .sdma_reqs = omap2_mcbsp2_sdma_reqs, .main_clk = "mcbsp2_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCBSP2_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT, }, }, .slaves = omap3xxx_mcbsp2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_slaves), .dev_attr = &omap34xx_mcbsp2_dev_attr, }; /* mcbsp3 */ static struct omap_hwmod_irq_info omap3xxx_mcbsp3_irqs[] = { { .name = "irq", .irq = 22 }, { .name = "tx", .irq = 89 }, { .name = "rx", .irq = 90 }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap3xxx_mcbsp3_addrs[] = { { .name = "mpu", .pa_start = 0x49024000, .pa_end = 0x490240ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mcbsp3 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_mcbsp3_hwmod, .clk = "mcbsp3_ick", .addr = omap3xxx_mcbsp3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mcbsp3 slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_mcbsp3_slaves[] = { &omap3xxx_l4_per__mcbsp3, }; static struct omap_mcbsp_dev_attr omap34xx_mcbsp3_dev_attr = { .sidetone = "mcbsp3_sidetone", }; static struct omap_hwmod 
/*
 * mcbsp3 hwmod tail, then mcbsp4: IRQ/TX/RX 23/54/55, local sDMA table
 * (rx=20/tx=19), MMIO at 0x49026000 on l4_per. The mcbsp5 IRQ table
 * (IRQ/TX/RX 27/81/82) ends the line; its sDMA table continues on the
 * next line.
 */
omap3xxx_mcbsp3_hwmod = { .name = "mcbsp3", .class = &omap3xxx_mcbsp_hwmod_class, .mpu_irqs = omap3xxx_mcbsp3_irqs, .sdma_reqs = omap2_mcbsp3_sdma_reqs, .main_clk = "mcbsp3_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCBSP3_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT, }, }, .slaves = omap3xxx_mcbsp3_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_slaves), .dev_attr = &omap34xx_mcbsp3_dev_attr, }; /* mcbsp4 */ static struct omap_hwmod_irq_info omap3xxx_mcbsp4_irqs[] = { { .name = "irq", .irq = 23 }, { .name = "tx", .irq = 54 }, { .name = "rx", .irq = 55 }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap3xxx_mcbsp4_sdma_chs[] = { { .name = "rx", .dma_req = 20 }, { .name = "tx", .dma_req = 19 }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap3xxx_mcbsp4_addrs[] = { { .name = "mpu", .pa_start = 0x49026000, .pa_end = 0x490260ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mcbsp4 */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp4 = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_mcbsp4_hwmod, .clk = "mcbsp4_ick", .addr = omap3xxx_mcbsp4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mcbsp4 slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_mcbsp4_slaves[] = { &omap3xxx_l4_per__mcbsp4, }; static struct omap_hwmod omap3xxx_mcbsp4_hwmod = { .name = "mcbsp4", .class = &omap3xxx_mcbsp_hwmod_class, .mpu_irqs = omap3xxx_mcbsp4_irqs, .sdma_reqs = omap3xxx_mcbsp4_sdma_chs, .main_clk = "mcbsp4_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCBSP4_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP4_SHIFT, }, }, .slaves = omap3xxx_mcbsp4_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp4_slaves), }; /* mcbsp5 */ static struct omap_hwmod_irq_info omap3xxx_mcbsp5_irqs[] = { { .name = "irq", .irq = 27 }, { .name = "tx", .irq = 81 }, { .name = "rx", .irq = 82 }, { .irq = -1 } }; static 
/*
 * mcbsp5: sDMA rx=22/tx=21, MMIO at 0x48096000 on l4_core, CORE_MOD
 * PRCM bits. Then the minimal 'mcbsp_sidetone' class (AUTOIDLE only)
 * and the mcbsp2_sidetone block: softirq-style IRQ 4, MMIO 0x49028000,
 * MPU-only OCP user. The interface's .addr initializer is cut at the
 * line break.
 */
struct omap_hwmod_dma_info omap3xxx_mcbsp5_sdma_chs[] = { { .name = "rx", .dma_req = 22 }, { .name = "tx", .dma_req = 21 }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap3xxx_mcbsp5_addrs[] = { { .name = "mpu", .pa_start = 0x48096000, .pa_end = 0x480960ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_core -> mcbsp5 */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp5 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_mcbsp5_hwmod, .clk = "mcbsp5_ick", .addr = omap3xxx_mcbsp5_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mcbsp5 slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_mcbsp5_slaves[] = { &omap3xxx_l4_core__mcbsp5, }; static struct omap_hwmod omap3xxx_mcbsp5_hwmod = { .name = "mcbsp5", .class = &omap3xxx_mcbsp_hwmod_class, .mpu_irqs = omap3xxx_mcbsp5_irqs, .sdma_reqs = omap3xxx_mcbsp5_sdma_chs, .main_clk = "mcbsp5_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCBSP5_SHIFT, .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP5_SHIFT, }, }, .slaves = omap3xxx_mcbsp5_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp5_slaves), }; /* 'mcbsp sidetone' class */ static struct omap_hwmod_class_sysconfig omap3xxx_mcbsp_sidetone_sysc = { .sysc_offs = 0x0010, .sysc_flags = SYSC_HAS_AUTOIDLE, .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_mcbsp_sidetone_hwmod_class = { .name = "mcbsp_sidetone", .sysc = &omap3xxx_mcbsp_sidetone_sysc, }; /* mcbsp2_sidetone */ static struct omap_hwmod_irq_info omap3xxx_mcbsp2_sidetone_irqs[] = { { .name = "irq", .irq = 4 }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap3xxx_mcbsp2_sidetone_addrs[] = { { .name = "sidetone", .pa_start = 0x49028000, .pa_end = 0x490280ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mcbsp2_sidetone */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2_sidetone = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_mcbsp2_sidetone_hwmod, .clk = "mcbsp2_ick", .addr = 
/*
 * mcbsp2_sidetone and mcbsp3_sidetone hwmods (sidetone shares its
 * parent McBSP's fck and PRCM bits; mcbsp3_sidetone is at 0x4902A000,
 * IRQ 5, MPU-only). The SmartReflex type-1 sysc-fields declaration
 * (clkact_shift = 20) ends the line and is closed on the next line.
 */
omap3xxx_mcbsp2_sidetone_addrs, .user = OCP_USER_MPU, }; /* mcbsp2_sidetone slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_mcbsp2_sidetone_slaves[] = { &omap3xxx_l4_per__mcbsp2_sidetone, }; static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = { .name = "mcbsp2_sidetone", .class = &omap3xxx_mcbsp_sidetone_hwmod_class, .mpu_irqs = omap3xxx_mcbsp2_sidetone_irqs, .main_clk = "mcbsp2_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCBSP2_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT, }, }, .slaves = omap3xxx_mcbsp2_sidetone_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_slaves), }; /* mcbsp3_sidetone */ static struct omap_hwmod_irq_info omap3xxx_mcbsp3_sidetone_irqs[] = { { .name = "irq", .irq = 5 }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap3xxx_mcbsp3_sidetone_addrs[] = { { .name = "sidetone", .pa_start = 0x4902A000, .pa_end = 0x4902A0ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mcbsp3_sidetone */ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3_sidetone = { .master = &omap3xxx_l4_per_hwmod, .slave = &omap3xxx_mcbsp3_sidetone_hwmod, .clk = "mcbsp3_ick", .addr = omap3xxx_mcbsp3_sidetone_addrs, .user = OCP_USER_MPU, }; /* mcbsp3_sidetone slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_mcbsp3_sidetone_slaves[] = { &omap3xxx_l4_per__mcbsp3_sidetone, }; static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod = { .name = "mcbsp3_sidetone", .class = &omap3xxx_mcbsp_sidetone_hwmod_class, .mpu_irqs = omap3xxx_mcbsp3_sidetone_irqs, .main_clk = "mcbsp3_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCBSP3_SHIFT, .module_offs = OMAP3430_PER_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT, }, }, .slaves = omap3xxx_mcbsp3_sidetone_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_slaves), }; /* SR common */ static struct omap_hwmod_sysc_fields omap34xx_sr_sysc_fields = { .clkact_shift = 20, 
/*
 * NOTE(review): SmartReflex hwmod data. 34xx SR uses sysc rev 1
 * (SYSCONFIG at 0x24, clockactivity, no caching); 36xx uses rev 2
 * (0x38, smart-idle + wakeup). NOTE(review): both SR1 hwmods set
 * .idlest_idle_bit to OMAP3430_EN_SR1_SHIFT (an EN_ macro, not ST_) —
 * this matches mainline data of this era, where the EN and ST bit
 * positions coincide; verify against the TRM before changing. SR1
 * senses the "mpu_iva" voltage domain.
 */
}; static struct omap_hwmod_class_sysconfig omap34xx_sr_sysc = { .sysc_offs = 0x24, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_NO_CACHE), .clockact = CLOCKACT_TEST_ICLK, .sysc_fields = &omap34xx_sr_sysc_fields, }; static struct omap_hwmod_class omap34xx_smartreflex_hwmod_class = { .name = "smartreflex", .sysc = &omap34xx_sr_sysc, .rev = 1, }; static struct omap_hwmod_sysc_fields omap36xx_sr_sysc_fields = { .sidle_shift = 24, .enwkup_shift = 26 }; static struct omap_hwmod_class_sysconfig omap36xx_sr_sysc = { .sysc_offs = 0x38, .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_NO_CACHE), .sysc_fields = &omap36xx_sr_sysc_fields, }; static struct omap_hwmod_class omap36xx_smartreflex_hwmod_class = { .name = "smartreflex", .sysc = &omap36xx_sr_sysc, .rev = 2, }; /* SR1 */ static struct omap_smartreflex_dev_attr sr1_dev_attr = { .sensor_voltdm_name = "mpu_iva", }; static struct omap_hwmod_ocp_if *omap3_sr1_slaves[] = { &omap3_l4_core__sr1, }; static struct omap_hwmod omap34xx_sr1_hwmod = { .name = "sr1_hwmod", .class = &omap34xx_smartreflex_hwmod_class, .main_clk = "sr1_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_SR1_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_SR1_SHIFT, }, }, .slaves = omap3_sr1_slaves, .slaves_cnt = ARRAY_SIZE(omap3_sr1_slaves), .dev_attr = &sr1_dev_attr, .mpu_irqs = omap3_smartreflex_mpu_irqs, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; static struct omap_hwmod omap36xx_sr1_hwmod = { .name = "sr1_hwmod", .class = &omap36xx_smartreflex_hwmod_class, .main_clk = "sr1_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_SR1_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_SR1_SHIFT, }, }, .slaves = omap3_sr1_slaves, .slaves_cnt = ARRAY_SIZE(omap3_sr1_slaves), .dev_attr = &sr1_dev_attr, .mpu_irqs = omap3_smartreflex_mpu_irqs, }; /* SR2 */ static struct omap_smartreflex_dev_attr 
/*
 * SR2 senses the "core" voltage domain; 34xx and 36xx variants differ
 * only in sysconfig class and the CLOCKACT flag, mirroring SR1 above.
 * The multi-line 'mailbox' class comment is split across the next line
 * break — no annotation may be inserted inside it.
 */
sr2_dev_attr = { .sensor_voltdm_name = "core", }; static struct omap_hwmod_ocp_if *omap3_sr2_slaves[] = { &omap3_l4_core__sr2, }; static struct omap_hwmod omap34xx_sr2_hwmod = { .name = "sr2_hwmod", .class = &omap34xx_smartreflex_hwmod_class, .main_clk = "sr2_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_SR2_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_SR2_SHIFT, }, }, .slaves = omap3_sr2_slaves, .slaves_cnt = ARRAY_SIZE(omap3_sr2_slaves), .dev_attr = &sr2_dev_attr, .mpu_irqs = omap3_smartreflex_core_irqs, .flags = HWMOD_SET_DEFAULT_CLOCKACT, }; static struct omap_hwmod omap36xx_sr2_hwmod = { .name = "sr2_hwmod", .class = &omap36xx_smartreflex_hwmod_class, .main_clk = "sr2_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_SR2_SHIFT, .module_offs = WKUP_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_EN_SR2_SHIFT, }, }, .slaves = omap3_sr2_slaves, .slaves_cnt = ARRAY_SIZE(omap3_sr2_slaves), .dev_attr = &sr2_dev_attr, .mpu_irqs = omap3_smartreflex_core_irqs, }; /* * 'mailbox' class * mailbox module allowing communication between the on-chip processors * using a queued mailbox-interrupt mechanism. 
*/ static struct omap_hwmod_class_sysconfig omap3xxx_mailbox_sysc = { .rev_offs = 0x000, .sysc_offs = 0x010, .syss_offs = 0x014, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_mailbox_hwmod_class = { .name = "mailbox", .sysc = &omap3xxx_mailbox_sysc, }; static struct omap_hwmod omap3xxx_mailbox_hwmod; static struct omap_hwmod_irq_info omap3xxx_mailbox_irqs[] = { { .irq = 26 }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap3xxx_mailbox_addrs[] = { { .pa_start = 0x48094000, .pa_end = 0x480941ff, .flags = ADDR_TYPE_RT, }, { } }; /* l4_core -> mailbox */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mailbox = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_mailbox_hwmod, .addr = omap3xxx_mailbox_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mailbox slave ports */ static struct omap_hwmod_ocp_if *omap3xxx_mailbox_slaves[] = { &omap3xxx_l4_core__mailbox, }; static struct omap_hwmod omap3xxx_mailbox_hwmod = { .name = "mailbox", .class = &omap3xxx_mailbox_hwmod_class, .mpu_irqs = omap3xxx_mailbox_irqs, .main_clk = "mailboxes_ick", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MAILBOXES_SHIFT, .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MAILBOXES_SHIFT, }, }, .slaves = omap3xxx_mailbox_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mailbox_slaves), }; /* l4 core -> mcspi1 interface */ static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_mcspi1, .clk = "mcspi1_ick", .addr = omap2_mcspi1_addr_space, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4 core -> mcspi2 interface */ static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_mcspi2, .clk = "mcspi2_ick", .addr = omap2_mcspi2_addr_space, .user = 
OCP_USER_MPU | OCP_USER_SDMA, }; /* l4 core -> mcspi3 interface */ static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi3 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_mcspi3, .clk = "mcspi3_ick", .addr = omap2430_mcspi3_addr_space, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4 core -> mcspi4 interface */ static struct omap_hwmod_addr_space omap34xx_mcspi4_addr_space[] = { { .pa_start = 0x480ba000, .pa_end = 0x480ba0ff, .flags = ADDR_TYPE_RT, }, { } }; static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_mcspi4, .clk = "mcspi4_ick", .addr = omap34xx_mcspi4_addr_space, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* * 'mcspi' class * multichannel serial port interface (mcspi) / master/slave synchronous serial * bus */ static struct omap_hwmod_class_sysconfig omap34xx_mcspi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap34xx_mcspi_class = { .name = "mcspi", .sysc = &omap34xx_mcspi_sysc, .rev = OMAP3_MCSPI_REV, }; /* mcspi1 */ static struct omap_hwmod_ocp_if *omap34xx_mcspi1_slaves[] = { &omap34xx_l4_core__mcspi1, }; static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = { .num_chipselect = 4, }; static struct omap_hwmod omap34xx_mcspi1 = { .name = "mcspi1", .mpu_irqs = omap2_mcspi1_mpu_irqs, .sdma_reqs = omap2_mcspi1_sdma_reqs, .main_clk = "mcspi1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCSPI1_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCSPI1_SHIFT, }, }, .slaves = omap34xx_mcspi1_slaves, .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi1_slaves), .class = &omap34xx_mcspi_class, .dev_attr = &omap_mcspi1_dev_attr, }; /* mcspi2 */ static 
struct omap_hwmod_ocp_if *omap34xx_mcspi2_slaves[] = { &omap34xx_l4_core__mcspi2, }; static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = { .num_chipselect = 2, }; static struct omap_hwmod omap34xx_mcspi2 = { .name = "mcspi2", .mpu_irqs = omap2_mcspi2_mpu_irqs, .sdma_reqs = omap2_mcspi2_sdma_reqs, .main_clk = "mcspi2_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCSPI2_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCSPI2_SHIFT, }, }, .slaves = omap34xx_mcspi2_slaves, .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi2_slaves), .class = &omap34xx_mcspi_class, .dev_attr = &omap_mcspi2_dev_attr, }; /* mcspi3 */ static struct omap_hwmod_irq_info omap34xx_mcspi3_mpu_irqs[] = { { .name = "irq", .irq = 91 }, /* 91 */ { .irq = -1 } }; static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = { { .name = "tx0", .dma_req = 15 }, { .name = "rx0", .dma_req = 16 }, { .name = "tx1", .dma_req = 23 }, { .name = "rx1", .dma_req = 24 }, { .dma_req = -1 } }; static struct omap_hwmod_ocp_if *omap34xx_mcspi3_slaves[] = { &omap34xx_l4_core__mcspi3, }; static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = { .num_chipselect = 2, }; static struct omap_hwmod omap34xx_mcspi3 = { .name = "mcspi3", .mpu_irqs = omap34xx_mcspi3_mpu_irqs, .sdma_reqs = omap34xx_mcspi3_sdma_reqs, .main_clk = "mcspi3_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCSPI3_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCSPI3_SHIFT, }, }, .slaves = omap34xx_mcspi3_slaves, .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi3_slaves), .class = &omap34xx_mcspi_class, .dev_attr = &omap_mcspi3_dev_attr, }; /* SPI4 */ static struct omap_hwmod_irq_info omap34xx_mcspi4_mpu_irqs[] = { { .name = "irq", .irq = INT_34XX_SPI4_IRQ }, /* 48 */ { .irq = -1 } }; static struct omap_hwmod_dma_info omap34xx_mcspi4_sdma_reqs[] = { { .name = "tx0", .dma_req = 70 }, /* DMA_SPI4_TX0 */ { .name = "rx0", .dma_req = 71 }, /* 
DMA_SPI4_RX0 */ { .dma_req = -1 } }; static struct omap_hwmod_ocp_if *omap34xx_mcspi4_slaves[] = { &omap34xx_l4_core__mcspi4, }; static struct omap2_mcspi_dev_attr omap_mcspi4_dev_attr = { .num_chipselect = 1, }; static struct omap_hwmod omap34xx_mcspi4 = { .name = "mcspi4", .mpu_irqs = omap34xx_mcspi4_mpu_irqs, .sdma_reqs = omap34xx_mcspi4_sdma_reqs, .main_clk = "mcspi4_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MCSPI4_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MCSPI4_SHIFT, }, }, .slaves = omap34xx_mcspi4_slaves, .slaves_cnt = ARRAY_SIZE(omap34xx_mcspi4_slaves), .class = &omap34xx_mcspi_class, .dev_attr = &omap_mcspi4_dev_attr, }; /* * usbhsotg */ static struct omap_hwmod_class_sysconfig omap3xxx_usbhsotg_sysc = { .rev_offs = 0x0400, .sysc_offs = 0x0404, .syss_offs = 0x0408, .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE| SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class usbotg_class = { .name = "usbotg", .sysc = &omap3xxx_usbhsotg_sysc, }; /* usb_otg_hs */ static struct omap_hwmod_irq_info omap3xxx_usbhsotg_mpu_irqs[] = { { .name = "mc", .irq = 92 }, { .name = "dma", .irq = 93 }, { .irq = -1 } }; static struct omap_hwmod omap3xxx_usbhsotg_hwmod = { .name = "usb_otg_hs", .mpu_irqs = omap3xxx_usbhsotg_mpu_irqs, .main_clk = "hsotgusb_ick", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_HSOTGUSB_SHIFT, .module_offs = CORE_MOD, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT, .idlest_stdby_bit = OMAP3430ES2_ST_HSOTGUSB_STDBY_SHIFT }, }, .masters = omap3xxx_usbhsotg_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_masters), .slaves = omap3xxx_usbhsotg_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_slaves), .class = &usbotg_class, /* * Erratum ID: i479 idle_req 
/ idle_ack mechanism potentially * broken when autoidle is enabled * workaround is to disable the autoidle bit at module level. */ .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, }; /* usb_otg_hs */ static struct omap_hwmod_irq_info am35xx_usbhsotg_mpu_irqs[] = { { .name = "mc", .irq = 71 }, { .irq = -1 } }; static struct omap_hwmod_class am35xx_usbotg_class = { .name = "am35xx_usbotg", .sysc = NULL, }; static struct omap_hwmod am35xx_usbhsotg_hwmod = { .name = "am35x_otg_hs", .mpu_irqs = am35xx_usbhsotg_mpu_irqs, .main_clk = NULL, .prcm = { .omap2 = { }, }, .masters = am35xx_usbhsotg_masters, .masters_cnt = ARRAY_SIZE(am35xx_usbhsotg_masters), .slaves = am35xx_usbhsotg_slaves, .slaves_cnt = ARRAY_SIZE(am35xx_usbhsotg_slaves), .class = &am35xx_usbotg_class, }; /* MMC/SD/SDIO common */ static struct omap_hwmod_class_sysconfig omap34xx_mmc_sysc = { .rev_offs = 0x1fc, .sysc_offs = 0x10, .syss_offs = 0x14, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap34xx_mmc_class = { .name = "mmc", .sysc = &omap34xx_mmc_sysc, }; /* MMC/SD/SDIO1 */ static struct omap_hwmod_irq_info omap34xx_mmc1_mpu_irqs[] = { { .irq = 83, }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap34xx_mmc1_sdma_reqs[] = { { .name = "tx", .dma_req = 61, }, { .name = "rx", .dma_req = 62, }, { .dma_req = -1 } }; static struct omap_hwmod_opt_clk omap34xx_mmc1_opt_clks[] = { { .role = "dbck", .clk = "omap_32k_fck", }, }; static struct omap_hwmod_ocp_if *omap3xxx_mmc1_slaves[] = { &omap3xxx_l4_core__mmc1, }; static struct omap_mmc_dev_attr mmc1_dev_attr = { .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT, }; /* See 35xx errata 2.1.1.128 in SPRZ278F */ static struct omap_mmc_dev_attr mmc1_pre_es3_dev_attr = { .flags = (OMAP_HSMMC_SUPPORTS_DUAL_VOLT | 
OMAP_HSMMC_BROKEN_MULTIBLOCK_READ), }; static struct omap_hwmod omap3xxx_pre_es3_mmc1_hwmod = { .name = "mmc1", .mpu_irqs = omap34xx_mmc1_mpu_irqs, .sdma_reqs = omap34xx_mmc1_sdma_reqs, .opt_clks = omap34xx_mmc1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks), .main_clk = "mmchs1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MMC1_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT, }, }, .dev_attr = &mmc1_pre_es3_dev_attr, .slaves = omap3xxx_mmc1_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc1_slaves), .class = &omap34xx_mmc_class, }; static struct omap_hwmod omap3xxx_es3plus_mmc1_hwmod = { .name = "mmc1", .mpu_irqs = omap34xx_mmc1_mpu_irqs, .sdma_reqs = omap34xx_mmc1_sdma_reqs, .opt_clks = omap34xx_mmc1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks), .main_clk = "mmchs1_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MMC1_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT, }, }, .dev_attr = &mmc1_dev_attr, .slaves = omap3xxx_mmc1_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc1_slaves), .class = &omap34xx_mmc_class, }; /* MMC/SD/SDIO2 */ static struct omap_hwmod_irq_info omap34xx_mmc2_mpu_irqs[] = { { .irq = INT_24XX_MMC2_IRQ, }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap34xx_mmc2_sdma_reqs[] = { { .name = "tx", .dma_req = 47, }, { .name = "rx", .dma_req = 48, }, { .dma_req = -1 } }; static struct omap_hwmod_opt_clk omap34xx_mmc2_opt_clks[] = { { .role = "dbck", .clk = "omap_32k_fck", }, }; static struct omap_hwmod_ocp_if *omap3xxx_mmc2_slaves[] = { &omap3xxx_l4_core__mmc2, }; /* See 35xx errata 2.1.1.128 in SPRZ278F */ static struct omap_mmc_dev_attr mmc2_pre_es3_dev_attr = { .flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ, }; static struct omap_hwmod omap3xxx_pre_es3_mmc2_hwmod = { .name = "mmc2", .mpu_irqs = omap34xx_mmc2_mpu_irqs, .sdma_reqs = omap34xx_mmc2_sdma_reqs, .opt_clks = 
omap34xx_mmc2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks), .main_clk = "mmchs2_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MMC2_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT, }, }, .dev_attr = &mmc2_pre_es3_dev_attr, .slaves = omap3xxx_mmc2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc2_slaves), .class = &omap34xx_mmc_class, }; static struct omap_hwmod omap3xxx_es3plus_mmc2_hwmod = { .name = "mmc2", .mpu_irqs = omap34xx_mmc2_mpu_irqs, .sdma_reqs = omap34xx_mmc2_sdma_reqs, .opt_clks = omap34xx_mmc2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks), .main_clk = "mmchs2_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MMC2_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT, }, }, .slaves = omap3xxx_mmc2_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc2_slaves), .class = &omap34xx_mmc_class, }; /* MMC/SD/SDIO3 */ static struct omap_hwmod_irq_info omap34xx_mmc3_mpu_irqs[] = { { .irq = 94, }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap34xx_mmc3_sdma_reqs[] = { { .name = "tx", .dma_req = 77, }, { .name = "rx", .dma_req = 78, }, { .dma_req = -1 } }; static struct omap_hwmod_opt_clk omap34xx_mmc3_opt_clks[] = { { .role = "dbck", .clk = "omap_32k_fck", }, }; static struct omap_hwmod_ocp_if *omap3xxx_mmc3_slaves[] = { &omap3xxx_l4_core__mmc3, }; static struct omap_hwmod omap3xxx_mmc3_hwmod = { .name = "mmc3", .mpu_irqs = omap34xx_mmc3_mpu_irqs, .sdma_reqs = omap34xx_mmc3_sdma_reqs, .opt_clks = omap34xx_mmc3_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc3_opt_clks), .main_clk = "mmchs3_fck", .prcm = { .omap2 = { .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MMC3_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430_ST_MMC3_SHIFT, }, }, .slaves = omap3xxx_mmc3_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc3_slaves), .class = &omap34xx_mmc_class, }; /* * 'usb_host_hs' class * high-speed multi-port usb 
host controller */ static struct omap_hwmod_ocp_if omap3xxx_usb_host_hs__l3_main_2 = { .master = &omap3xxx_usb_host_hs_hwmod, .slave = &omap3xxx_l3_main_hwmod, .clk = "core_l3_ick", .user = OCP_USER_MPU, }; static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_usb_host_hs_hwmod_class = { .name = "usb_host_hs", .sysc = &omap3xxx_usb_host_hs_sysc, }; static struct omap_hwmod_ocp_if *omap3xxx_usb_host_hs_masters[] = { &omap3xxx_usb_host_hs__l3_main_2, }; static struct omap_hwmod_addr_space omap3xxx_usb_host_hs_addrs[] = { { .name = "uhh", .pa_start = 0x48064000, .pa_end = 0x480643ff, .flags = ADDR_TYPE_RT }, { .name = "ohci", .pa_start = 0x48064400, .pa_end = 0x480647ff, }, { .name = "ehci", .pa_start = 0x48064800, .pa_end = 0x48064cff, }, {} }; static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_host_hs = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_usb_host_hs_hwmod, .clk = "usbhost_ick", .addr = omap3xxx_usb_host_hs_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_ocp_if *omap3xxx_usb_host_hs_slaves[] = { &omap3xxx_l4_core__usb_host_hs, }; static struct omap_hwmod_opt_clk omap3xxx_usb_host_hs_opt_clks[] = { { .role = "ehci_logic_fck", .clk = "usbhost_120m_fck", }, }; static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = { { .name = "ohci-irq", .irq = 76 }, { .name = "ehci-irq", .irq = 77 }, { .irq = -1 } }; static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = { .name = "usb_host_hs", .class = &omap3xxx_usb_host_hs_hwmod_class, .clkdm_name = "l3_init_clkdm", .mpu_irqs = omap3xxx_usb_host_hs_irqs, .main_clk = 
"usbhost_48m_fck", .prcm = { .omap2 = { .module_offs = OMAP3430ES2_USBHOST_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430ES2_EN_USBHOST1_SHIFT, .idlest_reg_id = 1, .idlest_idle_bit = OMAP3430ES2_ST_USBHOST_IDLE_SHIFT, .idlest_stdby_bit = OMAP3430ES2_ST_USBHOST_STDBY_SHIFT, }, }, .opt_clks = omap3xxx_usb_host_hs_opt_clks, .opt_clks_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_opt_clks), .slaves = omap3xxx_usb_host_hs_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_slaves), .masters = omap3xxx_usb_host_hs_masters, .masters_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_masters), /* * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock * id: i660 * * Description: * In the following configuration : * - USBHOST module is set to smart-idle mode * - PRCM asserts idle_req to the USBHOST module ( This typically * happens when the system is going to a low power mode : all ports * have been suspended, the master part of the USBHOST module has * entered the standby state, and SW has cut the functional clocks) * - an USBHOST interrupt occurs before the module is able to answer * idle_ack, typically a remote wakeup IRQ. * Then the USB HOST module will enter a deadlock situation where it * is no more accessible nor functional. * * Workaround: * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE */ /* * Errata: USB host EHCI may stall when entering smart-standby mode * Id: i571 * * Description: * When the USBHOST module is set to smart-standby mode, and when it is * ready to enter the standby state (i.e. all ports are suspended and * all attached devices are in suspend mode), then it can wrongly assert * the Mstandby signal too early while there are still some residual OCP * transactions ongoing. If this condition occurs, the internal state * machine may go to an undefined state and the USB link may be stuck * upon the next resume. 
* * Workaround: * Don't use smart standby; use only force standby, * hence HWMOD_SWSUP_MSTANDBY */ /* * During system boot; If the hwmod framework resets the module * the module will have smart idle settings; which can lead to deadlock * (above Errata Id:i660); so, dont reset the module during boot; * Use HWMOD_INIT_NO_RESET. */ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | HWMOD_INIT_NO_RESET, }; /* * 'usb_tll_hs' class * usb_tll_hs module is the adapter on the usb_host_hs ports */ static struct omap_hwmod_class_sysconfig omap3xxx_usb_tll_hs_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap3xxx_usb_tll_hs_hwmod_class = { .name = "usb_tll_hs", .sysc = &omap3xxx_usb_tll_hs_sysc, }; static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = { { .name = "tll-irq", .irq = 78 }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap3xxx_usb_tll_hs_addrs[] = { { .name = "tll", .pa_start = 0x48062000, .pa_end = 0x48062fff, .flags = ADDR_TYPE_RT }, {} }; static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_tll_hs = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap3xxx_usb_tll_hs_hwmod, .clk = "usbtll_ick", .addr = omap3xxx_usb_tll_hs_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_ocp_if *omap3xxx_usb_tll_hs_slaves[] = { &omap3xxx_l4_core__usb_tll_hs, }; static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = { .name = "usb_tll_hs", .class = &omap3xxx_usb_tll_hs_hwmod_class, .clkdm_name = "l3_init_clkdm", .mpu_irqs = omap3xxx_usb_tll_hs_irqs, .main_clk = "usbtll_fck", .prcm = { .omap2 = { .module_offs = CORE_MOD, .prcm_reg_id = 3, .module_bit = OMAP3430ES2_EN_USBTLL_SHIFT, .idlest_reg_id = 3, .idlest_idle_bit = OMAP3430ES2_ST_USBTLL_SHIFT, }, }, .slaves = 
omap3xxx_usb_tll_hs_slaves, .slaves_cnt = ARRAY_SIZE(omap3xxx_usb_tll_hs_slaves), }; static __initdata struct omap_hwmod *omap3xxx_hwmods[] = { &omap3xxx_l3_main_hwmod, &omap3xxx_l4_core_hwmod, &omap3xxx_l4_per_hwmod, &omap3xxx_l4_wkup_hwmod, &omap3xxx_mmc3_hwmod, &omap3xxx_mpu_hwmod, &omap3xxx_timer1_hwmod, &omap3xxx_timer2_hwmod, &omap3xxx_timer3_hwmod, &omap3xxx_timer4_hwmod, &omap3xxx_timer5_hwmod, &omap3xxx_timer6_hwmod, &omap3xxx_timer7_hwmod, &omap3xxx_timer8_hwmod, &omap3xxx_timer9_hwmod, &omap3xxx_timer10_hwmod, &omap3xxx_timer11_hwmod, &omap3xxx_wd_timer2_hwmod, &omap3xxx_uart1_hwmod, &omap3xxx_uart2_hwmod, &omap3xxx_uart3_hwmod, /* i2c class */ &omap3xxx_i2c1_hwmod, &omap3xxx_i2c2_hwmod, &omap3xxx_i2c3_hwmod, /* gpio class */ &omap3xxx_gpio1_hwmod, &omap3xxx_gpio2_hwmod, &omap3xxx_gpio3_hwmod, &omap3xxx_gpio4_hwmod, &omap3xxx_gpio5_hwmod, &omap3xxx_gpio6_hwmod, /* dma_system class*/ &omap3xxx_dma_system_hwmod, /* mcbsp class */ &omap3xxx_mcbsp1_hwmod, &omap3xxx_mcbsp2_hwmod, &omap3xxx_mcbsp3_hwmod, &omap3xxx_mcbsp4_hwmod, &omap3xxx_mcbsp5_hwmod, &omap3xxx_mcbsp2_sidetone_hwmod, &omap3xxx_mcbsp3_sidetone_hwmod, /* mcspi class */ &omap34xx_mcspi1, &omap34xx_mcspi2, &omap34xx_mcspi3, &omap34xx_mcspi4, NULL, }; /* GP-only hwmods */ static __initdata struct omap_hwmod *omap3xxx_gp_hwmods[] = { &omap3xxx_timer12_hwmod, NULL }; /* 3430ES1-only hwmods */ static __initdata struct omap_hwmod *omap3430es1_hwmods[] = { &omap3430es1_dss_core_hwmod, NULL }; /* 3430ES2+-only hwmods */ static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = { &omap3xxx_dss_core_hwmod, &omap3xxx_usbhsotg_hwmod, &omap3xxx_usb_host_hs_hwmod, &omap3xxx_usb_tll_hs_hwmod, NULL }; /* <= 3430ES3-only hwmods */ static struct omap_hwmod *omap3430_pre_es3_hwmods[] __initdata = { &omap3xxx_pre_es3_mmc1_hwmod, &omap3xxx_pre_es3_mmc2_hwmod, NULL }; /* 3430ES3+-only hwmods */ static struct omap_hwmod *omap3430_es3plus_hwmods[] __initdata = { &omap3xxx_es3plus_mmc1_hwmod, 
&omap3xxx_es3plus_mmc2_hwmod, NULL }; /* 34xx-only hwmods (all ES revisions) */ static __initdata struct omap_hwmod *omap34xx_hwmods[] = { &omap3xxx_iva_hwmod, &omap34xx_sr1_hwmod, &omap34xx_sr2_hwmod, &omap3xxx_mailbox_hwmod, NULL }; /* 36xx-only hwmods (all ES revisions) */ static __initdata struct omap_hwmod *omap36xx_hwmods[] = { &omap3xxx_iva_hwmod, &omap3xxx_uart4_hwmod, &omap3xxx_dss_core_hwmod, &omap36xx_sr1_hwmod, &omap36xx_sr2_hwmod, &omap3xxx_usbhsotg_hwmod, &omap3xxx_mailbox_hwmod, &omap3xxx_usb_host_hs_hwmod, &omap3xxx_usb_tll_hs_hwmod, &omap3xxx_es3plus_mmc1_hwmod, &omap3xxx_es3plus_mmc2_hwmod, NULL }; static __initdata struct omap_hwmod *am35xx_hwmods[] = { &omap3xxx_dss_core_hwmod, /* XXX ??? */ &am35xx_usbhsotg_hwmod, &am35xx_uart4_hwmod, &omap3xxx_usb_host_hs_hwmod, &omap3xxx_usb_tll_hs_hwmod, &omap3xxx_es3plus_mmc1_hwmod, &omap3xxx_es3plus_mmc2_hwmod, NULL }; static __initdata struct omap_hwmod *omap3xxx_dss_hwmods[] = { /* dss class */ &omap3xxx_dss_dispc_hwmod, &omap3xxx_dss_dsi1_hwmod, &omap3xxx_dss_rfbi_hwmod, &omap3xxx_dss_venc_hwmod, NULL }; int __init omap3xxx_hwmod_init(void) { int r; struct omap_hwmod **h = NULL; unsigned int rev; /* Register hwmods common to all OMAP3 */ r = omap_hwmod_register(omap3xxx_hwmods); if (r < 0) return r; /* Register GP-only hwmods. */ if (omap_type() == OMAP2_DEVICE_TYPE_GP) { r = omap_hwmod_register(omap3xxx_gp_hwmods); if (r < 0) return r; } rev = omap_rev(); /* * Register hwmods common to individual OMAP3 families, all * silicon revisions (e.g., 34xx, or AM3505/3517, or 36xx) * All possible revisions should be included in this conditional. 
*/ if (rev == OMAP3430_REV_ES1_0 || rev == OMAP3430_REV_ES2_0 || rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) { h = omap34xx_hwmods; } else if (rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1) { h = am35xx_hwmods; } else if (rev == OMAP3630_REV_ES1_0 || rev == OMAP3630_REV_ES1_1 || rev == OMAP3630_REV_ES1_2) { h = omap36xx_hwmods; } else { WARN(1, "OMAP3 hwmod family init: unknown chip type\n"); return -EINVAL; }; r = omap_hwmod_register(h); if (r < 0) return r; /* * Register hwmods specific to certain ES levels of a * particular family of silicon (e.g., 34xx ES1.0) */ h = NULL; if (rev == OMAP3430_REV_ES1_0) { h = omap3430es1_hwmods; } else if (rev == OMAP3430_REV_ES2_0 || rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) { h = omap3430es2plus_hwmods; }; if (h) { r = omap_hwmod_register(h); if (r < 0) return r; } h = NULL; if (rev == OMAP3430_REV_ES1_0 || rev == OMAP3430_REV_ES2_0 || rev == OMAP3430_REV_ES2_1) { h = omap3430_pre_es3_hwmods; } else if (rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) { h = omap3430_es3plus_hwmods; }; if (h) r = omap_hwmod_register(h); if (r < 0) return r; /* * DSS code presumes that dss_core hwmod is handled first, * _before_ any other DSS related hwmods so register common * DSS hwmods last to ensure that dss_core is already registered. * Otherwise some change things may happen, for ex. if dispc * is handled before dss_core and DSS is enabled in bootloader * DIPSC will be reset with outputs enabled which sometimes leads * to unrecoverable L3 error. * XXX The long-term fix to this is to ensure modules are set up * in dependency order in the hwmod core code. */ r = omap_hwmod_register(omap3xxx_dss_hwmods); return r; }
gpl-2.0
CyanHacker-Lollipop/kernel_motorola_msm8226
fs/notify/fsnotify.c
3991
9526
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/srcu.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
#include "../mount.h"

/*
 * Clear all of the marks on an inode when it is being evicted from core
 */
void __fsnotify_inode_delete(struct inode *inode)
{
	fsnotify_clear_marks_by_inode(inode);
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);

/* Clear all of the marks on a vfsmount when it is being destroyed. */
void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{
	fsnotify_clear_marks_by_mount(mnt);
}

/*
 * Given an inode, first check if we care what happens to our children.  Inotify
 * and dnotify both tell their parents about events.  If we care about any event
 * on a child we run all of our children and set a dentry flag saying that the
 * parent cares.  Thus when an event happens on a child it can quickly tell
 * if there is a need to find a parent and send the event to the parent.
 */
void __fsnotify_update_child_dentry_flags(struct inode *inode)
{
	struct dentry *alias;
	int watched;

	if (!S_ISDIR(inode->i_mode))
		return;

	/* determine if the children should tell inode about their events */
	watched = fsnotify_inode_watches_children(inode);

	spin_lock(&inode->i_lock);
	/* run all of the dentries associated with this inode.  Since this is a
	 * directory, there damn well better only be one item on this list */
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct dentry *child;

		/* run all of the children of the original inode and fix their
		 * d_flags to indicate parental interest (their parent is the
		 * original inode) */
		spin_lock(&alias->d_lock);
		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
			if (!child->d_inode)
				continue;

			/* child lock nests inside the alias (parent) lock */
			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
			if (watched)
				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
			else
				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
			spin_unlock(&child->d_lock);
		}
		spin_unlock(&alias->d_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Notify this dentry's parent about a child's events. */
int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
	struct dentry *parent;
	struct inode *p_inode;
	int ret = 0;

	if (!dentry)
		dentry = path->dentry;

	/* fast path: flag set by __fsnotify_update_child_dentry_flags() */
	if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
		return 0;

	parent = dget_parent(dentry);
	p_inode = parent->d_inode;

	if (unlikely(!fsnotify_inode_watches_children(p_inode)))
		/* flag was stale: parent no longer watches children */
		__fsnotify_update_child_dentry_flags(p_inode);
	else if (p_inode->i_fsnotify_mask & mask) {
		/* we are notifying a parent so come up with the new mask which
		 * specifies these are events which came from a child. */
		mask |= FS_EVENT_ON_CHILD;

		if (path)
			ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
				       dentry->d_name.name, 0);
		else
			ret = fsnotify(p_inode, mask, dentry->d_inode,
				       FSNOTIFY_EVENT_INODE,
				       dentry->d_name.name, 0);
	}

	dput(parent);

	return ret;
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);

/*
 * Deliver one event to the group owning inode_mark and/or vfsmount_mark.
 * Both marks, when present, belong to the same group.  *event is created
 * lazily on first need and reused for subsequent groups; the caller drops
 * the final reference.  Returns 0, -ENOMEM, or the group's handler result.
 */
static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
			 struct fsnotify_mark *inode_mark,
			 struct fsnotify_mark *vfsmount_mark,
			 __u32 mask, void *data,
			 int data_is, u32 cookie,
			 const unsigned char *file_name,
			 struct fsnotify_event **event)
{
	struct fsnotify_group *group = NULL;
	__u32 inode_test_mask = 0;
	__u32 vfsmount_test_mask = 0;

	if (unlikely(!inode_mark && !vfsmount_mark)) {
		BUG();
		return 0;
	}

	/* clear ignored on inode modification */
	if (mask & FS_MODIFY) {
		if (inode_mark &&
		    !(inode_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
			inode_mark->ignored_mask = 0;
		if (vfsmount_mark &&
		    !(vfsmount_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
			vfsmount_mark->ignored_mask = 0;
	}

	/* does the inode mark tell us to do something? */
	if (inode_mark) {
		group = inode_mark->group;
		inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
		inode_test_mask &= inode_mark->mask;
		inode_test_mask &= ~inode_mark->ignored_mask;
	}

	/* does the vfsmount_mark tell us to do something? */
	if (vfsmount_mark) {
		vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
		group = vfsmount_mark->group;
		vfsmount_test_mask &= vfsmount_mark->mask;
		vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
		if (inode_mark)
			vfsmount_test_mask &= ~inode_mark->ignored_mask;
	}

	pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p"
		 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
		 " data=%p data_is=%d cookie=%d event=%p\n",
		 __func__, group, to_tell, mnt, mask, inode_mark,
		 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
		 data_is, cookie, *event);

	if (!inode_test_mask && !vfsmount_test_mask)
		return 0;

	if (group->ops->should_send_event(group, to_tell, inode_mark,
					  vfsmount_mark, mask, data,
					  data_is) == false)
		return 0;

	/* create the event once and cache it for the remaining groups */
	if (!*event) {
		*event = fsnotify_create_event(to_tell, mask, data,
						data_is, file_name,
						cookie, GFP_KERNEL);
		if (!*event)
			return -ENOMEM;
	}
	return group->ops->handle_event(group, inode_mark, vfsmount_mark, *event);
}

/*
 * This is the main call to fsnotify.  The VFS calls into hook specific functions
 * in linux/fsnotify.h.  Those functions then in turn call here.  Here will call
 * out to all of the registered fsnotify_group.  Those groups can then use the
 * notification event in whatever means they feel necessary.
 */
int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
	     const unsigned char *file_name, u32 cookie)
{
	struct hlist_node *inode_node = NULL, *vfsmount_node = NULL;
	struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
	struct fsnotify_group *inode_group, *vfsmount_group;
	struct fsnotify_event *event = NULL;
	struct mount *mnt;
	int idx, ret = 0;
	/* global tests shouldn't care about events on child only the specific event */
	__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);

	if (data_is == FSNOTIFY_EVENT_PATH)
		mnt = real_mount(((struct path *)data)->mnt);
	else
		mnt = NULL;

	/*
	 * if this is a modify event we may need to clear the ignored masks
	 * otherwise return if neither the inode nor the vfsmount care about
	 * this type of event.
	 */
	if (!(mask & FS_MODIFY) &&
	    !(test_mask & to_tell->i_fsnotify_mask) &&
	    !(mnt && test_mask & mnt->mnt_fsnotify_mask))
		return 0;

	/* mark lists are walked under SRCU protection */
	idx = srcu_read_lock(&fsnotify_mark_srcu);

	if ((mask & FS_MODIFY) ||
	    (test_mask & to_tell->i_fsnotify_mask))
		inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
					      &fsnotify_mark_srcu);

	if (mnt && ((mask & FS_MODIFY) ||
		    (test_mask & mnt->mnt_fsnotify_mask))) {
		vfsmount_node = srcu_dereference(mnt->mnt_fsnotify_marks.first,
						 &fsnotify_mark_srcu);
		inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
					      &fsnotify_mark_srcu);
	}

	/*
	 * Merge-walk both mark lists.  Both lists are kept sorted by group
	 * pointer (presumably - see the group comparisons below), so equal
	 * groups are delivered to once with both marks at the same time.
	 */
	while (inode_node || vfsmount_node) {
		inode_group = vfsmount_group = NULL;

		if (inode_node) {
			inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
						 struct fsnotify_mark, i.i_list);
			inode_group = inode_mark->group;
		}

		if (vfsmount_node) {
			vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu),
						    struct fsnotify_mark, m.m_list);
			vfsmount_group = vfsmount_mark->group;
		}

		if (inode_group > vfsmount_group) {
			/* handle inode */
			ret = send_to_group(to_tell, NULL, inode_mark, NULL, mask, data,
					    data_is, cookie, file_name, &event);
			/* we didn't use the vfsmount_mark */
			vfsmount_group = NULL;
		} else if (vfsmount_group > inode_group) {
			ret = send_to_group(to_tell, &mnt->mnt, NULL, vfsmount_mark, mask, data,
					    data_is, cookie, file_name, &event);
			inode_group = NULL;
		} else {
			/* same group on both lists: deliver both marks together */
			ret = send_to_group(to_tell, &mnt->mnt, inode_mark, vfsmount_mark,
					    mask, data, data_is, cookie, file_name,
					    &event);
		}

		/* permission events abort on the first refusal */
		if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
			goto out;

		if (inode_group)
			inode_node = srcu_dereference(inode_node->next,
						      &fsnotify_mark_srcu);
		if (vfsmount_group)
			vfsmount_node = srcu_dereference(vfsmount_node->next,
							 &fsnotify_mark_srcu);
	}
	ret = 0;
out:
	srcu_read_unlock(&fsnotify_mark_srcu, idx);
	/*
	 * fsnotify_create_event() took a reference so the event can't be cleaned
	 * up while we are still trying to add it to lists, drop that one.
	 */
	if (event)
		fsnotify_put_event(event);

	return ret;
}
EXPORT_SYMBOL_GPL(fsnotify);

/* Set up the SRCU structure protecting the mark lists; sanity-check the
 * event-bit count at compile/boot time. */
static __init int fsnotify_init(void)
{
	int ret;

	BUG_ON(hweight32(ALL_FSNOTIFY_EVENTS) != 23);

	ret = init_srcu_struct(&fsnotify_mark_srcu);
	if (ret)
		panic("initializing fsnotify_mark_srcu");

	return 0;
}
core_initcall(fsnotify_init);
gpl-2.0
mifl/android_kernel_pantech_ef44s
drivers/staging/keucr/usb.c
5015
17634
#include <linux/sched.h> #include <linux/errno.h> #include <linux/freezer.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/utsname.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include "scsiglue.h" #include "smil.h" #include "transport.h" /* Some informational data */ MODULE_AUTHOR("Domao"); MODULE_DESCRIPTION("ENE USB Mass Storage driver for Linux"); MODULE_LICENSE("GPL"); static unsigned int delay_use = 1; static struct usb_device_id eucr_usb_ids [] = { { USB_DEVICE(0x058f, 0x6366) }, { USB_DEVICE(0x0cf2, 0x6230) }, { USB_DEVICE(0x0cf2, 0x6250) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, eucr_usb_ids); #ifdef CONFIG_PM static int eucr_suspend(struct usb_interface *iface, pm_message_t message) { struct us_data *us = usb_get_intfdata(iface); pr_info("--- eucr_suspend ---\n"); /* Wait until no command is running */ mutex_lock(&us->dev_mutex); //US_DEBUGP("%s\n", __func__); if (us->suspend_resume_hook) (us->suspend_resume_hook)(us, US_SUSPEND); /* When runtime PM is working, we'll set a flag to indicate * whether we should autoresume when a SCSI request arrives. */ // us->Power_IsResum = true; //us->SD_Status.Ready = 0; mutex_unlock(&us->dev_mutex); return 0; } //EXPORT_SYMBOL_GPL(eucr_suspend); static int eucr_resume(struct usb_interface *iface) { BYTE tmp = 0; struct us_data *us = usb_get_intfdata(iface); pr_info("--- eucr_resume---\n"); mutex_lock(&us->dev_mutex); //US_DEBUGP("%s\n", __func__); if (us->suspend_resume_hook) (us->suspend_resume_hook)(us, US_RESUME); mutex_unlock(&us->dev_mutex); us->Power_IsResum = true; // //us->SD_Status.Ready = 0; //?? 
us->SM_Status = *(PSM_STATUS)&tmp; return 0; } //EXPORT_SYMBOL_GPL(eucr_resume); static int eucr_reset_resume(struct usb_interface *iface) { BYTE tmp = 0; struct us_data *us = usb_get_intfdata(iface); pr_info("--- eucr_reset_resume---\n"); //US_DEBUGP("%s\n", __func__); /* Report the reset to the SCSI core */ usb_stor_report_bus_reset(us); /* FIXME: Notify the subdrivers that they need to reinitialize * the device */ //ENE_InitMedia(us); us->Power_IsResum = true; // //us->SD_Status.Ready = 0; //?? us->SM_Status = *(PSM_STATUS)&tmp; return 0; } //EXPORT_SYMBOL_GPL(usb_stor_reset_resume); #else #define eucr_suspend NULL #define eucr_resume NULL #define eucr_reset_resume NULL #endif //----- eucr_pre_reset() --------------------- static int eucr_pre_reset(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); pr_info("usb --- eucr_pre_reset\n"); /* Make sure no command runs during the reset */ mutex_lock(&us->dev_mutex); return 0; } //----- eucr_post_reset() --------------------- static int eucr_post_reset(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); pr_info("usb --- eucr_post_reset\n"); /* Report the reset to the SCSI core */ usb_stor_report_bus_reset(us); mutex_unlock(&us->dev_mutex); return 0; } //----- fill_inquiry_response() --------------------- void fill_inquiry_response(struct us_data *us, unsigned char *data, unsigned int data_len) { pr_info("usb --- fill_inquiry_response\n"); if (data_len<36) // You lose. return; if (data[0]&0x20) { memset(data+8,0,28); } else { u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice); memcpy(data+8, us->unusual_dev->vendorName, strlen(us->unusual_dev->vendorName) > 8 ? 8 : strlen(us->unusual_dev->vendorName)); memcpy(data+16, us->unusual_dev->productName, strlen(us->unusual_dev->productName) > 16 ? 
16 : strlen(us->unusual_dev->productName)); data[32] = 0x30 + ((bcdDevice>>12) & 0x0F); data[33] = 0x30 + ((bcdDevice>>8) & 0x0F); data[34] = 0x30 + ((bcdDevice>>4) & 0x0F); data[35] = 0x30 + ((bcdDevice) & 0x0F); } usb_stor_set_xfer_buf(us, data, data_len, us->srb, TO_XFER_BUF); } //----- usb_stor_control_thread() --------------------- static int usb_stor_control_thread(void * __us) { struct us_data *us = (struct us_data *)__us; struct Scsi_Host *host = us_to_host(us); pr_info("usb --- usb_stor_control_thread\n"); for(;;) { if (wait_for_completion_interruptible(&us->cmnd_ready)) break; /* lock the device pointers */ mutex_lock(&(us->dev_mutex)); /* if the device has disconnected, we are free to exit */ if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) { mutex_unlock(&us->dev_mutex); break; } /* lock access to the state */ scsi_lock(host); /* When we are called with no command pending, we're done */ if (us->srb == NULL) { scsi_unlock(host); mutex_unlock(&us->dev_mutex); //US_DEBUGP("-- exiting\n"); break; } /* has the command timed out *already* ? 
*/ if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { us->srb->result = DID_ABORT << 16; goto SkipForAbort; } scsi_unlock(host); if (us->srb->sc_data_direction == DMA_BIDIRECTIONAL) { us->srb->result = DID_ERROR << 16; } else if (us->srb->device->id && !(us->fflags & US_FL_SCM_MULT_TARG)) { us->srb->result = DID_BAD_TARGET << 16; } else if (us->srb->device->lun > us->max_lun) { us->srb->result = DID_BAD_TARGET << 16; } else if ((us->srb->cmnd[0] == INQUIRY) && (us->fflags & US_FL_FIX_INQUIRY)) { unsigned char data_ptr[36] = {0x00, 0x80, 0x02, 0x02, 0x1F, 0x00, 0x00, 0x00}; fill_inquiry_response(us, data_ptr, 36); us->srb->result = SAM_STAT_GOOD; } else { us->proto_handler(us->srb, us); } /* lock access to the state */ scsi_lock(host); /* indicate that the command is done */ if (us->srb->result != DID_ABORT << 16) { us->srb->scsi_done(us->srb); } else { SkipForAbort: pr_info("scsi command aborted\n"); } if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { complete(&(us->notify)); /* Allow USB transfers to resume */ clear_bit(US_FLIDX_ABORTING, &us->dflags); clear_bit(US_FLIDX_TIMED_OUT, &us->dflags); } /* finished working on this command */ us->srb = NULL; scsi_unlock(host); /* unlock the device pointers */ mutex_unlock(&us->dev_mutex); } /* for (;;) */ /* Wait until we are told to stop */ for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; schedule(); } __set_current_state(TASK_RUNNING); return 0; } //----- associate_dev() --------------------- static int associate_dev(struct us_data *us, struct usb_interface *intf) { pr_info("usb --- associate_dev\n"); /* Fill in the device-related fields */ us->pusb_dev = interface_to_usbdev(intf); us->pusb_intf = intf; us->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; /* Store our private data in the interface */ usb_set_intfdata(intf, us); /* Allocate the device-related DMA-mapped buffers */ us->cr = usb_alloc_coherent(us->pusb_dev, sizeof(*us->cr), GFP_KERNEL, &us->cr_dma); if (!us->cr) { 
pr_info("usb_ctrlrequest allocation failed\n"); return -ENOMEM; } us->iobuf = usb_alloc_coherent(us->pusb_dev, US_IOBUF_SIZE, GFP_KERNEL, &us->iobuf_dma); if (!us->iobuf) { pr_info("I/O buffer allocation failed\n"); return -ENOMEM; } us->sensebuf = kmalloc(US_SENSE_SIZE, GFP_KERNEL); if (!us->sensebuf) { pr_info("Sense buffer allocation failed\n"); return -ENOMEM; } return 0; } //----- get_device_info() --------------------- static int get_device_info(struct us_data *us, const struct usb_device_id *id) { struct usb_device *dev = us->pusb_dev; struct usb_interface_descriptor *idesc = &us->pusb_intf->cur_altsetting->desc; pr_info("usb --- get_device_info\n"); us->subclass = idesc->bInterfaceSubClass; us->protocol = idesc->bInterfaceProtocol; us->fflags = USB_US_ORIG_FLAGS(id->driver_info); us->Power_IsResum = false; if (us->fflags & US_FL_IGNORE_DEVICE) { pr_info("device ignored\n"); return -ENODEV; } if (dev->speed != USB_SPEED_HIGH) us->fflags &= ~US_FL_GO_SLOW; return 0; } //----- get_transport() --------------------- static int get_transport(struct us_data *us) { pr_info("usb --- get_transport\n"); switch (us->protocol) { case USB_PR_BULK: us->transport_name = "Bulk"; us->transport = usb_stor_Bulk_transport; us->transport_reset = usb_stor_Bulk_reset; break; default: return -EIO; } /* pr_info("Transport: %s\n", us->transport_name); */ /* fix for single-lun devices */ if (us->fflags & US_FL_SINGLE_LUN) us->max_lun = 0; return 0; } //----- get_protocol() --------------------- static int get_protocol(struct us_data *us) { pr_info("usb --- get_protocol\n"); pr_info("us->pusb_dev->descriptor.idVendor = %x\n", us->pusb_dev->descriptor.idVendor); pr_info("us->pusb_dev->descriptor.idProduct = %x\n", us->pusb_dev->descriptor.idProduct); switch (us->subclass) { case USB_SC_SCSI: us->protocol_name = "Transparent SCSI"; if( (us->pusb_dev->descriptor.idVendor == 0x0CF2) && (us->pusb_dev->descriptor.idProduct == 0x6250) ) us->proto_handler = ENE_stor_invoke_transport; else 
us->proto_handler = usb_stor_invoke_transport; break; default: return -EIO; } /* pr_info("Protocol: %s\n", us->protocol_name); */ return 0; } //----- get_pipes() --------------------- static int get_pipes(struct us_data *us) { struct usb_host_interface *altsetting = us->pusb_intf->cur_altsetting; int i; struct usb_endpoint_descriptor *ep; struct usb_endpoint_descriptor *ep_in = NULL; struct usb_endpoint_descriptor *ep_out = NULL; struct usb_endpoint_descriptor *ep_int = NULL; pr_info("usb --- get_pipes\n"); for (i = 0; i < altsetting->desc.bNumEndpoints; i++) { ep = &altsetting->endpoint[i].desc; if (usb_endpoint_xfer_bulk(ep)) { if (usb_endpoint_dir_in(ep)) { if (!ep_in) ep_in = ep; } else { if (!ep_out) ep_out = ep; } } else if (usb_endpoint_is_int_in(ep)) { if (!ep_int) ep_int = ep; } } if (!ep_in || !ep_out || (us->protocol == USB_PR_CBI && !ep_int)) { pr_info("Endpoint sanity check failed! Rejecting dev.\n"); return -EIO; } /* Calculate and store the pipe values */ us->send_ctrl_pipe = usb_sndctrlpipe(us->pusb_dev, 0); us->recv_ctrl_pipe = usb_rcvctrlpipe(us->pusb_dev, 0); us->send_bulk_pipe = usb_sndbulkpipe(us->pusb_dev, ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); us->recv_bulk_pipe = usb_rcvbulkpipe(us->pusb_dev, ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); if (ep_int) { us->recv_intr_pipe = usb_rcvintpipe(us->pusb_dev, ep_int->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); us->ep_bInterval = ep_int->bInterval; } return 0; } //----- usb_stor_acquire_resources() --------------------- static int usb_stor_acquire_resources(struct us_data *us) { struct task_struct *th; pr_info("usb --- usb_stor_acquire_resources\n"); us->current_urb = usb_alloc_urb(0, GFP_KERNEL); if (!us->current_urb) { pr_info("URB allocation failed\n"); return -ENOMEM; } /* Start up our control thread */ th = kthread_run(usb_stor_control_thread, us, "eucr-storage"); if (IS_ERR(th)) { pr_info("Unable to start control thread\n"); return PTR_ERR(th); } us->ctl_thread = th; 
return 0; } //----- usb_stor_release_resources() --------------------- static void usb_stor_release_resources(struct us_data *us) { pr_info("usb --- usb_stor_release_resources\n"); SM_FreeMem(); complete(&us->cmnd_ready); if (us->ctl_thread) kthread_stop(us->ctl_thread); /* Call the destructor routine, if it exists */ if (us->extra_destructor) { pr_info("-- calling extra_destructor()\n"); us->extra_destructor(us->extra); } /* Free the extra data and the URB */ kfree(us->extra); usb_free_urb(us->current_urb); } //----- dissociate_dev() --------------------- static void dissociate_dev(struct us_data *us) { pr_info("usb --- dissociate_dev\n"); kfree(us->sensebuf); /* Free the device-related DMA-mapped buffers */ if (us->cr) usb_free_coherent(us->pusb_dev, sizeof(*us->cr), us->cr, us->cr_dma); if (us->iobuf) usb_free_coherent(us->pusb_dev, US_IOBUF_SIZE, us->iobuf, us->iobuf_dma); /* Remove our private data from the interface */ usb_set_intfdata(us->pusb_intf, NULL); } //----- quiesce_and_remove_host() --------------------- static void quiesce_and_remove_host(struct us_data *us) { struct Scsi_Host *host = us_to_host(us); pr_info("usb --- quiesce_and_remove_host\n"); /* If the device is really gone, cut short reset delays */ if (us->pusb_dev->state == USB_STATE_NOTATTACHED) set_bit(US_FLIDX_DISCONNECTING, &us->dflags); /* Prevent SCSI-scanning (if it hasn't started yet) * and wait for the SCSI-scanning thread to stop. */ set_bit(US_FLIDX_DONT_SCAN, &us->dflags); wake_up(&us->delay_wait); wait_for_completion(&us->scanning_done); /* Removing the host will perform an orderly shutdown: caches * synchronized, disks spun down, etc. */ scsi_remove_host(host); /* Prevent any new commands from being accepted and cut short * reset delays. 
*/ scsi_lock(host); set_bit(US_FLIDX_DISCONNECTING, &us->dflags); scsi_unlock(host); wake_up(&us->delay_wait); } //----- release_everything() --------------------- static void release_everything(struct us_data *us) { pr_info("usb --- release_everything\n"); usb_stor_release_resources(us); dissociate_dev(us); scsi_host_put(us_to_host(us)); } //----- usb_stor_scan_thread() --------------------- static int usb_stor_scan_thread(void * __us) { struct us_data *us = (struct us_data *)__us; pr_info("usb --- usb_stor_scan_thread\n"); pr_info("EUCR : device found at %d\n", us->pusb_dev->devnum); set_freezable(); /* Wait for the timeout to expire or for a disconnect */ if (delay_use > 0) { wait_event_freezable_timeout(us->delay_wait, test_bit(US_FLIDX_DONT_SCAN, &us->dflags), delay_use * HZ); } /* If the device is still connected, perform the scanning */ if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) { /* For bulk-only devices, determine the max LUN value */ if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN)) { mutex_lock(&us->dev_mutex); us->max_lun = usb_stor_Bulk_max_lun(us); mutex_unlock(&us->dev_mutex); } scsi_scan_host(us_to_host(us)); pr_info("EUCR : device scan complete\n"); } complete_and_exit(&us->scanning_done, 0); } //----- eucr_probe() --------------------- static int eucr_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct Scsi_Host *host; struct us_data *us; int result; BYTE MiscReg03 = 0; struct task_struct *th; pr_info("usb --- eucr_probe\n"); host = scsi_host_alloc(&usb_stor_host_template, sizeof(*us)); if (!host) { pr_info("Unable to allocate the scsi host\n"); return -ENOMEM; } /* Allow 16-byte CDBs and thus > 2TB */ host->max_cmd_len = 16; us = host_to_us(host); memset(us, 0, sizeof(struct us_data)); mutex_init(&(us->dev_mutex)); init_completion(&us->cmnd_ready); init_completion(&(us->notify)); init_waitqueue_head(&us->delay_wait); init_completion(&us->scanning_done); /* Associate the us_data structure with the USB 
device */ result = associate_dev(us, intf); if (result) goto BadDevice; /* Get Device info */ result = get_device_info(us, id); if (result) goto BadDevice; /* Get the transport, protocol, and pipe settings */ result = get_transport(us); if (result) goto BadDevice; result = get_protocol(us); if (result) goto BadDevice; result = get_pipes(us); if (result) goto BadDevice; /* Acquire all the other resources and add the host */ result = usb_stor_acquire_resources(us); if (result) goto BadDevice; result = scsi_add_host(host, &intf->dev); if (result) { pr_info("Unable to add the scsi host\n"); goto BadDevice; } /* Start up the thread for delayed SCSI-device scanning */ th = kthread_create(usb_stor_scan_thread, us, "eucr-stor-scan"); if (IS_ERR(th)) { pr_info("Unable to start the device-scanning thread\n"); complete(&us->scanning_done); quiesce_and_remove_host(us); result = PTR_ERR(th); goto BadDevice; } wake_up_process(th); /* probe card type */ result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03); if (result != USB_STOR_XFER_GOOD) { result = USB_STOR_TRANSPORT_ERROR; quiesce_and_remove_host(us); goto BadDevice; } if (!(MiscReg03 & 0x02)) { result = -ENODEV; quiesce_and_remove_host(us); pr_info("keucr: The driver only supports SM/MS card.\ To use SD card, \ please build driver/usb/storage/ums-eneub6250.ko\n"); goto BadDevice; } return 0; /* We come here if there are any problems */ BadDevice: pr_info("usb --- eucr_probe failed\n"); release_everything(us); return result; } //----- eucr_disconnect() --------------------- static void eucr_disconnect(struct usb_interface *intf) { struct us_data *us = usb_get_intfdata(intf); pr_info("usb --- eucr_disconnect\n"); quiesce_and_remove_host(us); release_everything(us); } /*********************************************************************** * Initialization and registration ***********************************************************************/ //----- usb_storage_driver() --------------------- static struct usb_driver 
usb_storage_driver = { .name = "eucr", .probe = eucr_probe, .suspend = eucr_suspend, .resume = eucr_resume, .reset_resume = eucr_reset_resume, .disconnect = eucr_disconnect, .pre_reset = eucr_pre_reset, .post_reset = eucr_post_reset, .id_table = eucr_usb_ids, .soft_unbind = 1, }; module_usb_driver(usb_storage_driver);
gpl-2.0
kamarush/android_kernel_lge_hammerhead
drivers/hid/hid-zydacron.c
5271
4578
/* * HID driver for zydacron remote control * * Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk> */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" struct zc_device { struct input_dev *input_ep81; unsigned short last_key[4]; }; /* * Zydacron remote control has an invalid HID report descriptor, * that needs fixing before we can parse it. */ static __u8 *zc_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 253 && rdesc[0x96] == 0xbc && rdesc[0x97] == 0xff && rdesc[0xca] == 0xbc && rdesc[0xcb] == 0xff && rdesc[0xe1] == 0xbc && rdesc[0xe2] == 0xff) { hid_info(hdev, "fixing up zydacron remote control report descriptor\n"); rdesc[0x96] = rdesc[0xca] = rdesc[0xe1] = 0x0c; rdesc[0x97] = rdesc[0xcb] = rdesc[0xe2] = 0x00; } return rdesc; } #define zc_map_key_clear(c) \ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) static int zc_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { int i; struct zc_device *zc = hid_get_drvdata(hdev); zc->input_ep81 = hi->input; if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; dbg_hid("zynacron input mapping event [0x%x]\n", usage->hid & HID_USAGE); switch (usage->hid & HID_USAGE) { /* report 2 */ case 0x10: zc_map_key_clear(KEY_MODE); break; case 0x30: zc_map_key_clear(KEY_SCREEN); break; case 0x70: zc_map_key_clear(KEY_INFO); break; /* report 3 */ case 0x04: zc_map_key_clear(KEY_RADIO); break; /* report 4 */ case 0x0d: zc_map_key_clear(KEY_PVR); break; case 0x25: zc_map_key_clear(KEY_TV); break; case 0x47: zc_map_key_clear(KEY_AUDIO); break; case 0x49: zc_map_key_clear(KEY_AUX); break; 
case 0x4a: zc_map_key_clear(KEY_VIDEO); break; case 0x48: zc_map_key_clear(KEY_DVD); break; case 0x24: zc_map_key_clear(KEY_MENU); break; case 0x32: zc_map_key_clear(KEY_TEXT); break; default: return 0; } for (i = 0; i < 4; i++) zc->last_key[i] = 0; return 1; } static int zc_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct zc_device *zc = hid_get_drvdata(hdev); int ret = 0; unsigned key; unsigned short index; if (report->id == data[0]) { /* break keys */ for (index = 0; index < 4; index++) { key = zc->last_key[index]; if (key) { input_event(zc->input_ep81, EV_KEY, key, 0); zc->last_key[index] = 0; } } key = 0; switch (report->id) { case 0x02: case 0x03: switch (data[1]) { case 0x10: key = KEY_MODE; index = 0; break; case 0x30: key = KEY_SCREEN; index = 1; break; case 0x70: key = KEY_INFO; index = 2; break; case 0x04: key = KEY_RADIO; index = 3; break; } if (key) { input_event(zc->input_ep81, EV_KEY, key, 1); zc->last_key[index] = key; } ret = 1; break; } } return ret; } static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; struct zc_device *zc; zc = kzalloc(sizeof(*zc), GFP_KERNEL); if (zc == NULL) { hid_err(hdev, "can't alloc descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, zc); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } return 0; err_free: kfree(zc); return ret; } static void zc_remove(struct hid_device *hdev) { struct zc_device *zc = hid_get_drvdata(hdev); hid_hw_stop(hdev); kfree(zc); } static const struct hid_device_id zc_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, { } }; MODULE_DEVICE_TABLE(hid, zc_devices); static struct hid_driver zc_driver = { .name = "zydacron", .id_table = zc_devices, .report_fixup = zc_report_fixup, .input_mapping = zc_input_mapping, .raw_event = 
zc_raw_event, .probe = zc_probe, .remove = zc_remove, }; static int __init zc_init(void) { return hid_register_driver(&zc_driver); } static void __exit zc_exit(void) { hid_unregister_driver(&zc_driver); } module_init(zc_init); module_exit(zc_exit); MODULE_LICENSE("GPL");
gpl-2.0
Toni5830/kernel_u8500
arch/sparc/kernel/auxio_64.c
7575
3195
/* auxio.c: Probing for the Sparc AUXIO register at boot time. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/of_device.h> #include <asm/prom.h> #include <asm/io.h> #include <asm/auxio.h> void __iomem *auxio_register = NULL; EXPORT_SYMBOL(auxio_register); enum auxio_type { AUXIO_TYPE_NODEV, AUXIO_TYPE_SBUS, AUXIO_TYPE_EBUS }; static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV; static DEFINE_SPINLOCK(auxio_lock); static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus) { if (auxio_register) { unsigned long flags; u8 regval, newval; spin_lock_irqsave(&auxio_lock, flags); regval = (ebus ? (u8) readl(auxio_register) : sbus_readb(auxio_register)); newval = regval | bits_on; newval &= ~bits_off; if (!ebus) newval &= ~AUXIO_AUX1_MASK; if (ebus) writel((u32) newval, auxio_register); else sbus_writeb(newval, auxio_register); spin_unlock_irqrestore(&auxio_lock, flags); } } static void __auxio_set_bit(u8 bit, int on, int ebus) { u8 bits_on = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED); u8 bits_off = 0; if (!on) { u8 tmp = bits_off; bits_off = bits_on; bits_on = tmp; } __auxio_rmw(bits_on, bits_off, ebus); } void auxio_set_led(int on) { int ebus = auxio_devtype == AUXIO_TYPE_EBUS; u8 bit; bit = (ebus ? 
AUXIO_PCIO_LED : AUXIO_AUX1_LED); __auxio_set_bit(bit, on, ebus); } EXPORT_SYMBOL(auxio_set_led); static void __auxio_sbus_set_lte(int on) { __auxio_set_bit(AUXIO_AUX1_LTE, on, 0); } void auxio_set_lte(int on) { switch(auxio_devtype) { case AUXIO_TYPE_SBUS: __auxio_sbus_set_lte(on); break; case AUXIO_TYPE_EBUS: /* FALL-THROUGH */ default: break; } } EXPORT_SYMBOL(auxio_set_lte); static const struct of_device_id auxio_match[] = { { .name = "auxio", }, {}, }; MODULE_DEVICE_TABLE(of, auxio_match); static int __devinit auxio_probe(struct platform_device *dev) { struct device_node *dp = dev->dev.of_node; unsigned long size; if (!strcmp(dp->parent->name, "ebus")) { auxio_devtype = AUXIO_TYPE_EBUS; size = sizeof(u32); } else if (!strcmp(dp->parent->name, "sbus")) { auxio_devtype = AUXIO_TYPE_SBUS; size = 1; } else { printk("auxio: Unknown parent bus type [%s]\n", dp->parent->name); return -ENODEV; } auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio"); if (!auxio_register) return -ENODEV; printk(KERN_INFO "AUXIO: Found device at %s\n", dp->full_name); if (auxio_devtype == AUXIO_TYPE_EBUS) auxio_set_led(AUXIO_LED_ON); return 0; } static struct platform_driver auxio_driver = { .probe = auxio_probe, .driver = { .name = "auxio", .owner = THIS_MODULE, .of_match_table = auxio_match, }, }; static int __init auxio_init(void) { return platform_driver_register(&auxio_driver); } /* Must be after subsys_initcall() so that busses are probed. Must * be before device_initcall() because things like the floppy driver * need to use the AUXIO register. */ fs_initcall(auxio_init);
gpl-2.0
invisiblek/android_kernel_lge_vs450pp
drivers/input/gameport/lightning.c
13975
7051
/* * Copyright (c) 1998-2001 Vojtech Pavlik */ /* * PDPI Lightning 4 gamecard driver for Linux. */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <asm/io.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/gameport.h> #define L4_PORT 0x201 #define L4_SELECT_ANALOG 0xa4 #define L4_SELECT_DIGITAL 0xa5 #define L4_SELECT_SECONDARY 0xa6 #define L4_CMD_ID 0x80 #define L4_CMD_GETCAL 0x92 #define L4_CMD_SETCAL 0x93 #define L4_ID 0x04 #define L4_BUSY 0x01 #define L4_TIMEOUT 80 /* 80 us */ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("PDPI Lightning 4 gamecard driver"); MODULE_LICENSE("GPL"); struct l4 { struct gameport *gameport; unsigned char port; }; static struct l4 l4_ports[8]; /* * l4_wait_ready() waits for the L4 to become ready. */ static int l4_wait_ready(void) { unsigned int t = L4_TIMEOUT; while ((inb(L4_PORT) & L4_BUSY) && t > 0) t--; return -(t <= 0); } /* * l4_cooked_read() reads data from the Lightning 4. 
*/ static int l4_cooked_read(struct gameport *gameport, int *axes, int *buttons) { struct l4 *l4 = gameport->port_data; unsigned char status; int i, result = -1; outb(L4_SELECT_ANALOG, L4_PORT); outb(L4_SELECT_DIGITAL + (l4->port >> 2), L4_PORT); if (inb(L4_PORT) & L4_BUSY) goto fail; outb(l4->port & 3, L4_PORT); if (l4_wait_ready()) goto fail; status = inb(L4_PORT); for (i = 0; i < 4; i++) if (status & (1 << i)) { if (l4_wait_ready()) goto fail; axes[i] = inb(L4_PORT); if (axes[i] > 252) axes[i] = -1; } if (status & 0x10) { if (l4_wait_ready()) goto fail; *buttons = inb(L4_PORT) & 0x0f; } result = 0; fail: outb(L4_SELECT_ANALOG, L4_PORT); return result; } static int l4_open(struct gameport *gameport, int mode) { struct l4 *l4 = gameport->port_data; if (l4->port != 0 && mode != GAMEPORT_MODE_COOKED) return -1; outb(L4_SELECT_ANALOG, L4_PORT); return 0; } /* * l4_getcal() reads the L4 with calibration values. */ static int l4_getcal(int port, int *cal) { int i, result = -1; outb(L4_SELECT_ANALOG, L4_PORT); outb(L4_SELECT_DIGITAL + (port >> 2), L4_PORT); if (inb(L4_PORT) & L4_BUSY) goto out; outb(L4_CMD_GETCAL, L4_PORT); if (l4_wait_ready()) goto out; if (inb(L4_PORT) != L4_SELECT_DIGITAL + (port >> 2)) goto out; if (l4_wait_ready()) goto out; outb(port & 3, L4_PORT); for (i = 0; i < 4; i++) { if (l4_wait_ready()) goto out; cal[i] = inb(L4_PORT); } result = 0; out: outb(L4_SELECT_ANALOG, L4_PORT); return result; } /* * l4_setcal() programs the L4 with calibration values. 
*/ static int l4_setcal(int port, int *cal) { int i, result = -1; outb(L4_SELECT_ANALOG, L4_PORT); outb(L4_SELECT_DIGITAL + (port >> 2), L4_PORT); if (inb(L4_PORT) & L4_BUSY) goto out; outb(L4_CMD_SETCAL, L4_PORT); if (l4_wait_ready()) goto out; if (inb(L4_PORT) != L4_SELECT_DIGITAL + (port >> 2)) goto out; if (l4_wait_ready()) goto out; outb(port & 3, L4_PORT); for (i = 0; i < 4; i++) { if (l4_wait_ready()) goto out; outb(cal[i], L4_PORT); } result = 0; out: outb(L4_SELECT_ANALOG, L4_PORT); return result; } /* * l4_calibrate() calibrates the L4 for the attached device, so * that the device's resistance fits into the L4's 8-bit range. */ static int l4_calibrate(struct gameport *gameport, int *axes, int *max) { int i, t; int cal[4]; struct l4 *l4 = gameport->port_data; if (l4_getcal(l4->port, cal)) return -1; for (i = 0; i < 4; i++) { t = (max[i] * cal[i]) / 200; t = (t < 1) ? 1 : ((t > 255) ? 255 : t); axes[i] = (axes[i] < 0) ? -1 : (axes[i] * cal[i]) / t; axes[i] = (axes[i] > 252) ? 252 : axes[i]; cal[i] = t; } if (l4_setcal(l4->port, cal)) return -1; return 0; } static int __init l4_create_ports(int card_no) { struct l4 *l4; struct gameport *port; int i, idx; for (i = 0; i < 4; i++) { idx = card_no * 4 + i; l4 = &l4_ports[idx]; if (!(l4->gameport = port = gameport_allocate_port())) { printk(KERN_ERR "lightning: Memory allocation failed\n"); while (--i >= 0) { gameport_free_port(l4->gameport); l4->gameport = NULL; } return -ENOMEM; } l4->port = idx; port->port_data = l4; port->open = l4_open; port->cooked_read = l4_cooked_read; port->calibrate = l4_calibrate; gameport_set_name(port, "PDPI Lightning 4"); gameport_set_phys(port, "isa%04x/gameport%d", L4_PORT, idx); if (idx == 0) port->io = L4_PORT; } return 0; } static int __init l4_add_card(int card_no) { int cal[4] = { 255, 255, 255, 255 }; int i, rev, result; struct l4 *l4; outb(L4_SELECT_ANALOG, L4_PORT); outb(L4_SELECT_DIGITAL + card_no, L4_PORT); if (inb(L4_PORT) & L4_BUSY) return -1; outb(L4_CMD_ID, L4_PORT); 
if (l4_wait_ready()) return -1; if (inb(L4_PORT) != L4_SELECT_DIGITAL + card_no) return -1; if (l4_wait_ready()) return -1; if (inb(L4_PORT) != L4_ID) return -1; if (l4_wait_ready()) return -1; rev = inb(L4_PORT); if (!rev) return -1; result = l4_create_ports(card_no); if (result) return result; printk(KERN_INFO "gameport: PDPI Lightning 4 %s card v%d.%d at %#x\n", card_no ? "secondary" : "primary", rev >> 4, rev, L4_PORT); for (i = 0; i < 4; i++) { l4 = &l4_ports[card_no * 4 + i]; if (rev > 0x28) /* on 2.9+ the setcal command works correctly */ l4_setcal(l4->port, cal); gameport_register_port(l4->gameport); } return 0; } static int __init l4_init(void) { int i, cards = 0; if (!request_region(L4_PORT, 1, "lightning")) return -EBUSY; for (i = 0; i < 2; i++) if (l4_add_card(i) == 0) cards++; outb(L4_SELECT_ANALOG, L4_PORT); if (!cards) { release_region(L4_PORT, 1); return -ENODEV; } return 0; } static void __exit l4_exit(void) { int i; int cal[4] = { 59, 59, 59, 59 }; for (i = 0; i < 8; i++) if (l4_ports[i].gameport) { l4_setcal(l4_ports[i].port, cal); gameport_unregister_port(l4_ports[i].gameport); } outb(L4_SELECT_ANALOG, L4_PORT); release_region(L4_PORT, 1); } module_init(l4_init); module_exit(l4_exit);
gpl-2.0
tejaswanjari/SMR_FS-EXT4
kernel/net/sched/cls_cgroup.c
408
5474
/* * net/sched/cls_cgroup.c Control Group Classifier * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Thomas Graf <tgraf@suug.ch> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/rcupdate.h> #include <net/rtnetlink.h> #include <net/pkt_cls.h> #include <net/sock.h> #include <net/cls_cgroup.h> struct cls_cgroup_head { u32 handle; struct tcf_exts exts; struct tcf_ematch_tree ematches; struct tcf_proto *tp; struct rcu_head rcu; }; static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); u32 classid; classid = task_cls_state(current)->classid; /* * Due to the nature of the classifier it is required to ignore all * packets originating from softirq context as accessing `current' * would lead to false results. * * This test assumes that all callers of dev_queue_xmit() explicitely * disable bh. Knowing this, it is possible to detect softirq based * calls by looking at the number of nested bh disable calls because * softirqs always disables bh. */ if (in_serving_softirq()) { /* If there is an sk_classid we'll use that. 
*/ if (!skb->sk) return -1; classid = skb->sk->sk_classid; } if (!classid) return -1; if (!tcf_em_tree_match(skb, &head->ematches, NULL)) return -1; res->classid = classid; res->class = 0; return tcf_exts_exec(skb, &head->exts, res); } static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle) { return 0UL; } static int cls_cgroup_init(struct tcf_proto *tp) { return 0; } static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = { [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, }; static void cls_cgroup_destroy_rcu(struct rcu_head *root) { struct cls_cgroup_head *head = container_of(root, struct cls_cgroup_head, rcu); tcf_exts_destroy(&head->exts); tcf_em_tree_destroy(&head->ematches); kfree(head); } static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, unsigned long *arg, bool ovr) { struct nlattr *tb[TCA_CGROUP_MAX + 1]; struct cls_cgroup_head *head = rtnl_dereference(tp->root); struct cls_cgroup_head *new; struct tcf_ematch_tree t; struct tcf_exts e; int err; if (!tca[TCA_OPTIONS]) return -EINVAL; if (!head && !handle) return -EINVAL; if (head && handle != head->handle) return -ENOENT; new = kzalloc(sizeof(*head), GFP_KERNEL); if (!new) return -ENOBUFS; tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); new->handle = handle; new->tp = tp; err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS], cgroup_policy); if (err < 0) goto errout; tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); if (err < 0) goto errout; err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t); if (err < 0) { tcf_exts_destroy(&e); goto errout; } tcf_exts_change(tp, &new->exts, &e); tcf_em_tree_change(tp, &new->ematches, &t); rcu_assign_pointer(tp->root, new); if (head) call_rcu(&head->rcu, cls_cgroup_destroy_rcu); return 0; errout: kfree(new); return err; } static bool cls_cgroup_destroy(struct 
tcf_proto *tp, bool force) { struct cls_cgroup_head *head = rtnl_dereference(tp->root); if (!force) return false; if (head) { RCU_INIT_POINTER(tp->root, NULL); call_rcu(&head->rcu, cls_cgroup_destroy_rcu); } return true; } static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg) { return -EOPNOTSUPP; } static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg) { struct cls_cgroup_head *head = rtnl_dereference(tp->root); if (arg->count < arg->skip) goto skip; if (arg->fn(tp, (unsigned long) head, arg) < 0) { arg->stop = 1; return; } skip: arg->count++; } static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct cls_cgroup_head *head = rtnl_dereference(tp->root); struct nlattr *nest; t->tcm_handle = head->handle; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; if (tcf_exts_dump(skb, &head->exts) < 0 || tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0) goto nla_put_failure; nla_nest_end(skb, nest); if (tcf_exts_dump_stats(skb, &head->exts) < 0) goto nla_put_failure; return skb->len; nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static struct tcf_proto_ops cls_cgroup_ops __read_mostly = { .kind = "cgroup", .init = cls_cgroup_init, .change = cls_cgroup_change, .classify = cls_cgroup_classify, .destroy = cls_cgroup_destroy, .get = cls_cgroup_get, .delete = cls_cgroup_delete, .walk = cls_cgroup_walk, .dump = cls_cgroup_dump, .owner = THIS_MODULE, }; static int __init init_cgroup_cls(void) { return register_tcf_proto_ops(&cls_cgroup_ops); } static void __exit exit_cgroup_cls(void) { unregister_tcf_proto_ops(&cls_cgroup_ops); } module_init(init_cgroup_cls); module_exit(exit_cgroup_cls); MODULE_LICENSE("GPL");
gpl-2.0
issi5862/ishida_jbd2_linux-1.0
arch/arm/mach-tegra/cpuidle-tegra30.c
408
3441
/* * CPU idle driver for Tegra CPUs * * Copyright (c) 2010-2012, NVIDIA Corporation. * Copyright (c) 2011 Google, Inc. * Author: Colin Cross <ccross@android.com> * Gary King <gking@nvidia.com> * * Rework for 3.3 by Peter De Schrijver <pdeschrijver@nvidia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/clk/tegra.h> #include <linux/clockchips.h> #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/kernel.h> #include <linux/module.h> #include <asm/cpuidle.h> #include <asm/proc-fns.h> #include <asm/smp_plat.h> #include <asm/suspend.h> #include "pm.h" #include "sleep.h" #ifdef CONFIG_PM_SLEEP static int tegra30_idle_lp2(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); #endif static struct cpuidle_driver tegra_idle_driver = { .name = "tegra_idle", .owner = THIS_MODULE, #ifdef CONFIG_PM_SLEEP .state_count = 2, #else .state_count = 1, #endif .states = { [0] = ARM_CPUIDLE_WFI_STATE_PWR(600), #ifdef CONFIG_PM_SLEEP [1] = { .enter = tegra30_idle_lp2, .exit_latency = 2000, .target_residency = 2200, .power_usage = 0, .flags = CPUIDLE_FLAG_TIME_VALID, .name = "powered-down", .desc = "CPU power gated", }, #endif }, }; #ifdef CONFIG_PM_SLEEP static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { /* All CPUs entering LP2 is not working. * Don't let CPU0 enter LP2 when any secondary CPU is online. 
*/ if (num_online_cpus() > 1 || !tegra_cpu_rail_off_ready()) { cpu_do_idle(); return false; } clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); tegra_idle_lp2_last(); clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); return true; } #ifdef CONFIG_SMP static bool tegra30_cpu_core_power_down(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); smp_wmb(); cpu_suspend(0, tegra30_sleep_cpu_secondary_finish); clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); return true; } #else static inline bool tegra30_cpu_core_power_down(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { return true; } #endif static int tegra30_idle_lp2(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { bool entered_lp2 = false; bool last_cpu; local_fiq_disable(); last_cpu = tegra_set_cpu_in_lp2(); cpu_pm_enter(); if (dev->cpu == 0) { if (last_cpu) entered_lp2 = tegra30_cpu_cluster_power_down(dev, drv, index); else cpu_do_idle(); } else { entered_lp2 = tegra30_cpu_core_power_down(dev, drv, index); } cpu_pm_exit(); tegra_clear_cpu_in_lp2(); local_fiq_enable(); smp_rmb(); return (entered_lp2) ? index : 0; } #endif int __init tegra30_cpuidle_init(void) { return cpuidle_register(&tegra_idle_driver, NULL); }
gpl-2.0
sivu/linux
arch/blackfin/mach-bf527/boards/cm_bf527.c
1944
21391
/* * Copyright 2004-2009 Analog Devices Inc. * 2008-2009 Bluetechnix * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/etherdevice.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/usb/musb.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/reboot.h> #include <asm/nand.h> #include <asm/portmux.h> #include <asm/dpmc.h> #include <linux/spi/ad7877.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "Bluetechnix CM-BF527"; /* * Driver needs to know address, irq and flag pin. */ #if IS_ENABLED(CONFIG_USB_ISP1760_HCD) #include <linux/usb/isp1760.h> static struct resource bfin_isp1760_resources[] = { [0] = { .start = 0x203C0000, .end = 0x203C0000 + 0x000fffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ, }, }; static struct isp1760_platform_data isp1760_priv = { .is_isp1761 = 0, .bus_width_16 = 1, .port1_otg = 0, .analog_oc = 0, .dack_polarity_high = 0, .dreq_polarity_high = 0, }; static struct platform_device bfin_isp1760_device = { .name = "isp1760", .id = 0, .dev = { .platform_data = &isp1760_priv, }, .num_resources = ARRAY_SIZE(bfin_isp1760_resources), .resource = bfin_isp1760_resources, }; #endif #if IS_ENABLED(CONFIG_USB_MUSB_HDRC) static struct resource musb_resources[] = { [0] = { .start = 0xffc03800, .end = 0xffc03cff, .flags = IORESOURCE_MEM, }, [1] = { /* general IRQ */ .start = IRQ_USB_INT0, .end = IRQ_USB_INT0, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, .name = "mc" }, [2] = { /* DMA IRQ */ .start = IRQ_USB_DMA, .end = IRQ_USB_DMA, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, .name = "dma" }, }; 
static struct musb_hdrc_config musb_config = { .multipoint = 0, .dyn_fifo = 0, .soft_con = 1, .dma = 1, .num_eps = 8, .dma_channels = 8, .gpio_vrsel = GPIO_PF11, /* Some custom boards need to be active low, just set it to "0" * if it is the case. */ .gpio_vrsel_active = 1, .clkin = 24, /* musb CLKIN in MHZ */ }; static struct musb_hdrc_platform_data musb_plat = { #if defined(CONFIG_USB_MUSB_OTG) .mode = MUSB_OTG, #elif defined(CONFIG_USB_MUSB_HDRC_HCD) .mode = MUSB_HOST, #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) .mode = MUSB_PERIPHERAL, #endif .config = &musb_config, }; static u64 musb_dmamask = ~(u32)0; static struct platform_device musb_device = { .name = "musb-blackfin", .id = 0, .dev = { .dma_mask = &musb_dmamask, .coherent_dma_mask = 0xffffffff, .platform_data = &musb_plat, }, .num_resources = ARRAY_SIZE(musb_resources), .resource = musb_resources, }; #endif #if IS_ENABLED(CONFIG_MTD_NAND_BF5XX) static struct mtd_partition partition_info[] = { { .name = "linux kernel(nand)", .offset = 0, .size = 4 * 1024 * 1024, }, { .name = "file system(nand)", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct bf5xx_nand_platform bf5xx_nand_platform = { .data_width = NFC_NWIDTH_8, .partitions = partition_info, .nr_partitions = ARRAY_SIZE(partition_info), .rd_dly = 3, .wr_dly = 3, }; static struct resource bf5xx_nand_resources[] = { { .start = NFC_CTL, .end = NFC_DATA_RD + 2, .flags = IORESOURCE_MEM, }, { .start = CH_NFC, .end = CH_NFC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bf5xx_nand_device = { .name = "bf5xx-nand", .id = 0, .num_resources = ARRAY_SIZE(bf5xx_nand_resources), .resource = bf5xx_nand_resources, .dev = { .platform_data = &bf5xx_nand_platform, }, }; #endif #if IS_ENABLED(CONFIG_BFIN_CFPCMCIA) static struct resource bfin_pcmcia_cf_resources[] = { { .start = 0x20310000, /* IO PORT */ .end = 0x20312000, .flags = IORESOURCE_MEM, }, { .start = 0x20311000, /* Attribute Memory */ .end = 0x20311FFF, .flags = 
IORESOURCE_MEM, }, { .start = IRQ_PF4, .end = IRQ_PF4, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, { .start = 6, /* Card Detect PF6 */ .end = 6, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_pcmcia_cf_device = { .name = "bfin_cf_pcmcia", .id = -1, .num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources), .resource = bfin_pcmcia_cf_resources, }; #endif #if IS_ENABLED(CONFIG_RTC_DRV_BFIN) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if IS_ENABLED(CONFIG_SMC91X) #include <linux/smc91x.h> static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { { .name = "smc91x-regs", .start = 0x20300300, .end = 0x20300300 + 16, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &smc91x_info, }, }; #endif #if IS_ENABLED(CONFIG_DM9000) static struct resource dm9000_resources[] = { [0] = { .start = 0x203FB800, .end = 0x203FB800 + 1, .flags = IORESOURCE_MEM, }, [1] = { .start = 0x203FB804, .end = 0x203FB804 + 1, .flags = IORESOURCE_MEM, }, [2] = { .start = IRQ_PF9, .end = IRQ_PF9, .flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE), }, }; static struct platform_device dm9000_device = { .name = "dm9000", .id = -1, .num_resources = ARRAY_SIZE(dm9000_resources), .resource = dm9000_resources, }; #endif #if IS_ENABLED(CONFIG_BFIN_MAC) #include <linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_RMII0; static struct bfin_phydev_platform_data bfin_phydev_data[] = { { .addr = 1, .irq = IRQ_MAC_PHYINT, }, }; static struct bfin_mii_bus_platform_data bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = 
PHY_INTERFACE_MODE_RMII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { .platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if IS_ENABLED(CONFIG_USB_NET2272) static struct resource net2272_bfin_resources[] = { { .start = 0x20300000, .end = 0x20300000 + 0x100, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device net2272_bfin_device = { .name = "net2272", .id = -1, .num_resources = ARRAY_SIZE(net2272_bfin_resources), .resource = net2272_bfin_resources, }; #endif #if IS_ENABLED(CONFIG_MTD_M25P80) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00040000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "m25p16", }; /* SPI flash chip (m25p64) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif #if IS_ENABLED(CONFIG_MMC_SPI) static struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, }; #endif #if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877) static const struct ad7877_platform_data bfin_ad7877_ts_info = { .model = 7877, .vref_delay_usecs = 50, /* internal, no capacitor */ .x_plate_ohms = 419, .y_plate_ohms = 486, .pressure_max = 1000, .pressure_min = 0, .stopacq_polarity = 1, .first_conversion_delay = 3, .acquisition_time = 1, .averaging = 1, .pen_down_acc_interval = 1, }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if IS_ENABLED(CONFIG_MTD_M25P80) { /* the modalias must be the same 
as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X) { .modalias = "ad183x", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 4, }, #endif #if IS_ENABLED(CONFIG_MMC_SPI) { .modalias = "mmc_spi", .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_3, }, #endif #if IS_ENABLED(CONFIG_TOUCHSCREEN_AD7877) { .modalias = "ad7877", .platform_data = &bfin_ad7877_ts_info, .irq = IRQ_PF8, .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 2, }, #endif #if IS_ENABLED(CONFIG_SND_SOC_WM8731) \ && defined(CONFIG_SND_SOC_WM8731_SPI) { .modalias = "wm8731", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .mode = SPI_MODE_0, }, #endif #if IS_ENABLED(CONFIG_SPI_SPIDEV) { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, }, #endif }; #if IS_ENABLED(CONFIG_SPI_BFIN5XX) /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_spi0_device = { .name = 
"bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if IS_ENABLED(CONFIG_MTD_GPIO_ADDR) static struct mtd_partition cm_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0x100000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data cm_flash_data = { .width = 2, .parts = cm_partitions, .nr_parts = ARRAY_SIZE(cm_partitions), }; static unsigned cm_flash_gpios[] = { GPIO_PH9, GPIO_PG11 }; static struct resource cm_flash_resource[] = { { .name = "cfi_probe", .start = 0x20000000, .end = 0x201fffff, .flags = IORESOURCE_MEM, }, { .start = (unsigned long)cm_flash_gpios, .end = ARRAY_SIZE(cm_flash_gpios), .flags = IORESOURCE_IRQ, } }; static struct platform_device cm_flash_device = { .name = "gpio-addr-flash", .id = 0, .dev = { .platform_data = &cm_flash_data, }, .num_resources = ARRAY_SIZE(cm_flash_resource), .resource = cm_flash_resource, }; #endif #if IS_ENABLED(CONFIG_SERIAL_BFIN) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = 
bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags = IORESOURCE_DMA, }, #ifdef CONFIG_BFIN_UART1_CTSRTS { /* CTS pin */ .start = GPIO_PF9, .end = GPIO_PF9, .flags = IORESOURCE_IO, }, { /* RTS pin */ .start = GPIO_PF10, .end = GPIO_PF10, .flags = IORESOURCE_IO, }, #endif }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if IS_ENABLED(CONFIG_BFIN_SIR) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct 
platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif #if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI) static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0}; static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI, .end = IRQ_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi_device = { .name = "i2c-bfin-twi", .id = 0, .num_resources = ARRAY_SIZE(bfin_twi0_resource), .resource = bfin_twi0_resource, .dev = { .platform_data = &bfin_twi0_pins, }, }; #endif static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if IS_ENABLED(CONFIG_BFIN_TWI_LCD) { I2C_BOARD_INFO("pcf8574_lcd", 0x22), }, #endif #if IS_ENABLED(CONFIG_INPUT_PCF8574) { I2C_BOARD_INFO("pcf8574_keypad", 0x27), .irq = IRQ_PF8, }, #endif #if IS_ENABLED(CONFIG_FB_BFIN_7393) { I2C_BOARD_INFO("bfin-adv7393", 0x2B), }, #endif }; #if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = 
IORESOURCE_MEM, }, { .start = IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif #endif #if IS_ENABLED(CONFIG_KEYBOARD_GPIO) #include <linux/input.h> #include <linux/gpio_keys.h> static struct gpio_keys_button bfin_gpio_keys_table[] = { {BTN_0, GPIO_PF14, 1, "gpio-keys: BTN0"}, }; static struct gpio_keys_platform_data bfin_gpio_keys_data = { .buttons = bfin_gpio_keys_table, .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table), }; static struct platform_device bfin_device_gpiokeys = { .name = "gpio-keys", .dev = { .platform_data = &bfin_gpio_keys_data, }, }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_100, 400000000), VRPAIR(VLEV_105, 426000000), VRPAIR(VLEV_110, 500000000), VRPAIR(VLEV_115, 533000000), VRPAIR(VLEV_120, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; static struct platform_device *cmbf527_devices[] __initdata = { &bfin_dpmc, #if IS_ENABLED(CONFIG_MTD_NAND_BF5XX) &bf5xx_nand_device, #endif #if IS_ENABLED(CONFIG_BFIN_CFPCMCIA) &bfin_pcmcia_cf_device, #endif #if IS_ENABLED(CONFIG_RTC_DRV_BFIN) &rtc_device, #endif #if IS_ENABLED(CONFIG_USB_ISP1760_HCD) &bfin_isp1760_device, #endif #if IS_ENABLED(CONFIG_USB_MUSB_HDRC) &musb_device, #endif #if 
IS_ENABLED(CONFIG_SMC91X) &smc91x_device, #endif #if IS_ENABLED(CONFIG_DM9000) &dm9000_device, #endif #if IS_ENABLED(CONFIG_BFIN_MAC) &bfin_mii_bus, &bfin_mac_device, #endif #if IS_ENABLED(CONFIG_USB_NET2272) &net2272_bfin_device, #endif #if IS_ENABLED(CONFIG_SPI_BFIN5XX) &bfin_spi0_device, #endif #if IS_ENABLED(CONFIG_SERIAL_BFIN) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if IS_ENABLED(CONFIG_BFIN_SIR) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #endif #if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI) &i2c_bfin_twi_device, #endif #if IS_ENABLED(CONFIG_SERIAL_BFIN_SPORT) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif #if IS_ENABLED(CONFIG_KEYBOARD_GPIO) &bfin_device_gpiokeys, #endif #if IS_ENABLED(CONFIG_MTD_GPIO_ADDR) &cm_flash_device, #endif }; static int __init cm_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); i2c_register_board_info(0, bfin_i2c_board_info, ARRAY_SIZE(bfin_i2c_board_info)); platform_add_devices(cmbf527_devices, ARRAY_SIZE(cmbf527_devices)); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; } arch_initcall(cm_init); static struct platform_device *cmbf527_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(cmbf527_early_devices, 
ARRAY_SIZE(cmbf527_early_devices)); } void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ if ((bfin_read_SYSCR() & 0x7) == 0x3) bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); } int bfin_get_ether_addr(char *addr) { return 1; } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
adhi1419/MSM7627A
arch/parisc/math-emu/dfadd.c
1944
15801
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/dfadd.c $Revision: 1.1 $ * * Purpose: * Double_add: add two double precision values. * * External Interfaces: * dbl_fadd(leftptr, rightptr, dstptr, status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "dbl_float.h" /* * Double_add: add two double precision values. */ dbl_fadd( dbl_floating_point *leftptr, dbl_floating_point *rightptr, dbl_floating_point *dstptr, unsigned int *status) { register unsigned int signless_upper_left, signless_upper_right, save; register unsigned int leftp1, leftp2, rightp1, rightp2, extent; register unsigned int resultp1 = 0, resultp2 = 0; register int result_exponent, right_exponent, diff_exponent; register int sign_save, jumpsize; register boolean inexact = FALSE; register boolean underflowtrap; /* Create local copies of the numbers */ Dbl_copyfromptr(leftptr,leftp1,leftp2); Dbl_copyfromptr(rightptr,rightp1,rightp2); /* A zero "save" helps discover equal operands (for later), * * and is used in swapping operands (if needed). 
*/ Dbl_xortointp1(leftp1,rightp1,/*to*/save); /* * check first operand for NaN's or infinity */ if ((result_exponent = Dbl_exponent(leftp1)) == DBL_INFINITY_EXPONENT) { if (Dbl_iszero_mantissa(leftp1,leftp2)) { if (Dbl_isnotnan(rightp1,rightp2)) { if (Dbl_isinfinity(rightp1,rightp2) && save!=0) { /* * invalid since operands are opposite signed infinity's */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); Set_invalidflag(); Dbl_makequietnan(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * return infinity */ Dbl_copytoptr(leftp1,leftp2,dstptr); return(NOEXCEPTION); } } else { /* * is NaN; signaling or quiet? */ if (Dbl_isone_signaling(leftp1)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Dbl_set_quiet(leftp1); } /* * is second operand a signaling NaN? */ else if (Dbl_is_signalingnan(rightp1)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Dbl_set_quiet(rightp1); Dbl_copytoptr(rightp1,rightp2,dstptr); return(NOEXCEPTION); } /* * return quiet NaN */ Dbl_copytoptr(leftp1,leftp2,dstptr); return(NOEXCEPTION); } } /* End left NaN or Infinity processing */ /* * check second operand for NaN's or infinity */ if (Dbl_isinfinity_exponent(rightp1)) { if (Dbl_iszero_mantissa(rightp1,rightp2)) { /* return infinity */ Dbl_copytoptr(rightp1,rightp2,dstptr); return(NOEXCEPTION); } /* * is NaN; signaling or quiet? 
*/ if (Dbl_isone_signaling(rightp1)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Dbl_set_quiet(rightp1); } /* * return quiet NaN */ Dbl_copytoptr(rightp1,rightp2,dstptr); return(NOEXCEPTION); } /* End right NaN or Infinity processing */ /* Invariant: Must be dealing with finite numbers */ /* Compare operands by removing the sign */ Dbl_copytoint_exponentmantissap1(leftp1,signless_upper_left); Dbl_copytoint_exponentmantissap1(rightp1,signless_upper_right); /* sign difference selects add or sub operation. */ if(Dbl_ismagnitudeless(leftp2,rightp2,signless_upper_left,signless_upper_right)) { /* Set the left operand to the larger one by XOR swap * * First finish the first word using "save" */ Dbl_xorfromintp1(save,rightp1,/*to*/rightp1); Dbl_xorfromintp1(save,leftp1,/*to*/leftp1); Dbl_swap_lower(leftp2,rightp2); result_exponent = Dbl_exponent(leftp1); } /* Invariant: left is not smaller than right. */ if((right_exponent = Dbl_exponent(rightp1)) == 0) { /* Denormalized operands. First look for zeroes */ if(Dbl_iszero_mantissa(rightp1,rightp2)) { /* right is zero */ if(Dbl_iszero_exponentmantissa(leftp1,leftp2)) { /* Both operands are zeros */ if(Is_rounding_mode(ROUNDMINUS)) { Dbl_or_signs(leftp1,/*with*/rightp1); } else { Dbl_and_signs(leftp1,/*with*/rightp1); } } else { /* Left is not a zero and must be the result. Trapped * underflows are signaled if left is denormalized. Result * is always exact. 
*/ if( (result_exponent == 0) && Is_underflowtrap_enabled() ) { /* need to normalize results mantissa */ sign_save = Dbl_signextendedsign(leftp1); Dbl_leftshiftby1(leftp1,leftp2); Dbl_normalize(leftp1,leftp2,result_exponent); Dbl_set_sign(leftp1,/*using*/sign_save); Dbl_setwrapped_exponent(leftp1,result_exponent,unfl); Dbl_copytoptr(leftp1,leftp2,dstptr); /* inexact = FALSE */ return(UNDERFLOWEXCEPTION); } } Dbl_copytoptr(leftp1,leftp2,dstptr); return(NOEXCEPTION); } /* Neither are zeroes */ Dbl_clear_sign(rightp1); /* Exponent is already cleared */ if(result_exponent == 0 ) { /* Both operands are denormalized. The result must be exact * and is simply calculated. A sum could become normalized and a * difference could cancel to a true zero. */ if( (/*signed*/int) save < 0 ) { Dbl_subtract(leftp1,leftp2,/*minus*/rightp1,rightp2, /*into*/resultp1,resultp2); if(Dbl_iszero_mantissa(resultp1,resultp2)) { if(Is_rounding_mode(ROUNDMINUS)) { Dbl_setone_sign(resultp1); } else { Dbl_setzero_sign(resultp1); } Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } } else { Dbl_addition(leftp1,leftp2,rightp1,rightp2, /*into*/resultp1,resultp2); if(Dbl_isone_hidden(resultp1)) { Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } } if(Is_underflowtrap_enabled()) { /* need to normalize result */ sign_save = Dbl_signextendedsign(resultp1); Dbl_leftshiftby1(resultp1,resultp2); Dbl_normalize(resultp1,resultp2,result_exponent); Dbl_set_sign(resultp1,/*using*/sign_save); Dbl_setwrapped_exponent(resultp1,result_exponent,unfl); Dbl_copytoptr(resultp1,resultp2,dstptr); /* inexact = FALSE */ return(UNDERFLOWEXCEPTION); } Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } right_exponent = 1; /* Set exponent to reflect different bias * with denomalized numbers. 
*/ } else { Dbl_clear_signexponent_set_hidden(rightp1); } Dbl_clear_exponent_set_hidden(leftp1); diff_exponent = result_exponent - right_exponent; /* * Special case alignment of operands that would force alignment * beyond the extent of the extension. A further optimization * could special case this but only reduces the path length for this * infrequent case. */ if(diff_exponent > DBL_THRESHOLD) { diff_exponent = DBL_THRESHOLD; } /* Align right operand by shifting to right */ Dbl_right_align(/*operand*/rightp1,rightp2,/*shifted by*/diff_exponent, /*and lower to*/extent); /* Treat sum and difference of the operands separately. */ if( (/*signed*/int) save < 0 ) { /* * Difference of the two operands. Their can be no overflow. A * borrow can occur out of the hidden bit and force a post * normalization phase. */ Dbl_subtract_withextension(leftp1,leftp2,/*minus*/rightp1,rightp2, /*with*/extent,/*into*/resultp1,resultp2); if(Dbl_iszero_hidden(resultp1)) { /* Handle normalization */ /* A straight foward algorithm would now shift the result * and extension left until the hidden bit becomes one. Not * all of the extension bits need participate in the shift. * Only the two most significant bits (round and guard) are * needed. If only a single shift is needed then the guard * bit becomes a significant low order bit and the extension * must participate in the rounding. If more than a single * shift is needed, then all bits to the right of the guard * bit are zeros, and the guard bit may or may not be zero. */ sign_save = Dbl_signextendedsign(resultp1); Dbl_leftshiftby1_withextent(resultp1,resultp2,extent,resultp1,resultp2); /* Need to check for a zero result. The sign and exponent * fields have already been zeroed. The more efficient test * of the full object can be used. */ if(Dbl_iszero(resultp1,resultp2)) /* Must have been "x-x" or "x+(-x)". 
*/ { if(Is_rounding_mode(ROUNDMINUS)) Dbl_setone_sign(resultp1); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } result_exponent--; /* Look to see if normalization is finished. */ if(Dbl_isone_hidden(resultp1)) { if(result_exponent==0) { /* Denormalized, exponent should be zero. Left operand * * was normalized, so extent (guard, round) was zero */ goto underflow; } else { /* No further normalization is needed. */ Dbl_set_sign(resultp1,/*using*/sign_save); Ext_leftshiftby1(extent); goto round; } } /* Check for denormalized, exponent should be zero. Left * * operand was normalized, so extent (guard, round) was zero */ if(!(underflowtrap = Is_underflowtrap_enabled()) && result_exponent==0) goto underflow; /* Shift extension to complete one bit of normalization and * update exponent. */ Ext_leftshiftby1(extent); /* Discover first one bit to determine shift amount. Use a * modified binary search. We have already shifted the result * one position right and still not found a one so the remainder * of the extension must be zero and simplifies rounding. */ /* Scan bytes */ while(Dbl_iszero_hiddenhigh7mantissa(resultp1)) { Dbl_leftshiftby8(resultp1,resultp2); if((result_exponent -= 8) <= 0 && !underflowtrap) goto underflow; } /* Now narrow it down to the nibble */ if(Dbl_iszero_hiddenhigh3mantissa(resultp1)) { /* The lower nibble contains the normalizing one */ Dbl_leftshiftby4(resultp1,resultp2); if((result_exponent -= 4) <= 0 && !underflowtrap) goto underflow; } /* Select case were first bit is set (already normalized) * otherwise select the proper shift. 
*/ if((jumpsize = Dbl_hiddenhigh3mantissa(resultp1)) > 7) { /* Already normalized */ if(result_exponent <= 0) goto underflow; Dbl_set_sign(resultp1,/*using*/sign_save); Dbl_set_exponent(resultp1,/*using*/result_exponent); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } Dbl_sethigh4bits(resultp1,/*using*/sign_save); switch(jumpsize) { case 1: { Dbl_leftshiftby3(resultp1,resultp2); result_exponent -= 3; break; } case 2: case 3: { Dbl_leftshiftby2(resultp1,resultp2); result_exponent -= 2; break; } case 4: case 5: case 6: case 7: { Dbl_leftshiftby1(resultp1,resultp2); result_exponent -= 1; break; } } if(result_exponent > 0) { Dbl_set_exponent(resultp1,/*using*/result_exponent); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); /* Sign bit is already set */ } /* Fixup potential underflows */ underflow: if(Is_underflowtrap_enabled()) { Dbl_set_sign(resultp1,sign_save); Dbl_setwrapped_exponent(resultp1,result_exponent,unfl); Dbl_copytoptr(resultp1,resultp2,dstptr); /* inexact = FALSE */ return(UNDERFLOWEXCEPTION); } /* * Since we cannot get an inexact denormalized result, * we can now return. */ Dbl_fix_overshift(resultp1,resultp2,(1-result_exponent),extent); Dbl_clear_signexponent(resultp1); Dbl_set_sign(resultp1,sign_save); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* end if(hidden...)... */ /* Fall through and round */ } /* end if(save < 0)... */ else { /* Add magnitudes */ Dbl_addition(leftp1,leftp2,rightp1,rightp2,/*to*/resultp1,resultp2); if(Dbl_isone_hiddenoverflow(resultp1)) { /* Prenormalization required. */ Dbl_rightshiftby1_withextent(resultp2,extent,extent); Dbl_arithrightshiftby1(resultp1,resultp2); result_exponent++; } /* end if hiddenoverflow... */ } /* end else ...add magnitudes... */ /* Round the result. If the extension is all zeros,then the result is * exact. Otherwise round in the correct direction. No underflow is * possible. 
If a postnormalization is necessary, then the mantissa is * all zeros so no shift is needed. */ round: if(Ext_isnotzero(extent)) { inexact = TRUE; switch(Rounding_mode()) { case ROUNDNEAREST: /* The default. */ if(Ext_isone_sign(extent)) { /* at least 1/2 ulp */ if(Ext_isnotzero_lower(extent) || Dbl_isone_lowmantissap2(resultp2)) { /* either exactly half way and odd or more than 1/2ulp */ Dbl_increment(resultp1,resultp2); } } break; case ROUNDPLUS: if(Dbl_iszero_sign(resultp1)) { /* Round up positive results */ Dbl_increment(resultp1,resultp2); } break; case ROUNDMINUS: if(Dbl_isone_sign(resultp1)) { /* Round down negative results */ Dbl_increment(resultp1,resultp2); } case ROUNDZERO:; /* truncate is simple */ } /* end switch... */ if(Dbl_isone_hiddenoverflow(resultp1)) result_exponent++; } if(result_exponent == DBL_INFINITY_EXPONENT) { /* Overflow */ if(Is_overflowtrap_enabled()) { Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl); Dbl_copytoptr(resultp1,resultp2,dstptr); if (inexact) if (Is_inexacttrap_enabled()) return(OVERFLOWEXCEPTION | INEXACTEXCEPTION); else Set_inexactflag(); return(OVERFLOWEXCEPTION); } else { inexact = TRUE; Set_overflowflag(); Dbl_setoverflow(resultp1,resultp2); } } else Dbl_set_exponent(resultp1,result_exponent); Dbl_copytoptr(resultp1,resultp2,dstptr); if(inexact) if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); return(NOEXCEPTION); }
gpl-2.0
sunnyden/ubuntu_kernel
arch/blackfin/mach-bf533/boards/H8606.c
1944
10101
/* * Copyright 2004-2009 Analog Devices Inc. * 2007-2008 HV Sistemas S.L. * Javier Herrero <jherrero@hvsistemas.es> * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #if IS_ENABLED(CONFIG_USB_ISP1362_HCD) #include <linux/usb/isp1362.h> #endif #include <linux/irq.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/reboot.h> #include <asm/portmux.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "HV Sistemas H8606"; #if IS_ENABLED(CONFIG_RTC_DRV_BFIN) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif /* * Driver needs to know address, irq and flag pin. */ #if IS_ENABLED(CONFIG_DM9000) static struct resource dm9000_resources[] = { [0] = { .start = 0x20300000, .end = 0x20300002, .flags = IORESOURCE_MEM, }, [1] = { .start = 0x20300004, .end = 0x20300006, .flags = IORESOURCE_MEM, }, [2] = { .start = IRQ_PF10, .end = IRQ_PF10, .flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_SHAREABLE), }, }; static struct platform_device dm9000_device = { .id = 0, .name = "dm9000", .resource = dm9000_resources, .num_resources = ARRAY_SIZE(dm9000_resources), }; #endif #if IS_ENABLED(CONFIG_SMC91X) #include <linux/smc91x.h> static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { { .name = "smc91x-regs", .start = 0x20300300, .end = 0x20300300 + 16, .flags = IORESOURCE_MEM, }, { .start = IRQ_PROG_INTB, .end = IRQ_PROG_INTB, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_device = { .name = 
"smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &smc91x_info, }, }; #endif #if IS_ENABLED(CONFIG_USB_NET2272) static struct resource net2272_bfin_resources[] = { { .start = 0x20300000, .end = 0x20300000 + 0x100, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF10, .end = IRQ_PF10, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device net2272_bfin_device = { .name = "net2272", .id = -1, .num_resources = ARRAY_SIZE(net2272_bfin_resources), .resource = net2272_bfin_resources, }; #endif #if IS_ENABLED(CONFIG_SPI_BFIN5XX) /* all SPI peripherals info goes here */ #if IS_ENABLED(CONFIG_MTD_M25P80) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader (spi)", .size = 0x40000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "fpga (spi)", .size = 0x30000, .offset = 0x40000 }, { .name = "linux kernel (spi)", .size = 0x150000, .offset = 0x70000 }, { .name = "jffs2 root file system (spi)", .size = 0x640000, .offset = 0x1c0000, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "m25p64", }; /* SPI flash chip (m25p64) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif /* Notice: for blackfin, the speed_hz is the value of register * SPI_BAUD, not the real baudrate */ static struct spi_board_info bfin_spi_board_info[] __initdata = { #if IS_ENABLED(CONFIG_MTD_M25P80) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ /* this value is the baudrate divisor */ .max_speed_hz = 50000000, /* actual baudrate is SCLK/(2xspeed_hz) */ .bus_num = 0, /* Framework bus number */ .chip_select = 2, /* Framework chip select. 
On STAMP537 it is SPISSEL2*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if IS_ENABLED(CONFIG_SND_BF5XX_SOC_AD183X) { .modalias = "ad183x", .max_speed_hz = 16, .bus_num = 1, .chip_select = 4, }, #endif }; /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if IS_ENABLED(CONFIG_SERIAL_BFIN) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = BFIN_UART_THR, .end = BFIN_UART_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #endif #if 
IS_ENABLED(CONFIG_BFIN_SIR) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #endif #if IS_ENABLED(CONFIG_SERIAL_8250) #include <linux/serial_8250.h> #include <linux/serial.h> /* * Configuration for two 16550 UARTS in FPGA at addresses 0x20200000 and 0x202000010. * running at half system clock, both with interrupt output or-ed to PF8. Change to * suit different FPGA configuration, or to suit real 16550 UARTS connected to the bus */ static struct plat_serial8250_port serial8250_platform_data [] = { { .membase = (void *)0x20200000, .mapbase = 0x20200000, .irq = IRQ_PF8, .irqflags = IRQF_TRIGGER_HIGH, .flags = UPF_BOOT_AUTOCONF | UART_CONFIG_TYPE, .iotype = UPIO_MEM, .regshift = 1, .uartclk = 66666667, }, { .membase = (void *)0x20200010, .mapbase = 0x20200010, .irq = IRQ_PF8, .irqflags = IRQF_TRIGGER_HIGH, .flags = UPF_BOOT_AUTOCONF | UART_CONFIG_TYPE, .iotype = UPIO_MEM, .regshift = 1, .uartclk = 66666667, }, { } }; static struct platform_device serial8250_device = { .id = PLAT8250_DEV_PLATFORM, .name = "serial8250", .dev = { .platform_data = serial8250_platform_data, }, }; #endif #if IS_ENABLED(CONFIG_KEYBOARD_OPENCORES) /* * Configuration for one OpenCores keyboard controller in FPGA at address 0x20200030, * interrupt output wired to PF9. 
Change to suit different FPGA configuration */ static struct resource opencores_kbd_resources[] = { [0] = { .start = 0x20200030, .end = 0x20300030 + 2, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_PF9, .end = IRQ_PF9, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; static struct platform_device opencores_kbd_device = { .id = -1, .name = "opencores-kbd", .resource = opencores_kbd_resources, .num_resources = ARRAY_SIZE(opencores_kbd_resources), }; #endif static struct platform_device *h8606_devices[] __initdata = { #if IS_ENABLED(CONFIG_RTC_DRV_BFIN) &rtc_device, #endif #if IS_ENABLED(CONFIG_DM9000) &dm9000_device, #endif #if IS_ENABLED(CONFIG_SMC91X) &smc91x_device, #endif #if IS_ENABLED(CONFIG_USB_NET2272) &net2272_bfin_device, #endif #if IS_ENABLED(CONFIG_SPI_BFIN5XX) &bfin_spi0_device, #endif #if IS_ENABLED(CONFIG_SERIAL_BFIN) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #endif #if IS_ENABLED(CONFIG_SERIAL_8250) &serial8250_device, #endif #if IS_ENABLED(CONFIG_BFIN_SIR) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #endif #if IS_ENABLED(CONFIG_KEYBOARD_OPENCORES) &opencores_kbd_device, #endif }; static int __init H8606_init(void) { printk(KERN_INFO "HV Sistemas H8606 board support by http://www.hvsistemas.com\n"); printk(KERN_INFO "%s(): registering device resources\n", __func__); platform_add_devices(h8606_devices, ARRAY_SIZE(h8606_devices)); #if IS_ENABLED(CONFIG_SPI_BFIN5XX) spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); #endif return 0; } arch_initcall(H8606_init); static struct platform_device *H8606_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(H8606_early_devices, ARRAY_SIZE(H8606_early_devices)); }
gpl-2.0
alexax66/LP-Kernel-SM-E500H
net/xfrm/xfrm_replay.c
2200
14592
/*
 * xfrm_replay.c - xfrm replay detection, derived from xfrm_state.c.
 *
 * Copyright (C) 2010 secunet Security Networks AG
 * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <net/xfrm.h>

/*
 * Reconstruct the high-order 32 bits (seq_hi) of the 64-bit extended
 * sequence number for a received low-order word @net_seq, based on the
 * state's current seq/seq_hi and the anti-replay window.  Returns 0 for
 * states without XFRM_STATE_ESN.  seq_hi is incremented or decremented
 * by one when the received sequence number falls on the far side of a
 * 2^32 subspace boundary relative to the window.
 */
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
{
	u32 seq, seq_hi, bottom;
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;

	if (!(x->props.flags & XFRM_STATE_ESN))
		return 0;

	seq = ntohl(net_seq);
	seq_hi = replay_esn->seq_hi;
	/* lowest sequence number still inside the replay window */
	bottom = replay_esn->seq - replay_esn->replay_window + 1;

	if (likely(replay_esn->seq >= replay_esn->replay_window - 1)) {
		/* A. same subspace: a seq below the window bottom must
		 * belong to the next 2^32 subspace */
		if (unlikely(seq < bottom))
			seq_hi++;
	} else {
		/* B. window spans two subspaces: a seq at or above the
		 * (wrapped) bottom belongs to the previous subspace */
		if (unlikely(seq >= bottom))
			seq_hi--;
	}

	return seq_hi;
}

static void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated one of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
*/ switch (event) { case XFRM_REPLAY_UPDATE: if (x->replay_maxdiff && (x->replay.seq - x->preplay.seq < x->replay_maxdiff) && (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) { if (x->xflags & XFRM_TIME_DEFER) event = XFRM_REPLAY_TIMEOUT; else return; } break; case XFRM_REPLAY_TIMEOUT: if (memcmp(&x->replay, &x->preplay, sizeof(struct xfrm_replay_state)) == 0) { x->xflags |= XFRM_TIME_DEFER; return; } break; } memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state)); c.event = XFRM_MSG_NEWAE; c.data.aevent = event; km_state_notify(x, &c); if (x->replay_maxage && !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) x->xflags &= ~XFRM_TIME_DEFER; } static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) { int err = 0; struct net *net = xs_net(x); if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; if (unlikely(x->replay.oseq == 0)) { x->replay.oseq--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; return err; } if (xfrm_aevent_is_on(net)) x->repl->notify(x, XFRM_REPLAY_UPDATE); } return err; } static int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq) { u32 diff; u32 seq = ntohl(net_seq); if (!x->props.replay_window) return 0; if (unlikely(seq == 0)) goto err; if (likely(seq > x->replay.seq)) return 0; diff = x->replay.seq - seq; if (diff >= min_t(unsigned int, x->props.replay_window, sizeof(x->replay.bitmap) * 8)) { x->stats.replay_window++; goto err; } if (x->replay.bitmap & (1U << diff)) { x->stats.replay++; goto err; } return 0; err: xfrm_audit_state_replay(x, skb, net_seq); return -EINVAL; } static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq) { u32 diff; u32 seq = ntohl(net_seq); if (!x->props.replay_window) return; if (seq > x->replay.seq) { diff = seq - x->replay.seq; if (diff < x->props.replay_window) x->replay.bitmap = ((x->replay.bitmap) << diff) | 1; else x->replay.bitmap = 1; x->replay.seq = seq; } else { diff = 
x->replay.seq - seq; x->replay.bitmap |= (1U << diff); } if (xfrm_aevent_is_on(xs_net(x))) x->repl->notify(x, XFRM_REPLAY_UPDATE); } static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) { int err = 0; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; struct net *net = xs_net(x); if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; if (unlikely(replay_esn->oseq == 0)) { replay_esn->oseq--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; return err; } if (xfrm_aevent_is_on(net)) x->repl->notify(x, XFRM_REPLAY_UPDATE); } return err; } static int xfrm_replay_check_bmp(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq) { unsigned int bitnr, nr; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; u32 pos; u32 seq = ntohl(net_seq); u32 diff = replay_esn->seq - seq; if (!replay_esn->replay_window) return 0; if (unlikely(seq == 0)) goto err; if (likely(seq > replay_esn->seq)) return 0; if (diff >= replay_esn->replay_window) { x->stats.replay_window++; goto err; } pos = (replay_esn->seq - 1) % replay_esn->replay_window; if (pos >= diff) bitnr = (pos - diff) % replay_esn->replay_window; else bitnr = replay_esn->replay_window - (diff - pos); nr = bitnr >> 5; bitnr = bitnr & 0x1F; if (replay_esn->bmp[nr] & (1U << bitnr)) goto err_replay; return 0; err_replay: x->stats.replay++; err: xfrm_audit_state_replay(x, skb, net_seq); return -EINVAL; } static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq) { unsigned int bitnr, nr, i; u32 diff; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; u32 seq = ntohl(net_seq); u32 pos; if (!replay_esn->replay_window) return; pos = (replay_esn->seq - 1) % replay_esn->replay_window; if (seq > replay_esn->seq) { diff = seq - replay_esn->seq; if (diff < replay_esn->replay_window) { for (i = 1; i < diff; i++) { bitnr = (pos + i) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] &= 
~(1U << bitnr); } } else { nr = (replay_esn->replay_window - 1) >> 5; for (i = 0; i <= nr; i++) replay_esn->bmp[i] = 0; } bitnr = (pos + diff) % replay_esn->replay_window; replay_esn->seq = seq; } else { diff = replay_esn->seq - seq; if (pos >= diff) bitnr = (pos - diff) % replay_esn->replay_window; else bitnr = replay_esn->replay_window - (diff - pos); } nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] |= (1U << bitnr); if (xfrm_aevent_is_on(xs_net(x))) x->repl->notify(x, XFRM_REPLAY_UPDATE); } static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) { struct km_event c; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; /* we send notify messages in case * 1. we updated on of the sequence numbers, and the seqno difference * is at least x->replay_maxdiff, in this case we also update the * timeout of our timer function * 2. if x->replay_maxage has elapsed since last update, * and there were changes * * The state structure must be locked! 
*/ switch (event) { case XFRM_REPLAY_UPDATE: if (x->replay_maxdiff && (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) && (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) { if (x->xflags & XFRM_TIME_DEFER) event = XFRM_REPLAY_TIMEOUT; else return; } break; case XFRM_REPLAY_TIMEOUT: if (memcmp(x->replay_esn, x->preplay_esn, xfrm_replay_state_esn_len(replay_esn)) == 0) { x->xflags |= XFRM_TIME_DEFER; return; } break; } memcpy(x->preplay_esn, x->replay_esn, xfrm_replay_state_esn_len(replay_esn)); c.event = XFRM_MSG_NEWAE; c.data.aevent = event; km_state_notify(x, &c); if (x->replay_maxage && !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) x->xflags &= ~XFRM_TIME_DEFER; } static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) { u32 seq_diff, oseq_diff; struct km_event c; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; /* we send notify messages in case * 1. we updated on of the sequence numbers, and the seqno difference * is at least x->replay_maxdiff, in this case we also update the * timeout of our timer function * 2. if x->replay_maxage has elapsed since last update, * and there were changes * * The state structure must be locked! 
*/ switch (event) { case XFRM_REPLAY_UPDATE: if (!x->replay_maxdiff) break; if (replay_esn->seq_hi == preplay_esn->seq_hi) seq_diff = replay_esn->seq - preplay_esn->seq; else seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; if (replay_esn->oseq_hi == preplay_esn->oseq_hi) oseq_diff = replay_esn->oseq - preplay_esn->oseq; else oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; if (seq_diff < x->replay_maxdiff && oseq_diff < x->replay_maxdiff) { if (x->xflags & XFRM_TIME_DEFER) event = XFRM_REPLAY_TIMEOUT; else return; } break; case XFRM_REPLAY_TIMEOUT: if (memcmp(x->replay_esn, x->preplay_esn, xfrm_replay_state_esn_len(replay_esn)) == 0) { x->xflags |= XFRM_TIME_DEFER; return; } break; } memcpy(x->preplay_esn, x->replay_esn, xfrm_replay_state_esn_len(replay_esn)); c.event = XFRM_MSG_NEWAE; c.data.aevent = event; km_state_notify(x, &c); if (x->replay_maxage && !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) x->xflags &= ~XFRM_TIME_DEFER; } static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) { int err = 0; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; struct net *net = xs_net(x); if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; XFRM_SKB_CB(skb)->seq.output.hi = replay_esn->oseq_hi; if (unlikely(replay_esn->oseq == 0)) { XFRM_SKB_CB(skb)->seq.output.hi = ++replay_esn->oseq_hi; if (replay_esn->oseq_hi == 0) { replay_esn->oseq--; replay_esn->oseq_hi--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; return err; } } if (xfrm_aevent_is_on(net)) x->repl->notify(x, XFRM_REPLAY_UPDATE); } return err; } static int xfrm_replay_check_esn(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq) { unsigned int bitnr, nr; u32 diff; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; u32 pos; u32 seq = ntohl(net_seq); u32 wsize = replay_esn->replay_window; u32 top = replay_esn->seq; u32 bottom = top - wsize + 1; if (!wsize) return 0; if (unlikely(seq == 0 && 
replay_esn->seq_hi == 0 && (replay_esn->seq < replay_esn->replay_window - 1))) goto err; diff = top - seq; if (likely(top >= wsize - 1)) { /* A. same subspace */ if (likely(seq > top) || seq < bottom) return 0; } else { /* B. window spans two subspaces */ if (likely(seq > top && seq < bottom)) return 0; if (seq >= bottom) diff = ~seq + top + 1; } if (diff >= replay_esn->replay_window) { x->stats.replay_window++; goto err; } pos = (replay_esn->seq - 1) % replay_esn->replay_window; if (pos >= diff) bitnr = (pos - diff) % replay_esn->replay_window; else bitnr = replay_esn->replay_window - (diff - pos); nr = bitnr >> 5; bitnr = bitnr & 0x1F; if (replay_esn->bmp[nr] & (1U << bitnr)) goto err_replay; return 0; err_replay: x->stats.replay++; err: xfrm_audit_state_replay(x, skb, net_seq); return -EINVAL; } static int xfrm_replay_recheck_esn(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq) { if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi != htonl(xfrm_replay_seqhi(x, net_seq)))) { x->stats.replay_window++; return -EINVAL; } return xfrm_replay_check_esn(x, skb, net_seq); } static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) { unsigned int bitnr, nr, i; int wrap; u32 diff, pos, seq, seq_hi; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; if (!replay_esn->replay_window) return; seq = ntohl(net_seq); pos = (replay_esn->seq - 1) % replay_esn->replay_window; seq_hi = xfrm_replay_seqhi(x, net_seq); wrap = seq_hi - replay_esn->seq_hi; if ((!wrap && seq > replay_esn->seq) || wrap > 0) { if (likely(!wrap)) diff = seq - replay_esn->seq; else diff = ~replay_esn->seq + seq + 1; if (diff < replay_esn->replay_window) { for (i = 1; i < diff; i++) { bitnr = (pos + i) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] &= ~(1U << bitnr); } } else { nr = (replay_esn->replay_window - 1) >> 5; for (i = 0; i <= nr; i++) replay_esn->bmp[i] = 0; } bitnr = (pos + diff) % replay_esn->replay_window; replay_esn->seq = seq; if 
(unlikely(wrap > 0))
			replay_esn->seq_hi++;	/* crossed a 2^32 boundary */
	} else {
		/* Old packet inside the window: locate its bitmap slot. */
		diff = replay_esn->seq - seq;

		if (pos >= diff)
			bitnr = (pos - diff) % replay_esn->replay_window;
		else
			bitnr = replay_esn->replay_window - (diff - pos);
	}

	/* Mark this sequence number as seen in the replay bitmap
	 * (bmp is an array of 32-bit words, hence the >> 5 / & 0x1F). */
	nr = bitnr >> 5;
	bitnr = bitnr & 0x1F;
	replay_esn->bmp[nr] |= (1U << bitnr);

	if (xfrm_aevent_is_on(xs_net(x)))
		x->repl->notify(x, XFRM_REPLAY_UPDATE);
}

/* Legacy scheme: 32-bit sequence numbers, window limited to the width
 * of x->replay.bitmap. */
static struct xfrm_replay xfrm_replay_legacy = {
	.advance	= xfrm_replay_advance,
	.check		= xfrm_replay_check,
	.recheck	= xfrm_replay_check,
	.notify		= xfrm_replay_notify,
	.overflow	= xfrm_replay_overflow,
};

/* 32-bit sequence numbers with a variable-sized bitmap (replay_esn
 * present but XFRM_STATE_ESN not set). */
static struct xfrm_replay xfrm_replay_bmp = {
	.advance	= xfrm_replay_advance_bmp,
	.check		= xfrm_replay_check_bmp,
	.recheck	= xfrm_replay_check_bmp,
	.notify		= xfrm_replay_notify_bmp,
	.overflow	= xfrm_replay_overflow_bmp,
};

/* Extended (64-bit) sequence numbers; recheck differs from check so the
 * seq_hi guess can be re-validated after async processing. */
static struct xfrm_replay xfrm_replay_esn = {
	.advance	= xfrm_replay_advance_esn,
	.check		= xfrm_replay_check_esn,
	.recheck	= xfrm_replay_recheck_esn,
	.notify		= xfrm_replay_notify_esn,
	.overflow	= xfrm_replay_overflow_esn,
};

/*
 * Select the replay-detection implementation for state @x and validate
 * the userspace-supplied window parameters.  Returns 0 on success or
 * -EINVAL if the window does not fit the allocated bitmap, or if ESN is
 * requested with a zero-sized window.
 */
int xfrm_init_replay(struct xfrm_state *x)
{
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;

	if (replay_esn) {
		/* window must fit within the bitmap allocated for it */
		if (replay_esn->replay_window >
		    replay_esn->bmp_len * sizeof(__u32) * 8)
			return -EINVAL;

		if (x->props.flags & XFRM_STATE_ESN) {
			/* ESN requires a non-empty anti-replay window */
			if (replay_esn->replay_window == 0)
				return -EINVAL;
			x->repl = &xfrm_replay_esn;
		} else
			x->repl = &xfrm_replay_bmp;
	} else
		x->repl = &xfrm_replay_legacy;

	return 0;
}
EXPORT_SYMBOL(xfrm_init_replay);
gpl-2.0
mixianghang/mptcp
arch/x86/kernel/step.c
3480
6073
/*
 * x86 single-step support code, common to 32-bit and 64-bit.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>

/*
 * Translate the tracee's CS:IP into a linear address, accounting for
 * vm86 mode (segment << 4 arithmetic) and LDT-based code segments.
 * Returns -1L when the selector indexes past the tracee's LDT.
 */
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->ip;
	seg = regs->cs & 0xffff;
	if (v8086_mode(regs)) {
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}

	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		struct desc_struct *desc;
		unsigned long base;

		seg &= ~7UL;	/* strip RPL and table-indicator bits */

		mutex_lock(&child->mm->context.lock);
		if (unlikely((seg >> 3) >= child->mm->context.size))
			addr = -1L; /* bogus selector, access would fault */
		else {
			desc = child->mm->context.ldt + seg;
			base = get_desc_base(desc);

			/* 16-bit code segment? */
			if (!desc->d)
				addr &= 0xffff;
			addr += base;
		}
		mutex_unlock(&child->mm->context.lock);
	}

	return addr;
}

/*
 * Peek at the instruction bytes at the tracee's current IP and decide
 * whether the instruction about to be single-stepped can itself modify
 * EFLAGS.TF (popf or iret, possibly behind prefix bytes).  Returns 1 if
 * so, 0 otherwise; a partial read simply ends the scan.
 */
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[15];	/* max x86 instruction length */
	unsigned long addr = convert_ip_to_linear(child, regs);

	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf and iret */
		case 0x9d: case 0xcf:
			return 1;

			/* CHECKME: 64 65 */

		/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

#ifdef CONFIG_X86_64
		case 0x40 ... 0x4f:
			if (!user_64bit_mode(regs))
				/* 32-bit mode: register increment */
				return 0;
			/* 64-bit mode: REX prefix */
			continue;
#endif

			/* CHECKME: f2, f3 */

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set.
But * it's more pain than it's worth to avoid * it, and a debugger could emulate this * all in user space if it _really_ cares. */ case 0x9c: default: return 0; } } return 0; } /* * Enable single-stepping. Return nonzero if user mode is not using TF itself. */ static int enable_single_step(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); unsigned long oflags; /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. * If user-mode had set TF itself, then it's still clear from * do_debug() and we need to set it again to restore the user * state so we don't wrongly set TIF_FORCED_TF below. * If enable_single_step() was used last and that is what * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are * already set and our bookkeeping is fine. */ if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP))) regs->flags |= X86_EFLAGS_TF; /* * Always set TIF_SINGLESTEP - this guarantees that * we single-step system calls etc.. This will also * cause us to set TF when returning to user mode. */ set_tsk_thread_flag(child, TIF_SINGLESTEP); oflags = regs->flags; /* Set TF on the kernel stack.. */ regs->flags |= X86_EFLAGS_TF; /* * ..but if TF is changed by the instruction we will trace, * don't mark it as being "us" that set it, so that we * won't clear it by hand later. * * Note that if we don't actually execute the popf because * of a signal arriving right now or suchlike, we will lose * track of the fact that it really was "us" that set it. */ if (is_setting_trap_flag(child, regs)) { clear_tsk_thread_flag(child, TIF_FORCED_TF); return 0; } /* * If TF was already set, check whether it was us who set it. * If not, we should never attempt a block step. 
*/ if (oflags & X86_EFLAGS_TF) return test_tsk_thread_flag(child, TIF_FORCED_TF); set_tsk_thread_flag(child, TIF_FORCED_TF); return 1; } void set_task_blockstep(struct task_struct *task, bool on) { unsigned long debugctl; /* * Ensure irq/preemption can't change debugctl in between. * Note also that both TIF_BLOCKSTEP and debugctl should * be changed atomically wrt preemption. * * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if * task is current or it can't be running, otherwise we can race * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but * PTRACE_KILL is not safe. */ local_irq_disable(); debugctl = get_debugctlmsr(); if (on) { debugctl |= DEBUGCTLMSR_BTF; set_tsk_thread_flag(task, TIF_BLOCKSTEP); } else { debugctl &= ~DEBUGCTLMSR_BTF; clear_tsk_thread_flag(task, TIF_BLOCKSTEP); } if (task == current) update_debugctlmsr(debugctl); local_irq_enable(); } /* * Enable single or block step. */ static void enable_step(struct task_struct *child, bool block) { /* * Make sure block stepping (BTF) is not enabled unless it should be. * Note that we don't try to worry about any is_setting_trap_flag() * instructions after the first when using block stepping. * So no one should try to use debugger block stepping in a program * that uses user-mode single stepping itself. */ if (enable_single_step(child) && block) set_task_blockstep(child, true); else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); } void user_enable_single_step(struct task_struct *child) { enable_step(child, 0); } void user_enable_block_step(struct task_struct *child) { enable_step(child, 1); } void user_disable_single_step(struct task_struct *child) { /* * Make sure block stepping (BTF) is disabled. */ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); /* Always clear TIF_SINGLESTEP... */ clear_tsk_thread_flag(child, TIF_SINGLESTEP); /* But touch TF only if it was set by us.. 
*/ if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) task_pt_regs(child)->flags &= ~X86_EFLAGS_TF; }
gpl-2.0
TeamHorizon/android_kernel_oneplus_msm8974
arch/mips/alchemy/devboards/db1300.c
4504
19980
/* * DBAu1300 init and platform device setup. * * (c) 2009 Manuel Lauss <manuel.lauss@googlemail.com> */ #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/init.h> #include <linux/input.h> /* KEY_* codes */ #include <linux/i2c.h> #include <linux/io.h> #include <linux/leds.h> #include <linux/ata_platform.h> #include <linux/mmc/host.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/platform_device.h> #include <linux/smsc911x.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1100_mmc.h> #include <asm/mach-au1x00/au1200fb.h> #include <asm/mach-au1x00/au1xxx_dbdma.h> #include <asm/mach-au1x00/au1xxx_psc.h> #include <asm/mach-db1x00/db1300.h> #include <asm/mach-db1x00/bcsr.h> #include <asm/mach-au1x00/prom.h> #include "platform.h" static struct i2c_board_info db1300_i2c_devs[] __initdata = { { I2C_BOARD_INFO("wm8731", 0x1b), }, /* I2S audio codec */ { I2C_BOARD_INFO("ne1619", 0x2d), }, /* adm1025-compat hwmon */ }; /* multifunction pins to assign to GPIO controller */ static int db1300_gpio_pins[] __initdata = { AU1300_PIN_LCDPWM0, AU1300_PIN_PSC2SYNC1, AU1300_PIN_WAKE1, AU1300_PIN_WAKE2, AU1300_PIN_WAKE3, AU1300_PIN_FG3AUX, AU1300_PIN_EXTCLK1, -1, /* terminator */ }; /* multifunction pins to assign to device functions */ static int db1300_dev_pins[] __initdata = { /* wake-from-str pins 0-3 */ AU1300_PIN_WAKE0, /* external clock sources for PSC0 */ AU1300_PIN_EXTCLK0, /* 8bit MMC interface on SD0: 6-9 */ AU1300_PIN_SD0DAT4, AU1300_PIN_SD0DAT5, AU1300_PIN_SD0DAT6, AU1300_PIN_SD0DAT7, /* UART1 pins: 11-18 */ AU1300_PIN_U1RI, AU1300_PIN_U1DCD, AU1300_PIN_U1DSR, AU1300_PIN_U1CTS, AU1300_PIN_U1RTS, AU1300_PIN_U1DTR, AU1300_PIN_U1RX, AU1300_PIN_U1TX, /* UART0 pins: 19-24 */ AU1300_PIN_U0RI, AU1300_PIN_U0DCD, AU1300_PIN_U0DSR, AU1300_PIN_U0CTS, AU1300_PIN_U0RTS, AU1300_PIN_U0DTR, /* UART2: 25-26 */ AU1300_PIN_U2RX, 
AU1300_PIN_U2TX, /* UART3: 27-28 */ AU1300_PIN_U3RX, AU1300_PIN_U3TX, /* LCD controller PWMs, ext pixclock: 30-31 */ AU1300_PIN_LCDPWM1, AU1300_PIN_LCDCLKIN, /* SD1 interface: 32-37 */ AU1300_PIN_SD1DAT0, AU1300_PIN_SD1DAT1, AU1300_PIN_SD1DAT2, AU1300_PIN_SD1DAT3, AU1300_PIN_SD1CMD, AU1300_PIN_SD1CLK, /* SD2 interface: 38-43 */ AU1300_PIN_SD2DAT0, AU1300_PIN_SD2DAT1, AU1300_PIN_SD2DAT2, AU1300_PIN_SD2DAT3, AU1300_PIN_SD2CMD, AU1300_PIN_SD2CLK, /* PSC0/1 clocks: 44-45 */ AU1300_PIN_PSC0CLK, AU1300_PIN_PSC1CLK, /* PSCs: 46-49/50-53/54-57/58-61 */ AU1300_PIN_PSC0SYNC0, AU1300_PIN_PSC0SYNC1, AU1300_PIN_PSC0D0, AU1300_PIN_PSC0D1, AU1300_PIN_PSC1SYNC0, AU1300_PIN_PSC1SYNC1, AU1300_PIN_PSC1D0, AU1300_PIN_PSC1D1, AU1300_PIN_PSC2SYNC0, AU1300_PIN_PSC2D0, AU1300_PIN_PSC2D1, AU1300_PIN_PSC3SYNC0, AU1300_PIN_PSC3SYNC1, AU1300_PIN_PSC3D0, AU1300_PIN_PSC3D1, /* PCMCIA interface: 62-70 */ AU1300_PIN_PCE2, AU1300_PIN_PCE1, AU1300_PIN_PIOS16, AU1300_PIN_PIOR, AU1300_PIN_PWE, AU1300_PIN_PWAIT, AU1300_PIN_PREG, AU1300_PIN_POE, AU1300_PIN_PIOW, /* camera interface H/V sync inputs: 71-72 */ AU1300_PIN_CIMLS, AU1300_PIN_CIMFS, /* PSC2/3 clocks: 73-74 */ AU1300_PIN_PSC2CLK, AU1300_PIN_PSC3CLK, -1, /* terminator */ }; static void __init db1300_gpio_config(void) { int *i; i = &db1300_dev_pins[0]; while (*i != -1) au1300_pinfunc_to_dev(*i++); i = &db1300_gpio_pins[0]; while (*i != -1) au1300_gpio_direction_input(*i++);/* implies pin_to_gpio */ au1300_set_dbdma_gpio(1, AU1300_PIN_FG3AUX); } char *get_system_type(void) { return "DB1300"; } /**********************************************************************/ static void au1300_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *this = mtd->priv; unsigned long ioaddr = (unsigned long)this->IO_ADDR_W; ioaddr &= 0xffffff00; if (ctrl & NAND_CLE) { ioaddr += MEM_STNAND_CMD; } else if (ctrl & NAND_ALE) { ioaddr += MEM_STNAND_ADDR; } else { /* assume we want to r/w real data by default */ ioaddr += MEM_STNAND_DATA; 
} this->IO_ADDR_R = this->IO_ADDR_W = (void __iomem *)ioaddr; if (cmd != NAND_CMD_NONE) { __raw_writeb(cmd, this->IO_ADDR_W); wmb(); } } static int au1300_nand_device_ready(struct mtd_info *mtd) { return __raw_readl((void __iomem *)MEM_STSTAT) & 1; } static const char *db1300_part_probes[] = { "cmdlinepart", NULL }; static struct mtd_partition db1300_nand_parts[] = { { .name = "NAND FS 0", .offset = 0, .size = 8 * 1024 * 1024, }, { .name = "NAND FS 1", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL }, }; struct platform_nand_data db1300_nand_platdata = { .chip = { .nr_chips = 1, .chip_offset = 0, .nr_partitions = ARRAY_SIZE(db1300_nand_parts), .partitions = db1300_nand_parts, .chip_delay = 20, .part_probe_types = db1300_part_probes, }, .ctrl = { .dev_ready = au1300_nand_device_ready, .cmd_ctrl = au1300_nand_cmd_ctrl, }, }; static struct resource db1300_nand_res[] = { [0] = { .start = DB1300_NAND_PHYS_ADDR, .end = DB1300_NAND_PHYS_ADDR + 0xff, .flags = IORESOURCE_MEM, }, }; static struct platform_device db1300_nand_dev = { .name = "gen_nand", .num_resources = ARRAY_SIZE(db1300_nand_res), .resource = db1300_nand_res, .id = -1, .dev = { .platform_data = &db1300_nand_platdata, } }; /**********************************************************************/ static struct resource db1300_eth_res[] = { [0] = { .start = DB1300_ETH_PHYS_ADDR, .end = DB1300_ETH_PHYS_END, .flags = IORESOURCE_MEM, }, [1] = { .start = DB1300_ETH_INT, .end = DB1300_ETH_INT, .flags = IORESOURCE_IRQ, }, }; static struct smsc911x_platform_config db1300_eth_config = { .phy_interface = PHY_INTERFACE_MODE_MII, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, .flags = SMSC911X_USE_32BIT, }; static struct platform_device db1300_eth_dev = { .name = "smsc911x", .id = -1, .num_resources = ARRAY_SIZE(db1300_eth_res), .resource = db1300_eth_res, .dev = { .platform_data = &db1300_eth_config, }, }; 
/**********************************************************************/ static struct resource au1300_psc1_res[] = { [0] = { .start = AU1300_PSC1_PHYS_ADDR, .end = AU1300_PSC1_PHYS_ADDR + 0x0fff, .flags = IORESOURCE_MEM, }, [1] = { .start = AU1300_PSC1_INT, .end = AU1300_PSC1_INT, .flags = IORESOURCE_IRQ, }, [2] = { .start = AU1300_DSCR_CMD0_PSC1_TX, .end = AU1300_DSCR_CMD0_PSC1_TX, .flags = IORESOURCE_DMA, }, [3] = { .start = AU1300_DSCR_CMD0_PSC1_RX, .end = AU1300_DSCR_CMD0_PSC1_RX, .flags = IORESOURCE_DMA, }, }; static struct platform_device db1300_ac97_dev = { .name = "au1xpsc_ac97", .id = 1, /* PSC ID. match with AC97 codec ID! */ .num_resources = ARRAY_SIZE(au1300_psc1_res), .resource = au1300_psc1_res, }; /**********************************************************************/ static struct resource au1300_psc2_res[] = { [0] = { .start = AU1300_PSC2_PHYS_ADDR, .end = AU1300_PSC2_PHYS_ADDR + 0x0fff, .flags = IORESOURCE_MEM, }, [1] = { .start = AU1300_PSC2_INT, .end = AU1300_PSC2_INT, .flags = IORESOURCE_IRQ, }, [2] = { .start = AU1300_DSCR_CMD0_PSC2_TX, .end = AU1300_DSCR_CMD0_PSC2_TX, .flags = IORESOURCE_DMA, }, [3] = { .start = AU1300_DSCR_CMD0_PSC2_RX, .end = AU1300_DSCR_CMD0_PSC2_RX, .flags = IORESOURCE_DMA, }, }; static struct platform_device db1300_i2s_dev = { .name = "au1xpsc_i2s", .id = 2, /* PSC ID */ .num_resources = ARRAY_SIZE(au1300_psc2_res), .resource = au1300_psc2_res, }; /**********************************************************************/ static struct resource au1300_psc3_res[] = { [0] = { .start = AU1300_PSC3_PHYS_ADDR, .end = AU1300_PSC3_PHYS_ADDR + 0x0fff, .flags = IORESOURCE_MEM, }, [1] = { .start = AU1300_PSC3_INT, .end = AU1300_PSC3_INT, .flags = IORESOURCE_IRQ, }, [2] = { .start = AU1300_DSCR_CMD0_PSC3_TX, .end = AU1300_DSCR_CMD0_PSC3_TX, .flags = IORESOURCE_DMA, }, [3] = { .start = AU1300_DSCR_CMD0_PSC3_RX, .end = AU1300_DSCR_CMD0_PSC3_RX, .flags = IORESOURCE_DMA, }, }; static struct platform_device db1300_i2c_dev = { .name = 
"au1xpsc_smbus", .id = 0, /* bus number */ .num_resources = ARRAY_SIZE(au1300_psc3_res), .resource = au1300_psc3_res, }; /**********************************************************************/ /* proper key assignments when facing the LCD panel. For key assignments * according to the schematics swap up with down and left with right. * I chose to use it to emulate the arrow keys of a keyboard. */ static struct gpio_keys_button db1300_5waysw_arrowkeys[] = { { .code = KEY_DOWN, .gpio = AU1300_PIN_LCDPWM0, .type = EV_KEY, .debounce_interval = 1, .active_low = 1, .desc = "5waysw-down", }, { .code = KEY_UP, .gpio = AU1300_PIN_PSC2SYNC1, .type = EV_KEY, .debounce_interval = 1, .active_low = 1, .desc = "5waysw-up", }, { .code = KEY_RIGHT, .gpio = AU1300_PIN_WAKE3, .type = EV_KEY, .debounce_interval = 1, .active_low = 1, .desc = "5waysw-right", }, { .code = KEY_LEFT, .gpio = AU1300_PIN_WAKE2, .type = EV_KEY, .debounce_interval = 1, .active_low = 1, .desc = "5waysw-left", }, { .code = KEY_ENTER, .gpio = AU1300_PIN_WAKE1, .type = EV_KEY, .debounce_interval = 1, .active_low = 1, .desc = "5waysw-push", }, }; static struct gpio_keys_platform_data db1300_5waysw_data = { .buttons = db1300_5waysw_arrowkeys, .nbuttons = ARRAY_SIZE(db1300_5waysw_arrowkeys), .rep = 1, .name = "db1300-5wayswitch", }; static struct platform_device db1300_5waysw_dev = { .name = "gpio-keys", .dev = { .platform_data = &db1300_5waysw_data, }, }; /**********************************************************************/ static struct pata_platform_info db1300_ide_info = { .ioport_shift = DB1300_IDE_REG_SHIFT, }; #define IDE_ALT_START (14 << DB1300_IDE_REG_SHIFT) static struct resource db1300_ide_res[] = { [0] = { .start = DB1300_IDE_PHYS_ADDR, .end = DB1300_IDE_PHYS_ADDR + IDE_ALT_START - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = DB1300_IDE_PHYS_ADDR + IDE_ALT_START, .end = DB1300_IDE_PHYS_ADDR + DB1300_IDE_PHYS_LEN - 1, .flags = IORESOURCE_MEM, }, [2] = { .start = DB1300_IDE_INT, .end = DB1300_IDE_INT, 
.flags = IORESOURCE_IRQ, }, }; static struct platform_device db1300_ide_dev = { .dev = { .platform_data = &db1300_ide_info, }, .name = "pata_platform", .resource = db1300_ide_res, .num_resources = ARRAY_SIZE(db1300_ide_res), }; /**********************************************************************/ static irqreturn_t db1300_mmc_cd(int irq, void *ptr) { void(*mmc_cd)(struct mmc_host *, unsigned long); /* disable the one currently screaming. No other way to shut it up */ if (irq == DB1300_SD1_INSERT_INT) { disable_irq_nosync(DB1300_SD1_INSERT_INT); enable_irq(DB1300_SD1_EJECT_INT); } else { disable_irq_nosync(DB1300_SD1_EJECT_INT); enable_irq(DB1300_SD1_INSERT_INT); } /* link against CONFIG_MMC=m. We can only be called once MMC core has * initialized the controller, so symbol_get() should always succeed. */ mmc_cd = symbol_get(mmc_detect_change); mmc_cd(ptr, msecs_to_jiffies(500)); symbol_put(mmc_detect_change); return IRQ_HANDLED; } static int db1300_mmc_card_readonly(void *mmc_host) { /* it uses SD1 interface, but the DB1200's SD0 bit in the CPLD */ return bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP; } static int db1300_mmc_card_inserted(void *mmc_host) { return bcsr_read(BCSR_SIGSTAT) & (1 << 12); /* insertion irq signal */ } static int db1300_mmc_cd_setup(void *mmc_host, int en) { int ret; if (en) { ret = request_irq(DB1300_SD1_INSERT_INT, db1300_mmc_cd, 0, "sd_insert", mmc_host); if (ret) goto out; ret = request_irq(DB1300_SD1_EJECT_INT, db1300_mmc_cd, 0, "sd_eject", mmc_host); if (ret) { free_irq(DB1300_SD1_INSERT_INT, mmc_host); goto out; } if (db1300_mmc_card_inserted(mmc_host)) enable_irq(DB1300_SD1_EJECT_INT); else enable_irq(DB1300_SD1_INSERT_INT); } else { free_irq(DB1300_SD1_INSERT_INT, mmc_host); free_irq(DB1300_SD1_EJECT_INT, mmc_host); } ret = 0; out: return ret; } static void db1300_mmcled_set(struct led_classdev *led, enum led_brightness brightness) { if (brightness != LED_OFF) bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0); else bcsr_mod(BCSR_LEDS, 0, 
BCSR_LEDS_LED0); } static struct led_classdev db1300_mmc_led = { .brightness_set = db1300_mmcled_set, }; struct au1xmmc_platform_data db1300_sd1_platdata = { .cd_setup = db1300_mmc_cd_setup, .card_inserted = db1300_mmc_card_inserted, .card_readonly = db1300_mmc_card_readonly, .led = &db1300_mmc_led, }; static struct resource au1300_sd1_res[] = { [0] = { .start = AU1300_SD1_PHYS_ADDR, .end = AU1300_SD1_PHYS_ADDR, .flags = IORESOURCE_MEM, }, [1] = { .start = AU1300_SD1_INT, .end = AU1300_SD1_INT, .flags = IORESOURCE_IRQ, }, [2] = { .start = AU1300_DSCR_CMD0_SDMS_TX1, .end = AU1300_DSCR_CMD0_SDMS_TX1, .flags = IORESOURCE_DMA, }, [3] = { .start = AU1300_DSCR_CMD0_SDMS_RX1, .end = AU1300_DSCR_CMD0_SDMS_RX1, .flags = IORESOURCE_DMA, }, }; static struct platform_device db1300_sd1_dev = { .dev = { .platform_data = &db1300_sd1_platdata, }, .name = "au1xxx-mmc", .id = 1, .resource = au1300_sd1_res, .num_resources = ARRAY_SIZE(au1300_sd1_res), }; /**********************************************************************/ static int db1300_movinand_inserted(void *mmc_host) { return 0; /* disable for now, it doesn't work yet */ } static int db1300_movinand_readonly(void *mmc_host) { return 0; } static void db1300_movinand_led_set(struct led_classdev *led, enum led_brightness brightness) { if (brightness != LED_OFF) bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED1, 0); else bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED1); } static struct led_classdev db1300_movinand_led = { .brightness_set = db1300_movinand_led_set, }; struct au1xmmc_platform_data db1300_sd0_platdata = { .card_inserted = db1300_movinand_inserted, .card_readonly = db1300_movinand_readonly, .led = &db1300_movinand_led, .mask_host_caps = MMC_CAP_NEEDS_POLL, }; static struct resource au1300_sd0_res[] = { [0] = { .start = AU1100_SD0_PHYS_ADDR, .end = AU1100_SD0_PHYS_ADDR, .flags = IORESOURCE_MEM, }, [1] = { .start = AU1300_SD0_INT, .end = AU1300_SD0_INT, .flags = IORESOURCE_IRQ, }, [2] = { .start = AU1300_DSCR_CMD0_SDMS_TX0, .end = 
AU1300_DSCR_CMD0_SDMS_TX0, .flags = IORESOURCE_DMA, }, [3] = { .start = AU1300_DSCR_CMD0_SDMS_RX0, .end = AU1300_DSCR_CMD0_SDMS_RX0, .flags = IORESOURCE_DMA, }, }; static struct platform_device db1300_sd0_dev = { .dev = { .platform_data = &db1300_sd0_platdata, }, .name = "au1xxx-mmc", .id = 0, .resource = au1300_sd0_res, .num_resources = ARRAY_SIZE(au1300_sd0_res), }; /**********************************************************************/ static struct platform_device db1300_wm9715_dev = { .name = "wm9712-codec", .id = 1, /* ID of PSC for AC97 audio, see asoc glue! */ }; static struct platform_device db1300_ac97dma_dev = { .name = "au1xpsc-pcm", .id = 1, /* PSC ID */ }; static struct platform_device db1300_i2sdma_dev = { .name = "au1xpsc-pcm", .id = 2, /* PSC ID */ }; static struct platform_device db1300_sndac97_dev = { .name = "db1300-ac97", }; static struct platform_device db1300_sndi2s_dev = { .name = "db1300-i2s", }; /**********************************************************************/ static int db1300fb_panel_index(void) { return 9; /* DB1300_800x480 */ } static int db1300fb_panel_init(void) { /* Apply power (Vee/Vdd logic is inverted on Panel DB1300_800x480) */ bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD, BCSR_BOARD_LCDBL); return 0; } static int db1300fb_panel_shutdown(void) { /* Remove power (Vee/Vdd logic is inverted on Panel DB1300_800x480) */ bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDBL, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD); return 0; } static struct au1200fb_platdata db1300fb_pd = { .panel_index = db1300fb_panel_index, .panel_init = db1300fb_panel_init, .panel_shutdown = db1300fb_panel_shutdown, }; static struct resource au1300_lcd_res[] = { [0] = { .start = AU1200_LCD_PHYS_ADDR, .end = AU1200_LCD_PHYS_ADDR + 0x800 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = AU1300_LCD_INT, .end = AU1300_LCD_INT, .flags = IORESOURCE_IRQ, } }; static u64 au1300_lcd_dmamask = DMA_BIT_MASK(32); static struct platform_device db1300_lcd_dev = { .name = 
"au1200-lcd", .id = 0, .dev = { .dma_mask = &au1300_lcd_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &db1300fb_pd, }, .num_resources = ARRAY_SIZE(au1300_lcd_res), .resource = au1300_lcd_res, }; /**********************************************************************/ static struct platform_device *db1300_dev[] __initdata = { &db1300_eth_dev, &db1300_i2c_dev, &db1300_5waysw_dev, &db1300_nand_dev, &db1300_ide_dev, &db1300_sd0_dev, &db1300_sd1_dev, &db1300_lcd_dev, &db1300_ac97_dev, &db1300_i2s_dev, &db1300_wm9715_dev, &db1300_ac97dma_dev, &db1300_i2sdma_dev, &db1300_sndac97_dev, &db1300_sndi2s_dev, }; static int __init db1300_device_init(void) { int swapped, cpldirq; /* setup CPLD IRQ muxer */ cpldirq = au1300_gpio_to_irq(AU1300_PIN_EXTCLK1); irq_set_irq_type(cpldirq, IRQ_TYPE_LEVEL_HIGH); bcsr_init_irq(DB1300_FIRST_INT, DB1300_LAST_INT, cpldirq); /* insert/eject IRQs: one always triggers so don't enable them * when doing request_irq() on them. DB1200 has this bug too. */ irq_set_status_flags(DB1300_SD1_INSERT_INT, IRQ_NOAUTOEN); irq_set_status_flags(DB1300_SD1_EJECT_INT, IRQ_NOAUTOEN); irq_set_status_flags(DB1300_CF_INSERT_INT, IRQ_NOAUTOEN); irq_set_status_flags(DB1300_CF_EJECT_INT, IRQ_NOAUTOEN); /* * setup board */ prom_get_ethernet_addr(&db1300_eth_config.mac[0]); i2c_register_board_info(0, db1300_i2c_devs, ARRAY_SIZE(db1300_i2c_devs)); /* Audio PSC clock is supplied by codecs (PSC1, 2) */ __raw_writel(PSC_SEL_CLK_SERCLK, (void __iomem *)KSEG1ADDR(AU1300_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET); wmb(); __raw_writel(PSC_SEL_CLK_SERCLK, (void __iomem *)KSEG1ADDR(AU1300_PSC2_PHYS_ADDR) + PSC_SEL_OFFSET); wmb(); /* I2C uses internal 48MHz EXTCLK1 */ __raw_writel(PSC_SEL_CLK_INTCLK, (void __iomem *)KSEG1ADDR(AU1300_PSC3_PHYS_ADDR) + PSC_SEL_OFFSET); wmb(); /* enable power to USB ports */ bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_USBHPWR | BCSR_RESETS_OTGPWR); /* although it is socket #0, it uses the CPLD bits which previous boards * have used for socket #1. 
*/ db1x_register_pcmcia_socket( AU1000_PCMCIA_ATTR_PHYS_ADDR, AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x00400000 - 1, AU1000_PCMCIA_MEM_PHYS_ADDR, AU1000_PCMCIA_MEM_PHYS_ADDR + 0x00400000 - 1, AU1000_PCMCIA_IO_PHYS_ADDR, AU1000_PCMCIA_IO_PHYS_ADDR + 0x00010000 - 1, DB1300_CF_INT, DB1300_CF_INSERT_INT, 0, DB1300_CF_EJECT_INT, 1); swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT; db1x_register_norflash(64 << 20, 2, swapped); return platform_add_devices(db1300_dev, ARRAY_SIZE(db1300_dev)); } device_initcall(db1300_device_init); void __init board_setup(void) { unsigned short whoami; db1300_gpio_config(); bcsr_init(DB1300_BCSR_PHYS_ADDR, DB1300_BCSR_PHYS_ADDR + DB1300_BCSR_HEXLED_OFS); whoami = bcsr_read(BCSR_WHOAMI); printk(KERN_INFO "NetLogic DBAu1300 Development Platform.\n\t" "BoardID %d CPLD Rev %d DaughtercardID %d\n", BCSR_WHOAMI_BOARD(whoami), BCSR_WHOAMI_CPLD(whoami), BCSR_WHOAMI_DCID(whoami)); /* enable UARTs, YAMON only enables #2 */ alchemy_uart_enable(AU1300_UART0_PHYS_ADDR); alchemy_uart_enable(AU1300_UART1_PHYS_ADDR); alchemy_uart_enable(AU1300_UART3_PHYS_ADDR); }
gpl-2.0
TeamEpsilon/linux-3.8
arch/arm/mach-omap1/io.c
4760
3859
/* * linux/arch/arm/mach-omap1/io.c * * OMAP1 I/O mapping code * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <asm/tlb.h> #include <asm/mach/map.h> #include <mach/mux.h> #include <mach/tc.h> #include <linux/omap-dma.h> #include "iomap.h" #include "common.h" #include "clock.h" /* * The machine specific code may provide the extra mapping besides the * default mapping provided here. */ static struct map_desc omap_io_desc[] __initdata = { { .virtual = OMAP1_IO_VIRT, .pfn = __phys_to_pfn(OMAP1_IO_PHYS), .length = OMAP1_IO_SIZE, .type = MT_DEVICE } }; #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850) static struct map_desc omap7xx_io_desc[] __initdata = { { .virtual = OMAP7XX_DSP_BASE, .pfn = __phys_to_pfn(OMAP7XX_DSP_START), .length = OMAP7XX_DSP_SIZE, .type = MT_DEVICE }, { .virtual = OMAP7XX_DSPREG_BASE, .pfn = __phys_to_pfn(OMAP7XX_DSPREG_START), .length = OMAP7XX_DSPREG_SIZE, .type = MT_DEVICE } }; #endif #ifdef CONFIG_ARCH_OMAP15XX static struct map_desc omap1510_io_desc[] __initdata = { { .virtual = OMAP1510_DSP_BASE, .pfn = __phys_to_pfn(OMAP1510_DSP_START), .length = OMAP1510_DSP_SIZE, .type = MT_DEVICE }, { .virtual = OMAP1510_DSPREG_BASE, .pfn = __phys_to_pfn(OMAP1510_DSPREG_START), .length = OMAP1510_DSPREG_SIZE, .type = MT_DEVICE } }; #endif #if defined(CONFIG_ARCH_OMAP16XX) static struct map_desc omap16xx_io_desc[] __initdata = { { .virtual = OMAP16XX_DSP_BASE, .pfn = __phys_to_pfn(OMAP16XX_DSP_START), .length = OMAP16XX_DSP_SIZE, .type = MT_DEVICE }, { .virtual = OMAP16XX_DSPREG_BASE, .pfn = __phys_to_pfn(OMAP16XX_DSPREG_START), .length = OMAP16XX_DSPREG_SIZE, .type = MT_DEVICE } }; #endif /* * Maps common IO regions for omap1 */ static void __init omap1_map_common_io(void) { 
iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc)); } #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850) void __init omap7xx_map_io(void) { omap1_map_common_io(); iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc)); } #endif #ifdef CONFIG_ARCH_OMAP15XX void __init omap15xx_map_io(void) { omap1_map_common_io(); iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc)); } #endif #if defined(CONFIG_ARCH_OMAP16XX) void __init omap16xx_map_io(void) { omap1_map_common_io(); iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc)); } #endif /* * Common low-level hardware init for omap1. */ void __init omap1_init_early(void) { omap_check_revision(); /* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort * on a Posted Write in the TIPB Bridge". */ omap_writew(0x0, MPU_PUBLIC_TIPB_CNTL); omap_writew(0x0, MPU_PRIVATE_TIPB_CNTL); /* Must init clocks early to assure that timer interrupt works */ omap1_clk_init(); omap1_mux_init(); } void __init omap1_init_late(void) { omap_serial_wakeup_init(); } /* * NOTE: Please use ioremap + __raw_read/write where possible instead of these */ u8 omap_readb(u32 pa) { return __raw_readb(OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_readb); u16 omap_readw(u32 pa) { return __raw_readw(OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_readw); u32 omap_readl(u32 pa) { return __raw_readl(OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_readl); void omap_writeb(u8 v, u32 pa) { __raw_writeb(v, OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_writeb); void omap_writew(u16 v, u32 pa) { __raw_writew(v, OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_writew); void omap_writel(u32 v, u32 pa) { __raw_writel(v, OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_writel);
gpl-2.0
Kurre/kernel_msm
fs/ecryptfs/read_write.c
5016
8866
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 2007 International Business Machines Corp. * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/fs.h> #include <linux/pagemap.h> #include "ecryptfs_kernel.h" /** * ecryptfs_write_lower * @ecryptfs_inode: The eCryptfs inode * @data: Data to write * @offset: Byte offset in the lower file to which to write the data * @size: Number of bytes from @data to write at @offset in the lower * file * * Write data to the lower file. 
* * Returns bytes written on success; less than zero on error */ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, loff_t offset, size_t size) { struct file *lower_file; mm_segment_t fs_save; ssize_t rc; lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file; if (!lower_file) return -EIO; fs_save = get_fs(); set_fs(get_ds()); rc = vfs_write(lower_file, data, size, &offset); set_fs(fs_save); mark_inode_dirty_sync(ecryptfs_inode); return rc; } /** * ecryptfs_write_lower_page_segment * @ecryptfs_inode: The eCryptfs inode * @page_for_lower: The page containing the data to be written to the * lower file * @offset_in_page: The offset in the @page_for_lower from which to * start writing the data * @size: The amount of data from @page_for_lower to write to the * lower file * * Determines the byte offset in the file for the given page and * offset within the page, maps the page, and makes the call to write * the contents of @page_for_lower to the lower inode. * * Returns zero on success; non-zero otherwise */ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode, struct page *page_for_lower, size_t offset_in_page, size_t size) { char *virt; loff_t offset; int rc; offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT) + offset_in_page); virt = kmap(page_for_lower); rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size); if (rc > 0) rc = 0; kunmap(page_for_lower); return rc; } /** * ecryptfs_write * @ecryptfs_inode: The eCryptfs file into which to write * @data: Virtual address where data to write is located * @offset: Offset in the eCryptfs file at which to begin writing the * data from @data * @size: The number of bytes to write from @data * * Write an arbitrary amount of data to an arbitrary location in the * eCryptfs inode page cache. This is done on a page-by-page, and then * by an extent-by-extent, basis; individual extents are encrypted and * written to the lower page cache (via VFS writes). 
This function * takes care of all the address translation to locations in the lower * filesystem; it also handles truncate events, writing out zeros * where necessary. * * Returns zero on success; non-zero otherwise */ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, size_t size) { struct page *ecryptfs_page; struct ecryptfs_crypt_stat *crypt_stat; char *ecryptfs_page_virt; loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode); loff_t data_offset = 0; loff_t pos; int rc = 0; crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; /* * if we are writing beyond current size, then start pos * at the current size - we'll fill in zeros from there. */ if (offset > ecryptfs_file_size) pos = ecryptfs_file_size; else pos = offset; while (pos < (offset + size)) { pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT); size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK); size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page); loff_t total_remaining_bytes = ((offset + size) - pos); if (fatal_signal_pending(current)) { rc = -EINTR; break; } if (num_bytes > total_remaining_bytes) num_bytes = total_remaining_bytes; if (pos < offset) { /* remaining zeros to write, up to destination offset */ loff_t total_remaining_zeros = (offset - pos); if (num_bytes > total_remaining_zeros) num_bytes = total_remaining_zeros; } ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode, ecryptfs_page_idx); if (IS_ERR(ecryptfs_page)) { rc = PTR_ERR(ecryptfs_page); printk(KERN_ERR "%s: Error getting page at " "index [%ld] from eCryptfs inode " "mapping; rc = [%d]\n", __func__, ecryptfs_page_idx, rc); goto out; } ecryptfs_page_virt = kmap_atomic(ecryptfs_page); /* * pos: where we're now writing, offset: where the request was * If current pos is before request, we are filling zeros * If we are at or beyond request, we are writing the *data* * If we're in a fresh page beyond eof, zero it in either case */ if (pos < offset || !start_offset_in_page) { /* We 
are extending past the previous end of the file. * Fill in zero values to the end of the page */ memset(((char *)ecryptfs_page_virt + start_offset_in_page), 0, PAGE_CACHE_SIZE - start_offset_in_page); } /* pos >= offset, we are now writing the data request */ if (pos >= offset) { memcpy(((char *)ecryptfs_page_virt + start_offset_in_page), (data + data_offset), num_bytes); data_offset += num_bytes; } kunmap_atomic(ecryptfs_page_virt); flush_dcache_page(ecryptfs_page); SetPageUptodate(ecryptfs_page); unlock_page(ecryptfs_page); if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) rc = ecryptfs_encrypt_page(ecryptfs_page); else rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, ecryptfs_page, start_offset_in_page, data_offset); page_cache_release(ecryptfs_page); if (rc) { printk(KERN_ERR "%s: Error encrypting " "page; rc = [%d]\n", __func__, rc); goto out; } pos += num_bytes; } if (pos > ecryptfs_file_size) { i_size_write(ecryptfs_inode, pos); if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) { int rc2; rc2 = ecryptfs_write_inode_size_to_metadata( ecryptfs_inode); if (rc2) { printk(KERN_ERR "Problem with " "ecryptfs_write_inode_size_to_metadata; " "rc = [%d]\n", rc2); if (!rc) rc = rc2; goto out; } } } out: return rc; } /** * ecryptfs_read_lower * @data: The read data is stored here by this function * @offset: Byte offset in the lower file from which to read the data * @size: Number of bytes to read from @offset of the lower file and * store into @data * @ecryptfs_inode: The eCryptfs inode * * Read @size bytes of data at byte offset @offset from the lower * inode into memory location @data. 
* * Returns bytes read on success; 0 on EOF; less than zero on error */ int ecryptfs_read_lower(char *data, loff_t offset, size_t size, struct inode *ecryptfs_inode) { struct file *lower_file; mm_segment_t fs_save; ssize_t rc; lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file; if (!lower_file) return -EIO; fs_save = get_fs(); set_fs(get_ds()); rc = vfs_read(lower_file, data, size, &offset); set_fs(fs_save); return rc; } /** * ecryptfs_read_lower_page_segment * @page_for_ecryptfs: The page into which data for eCryptfs will be * written * @offset_in_page: Offset in @page_for_ecryptfs from which to start * writing * @size: The number of bytes to write into @page_for_ecryptfs * @ecryptfs_inode: The eCryptfs inode * * Determines the byte offset in the file for the given page and * offset within the page, maps the page, and makes the call to read * the contents of @page_for_ecryptfs from the lower inode. * * Returns zero on success; non-zero otherwise */ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs, pgoff_t page_index, size_t offset_in_page, size_t size, struct inode *ecryptfs_inode) { char *virt; loff_t offset; int rc; offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page); virt = kmap(page_for_ecryptfs); rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode); if (rc > 0) rc = 0; kunmap(page_for_ecryptfs); flush_dcache_page(page_for_ecryptfs); return rc; }
gpl-2.0
shaowei-wang/linux-3.4-hummingbird
drivers/input/mouse/rpcmouse.c
9880
2954
/* * Acorn RiscPC mouse driver for Linux/ARM * * Copyright (c) 2000-2002 Vojtech Pavlik * Copyright (C) 1996-2002 Russell King * */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This handles the Acorn RiscPCs mouse. We basically have a couple of * hardware registers that track the sensor count for the X-Y movement and * another register holding the button state. On every VSYNC interrupt we read * the complete state and then work out if something has changed. */ #include <linux/module.h> #include <linux/ptrace.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/input.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/hardware/iomd.h> MODULE_AUTHOR("Vojtech Pavlik, Russell King"); MODULE_DESCRIPTION("Acorn RiscPC mouse driver"); MODULE_LICENSE("GPL"); static short rpcmouse_lastx, rpcmouse_lasty; static struct input_dev *rpcmouse_dev; static irqreturn_t rpcmouse_irq(int irq, void *dev_id) { struct input_dev *dev = dev_id; short x, y, dx, dy, b; x = (short) iomd_readl(IOMD_MOUSEX); y = (short) iomd_readl(IOMD_MOUSEY); b = (short) (__raw_readl(0xe0310000) ^ 0x70); dx = x - rpcmouse_lastx; dy = y - rpcmouse_lasty; rpcmouse_lastx = x; rpcmouse_lasty = y; input_report_rel(dev, REL_X, dx); input_report_rel(dev, REL_Y, -dy); input_report_key(dev, BTN_LEFT, b & 0x40); input_report_key(dev, BTN_MIDDLE, b & 0x20); input_report_key(dev, BTN_RIGHT, b & 0x10); input_sync(dev); return IRQ_HANDLED; } static int __init rpcmouse_init(void) { int err; rpcmouse_dev = input_allocate_device(); if (!rpcmouse_dev) return -ENOMEM; rpcmouse_dev->name = "Acorn RiscPC Mouse"; rpcmouse_dev->phys = "rpcmouse/input0"; rpcmouse_dev->id.bustype = BUS_HOST; rpcmouse_dev->id.vendor = 0x0005; rpcmouse_dev->id.product = 0x0001; rpcmouse_dev->id.version = 0x0100; rpcmouse_dev->evbit[0] = BIT_MASK(EV_KEY) | 
BIT_MASK(EV_REL); rpcmouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); rpcmouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); rpcmouse_lastx = (short) iomd_readl(IOMD_MOUSEX); rpcmouse_lasty = (short) iomd_readl(IOMD_MOUSEY); if (request_irq(IRQ_VSYNCPULSE, rpcmouse_irq, IRQF_SHARED, "rpcmouse", rpcmouse_dev)) { printk(KERN_ERR "rpcmouse: unable to allocate VSYNC interrupt\n"); err = -EBUSY; goto err_free_dev; } err = input_register_device(rpcmouse_dev); if (err) goto err_free_irq; return 0; err_free_irq: free_irq(IRQ_VSYNCPULSE, rpcmouse_dev); err_free_dev: input_free_device(rpcmouse_dev); return err; } static void __exit rpcmouse_exit(void) { free_irq(IRQ_VSYNCPULSE, rpcmouse_dev); input_unregister_device(rpcmouse_dev); } module_init(rpcmouse_init); module_exit(rpcmouse_exit);
gpl-2.0
jjhmod/mk908-jjh-kernel
drivers/input/mouse/rpcmouse.c
9880
2954
/* * Acorn RiscPC mouse driver for Linux/ARM * * Copyright (c) 2000-2002 Vojtech Pavlik * Copyright (C) 1996-2002 Russell King * */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This handles the Acorn RiscPCs mouse. We basically have a couple of * hardware registers that track the sensor count for the X-Y movement and * another register holding the button state. On every VSYNC interrupt we read * the complete state and then work out if something has changed. */ #include <linux/module.h> #include <linux/ptrace.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/input.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/hardware/iomd.h> MODULE_AUTHOR("Vojtech Pavlik, Russell King"); MODULE_DESCRIPTION("Acorn RiscPC mouse driver"); MODULE_LICENSE("GPL"); static short rpcmouse_lastx, rpcmouse_lasty; static struct input_dev *rpcmouse_dev; static irqreturn_t rpcmouse_irq(int irq, void *dev_id) { struct input_dev *dev = dev_id; short x, y, dx, dy, b; x = (short) iomd_readl(IOMD_MOUSEX); y = (short) iomd_readl(IOMD_MOUSEY); b = (short) (__raw_readl(0xe0310000) ^ 0x70); dx = x - rpcmouse_lastx; dy = y - rpcmouse_lasty; rpcmouse_lastx = x; rpcmouse_lasty = y; input_report_rel(dev, REL_X, dx); input_report_rel(dev, REL_Y, -dy); input_report_key(dev, BTN_LEFT, b & 0x40); input_report_key(dev, BTN_MIDDLE, b & 0x20); input_report_key(dev, BTN_RIGHT, b & 0x10); input_sync(dev); return IRQ_HANDLED; } static int __init rpcmouse_init(void) { int err; rpcmouse_dev = input_allocate_device(); if (!rpcmouse_dev) return -ENOMEM; rpcmouse_dev->name = "Acorn RiscPC Mouse"; rpcmouse_dev->phys = "rpcmouse/input0"; rpcmouse_dev->id.bustype = BUS_HOST; rpcmouse_dev->id.vendor = 0x0005; rpcmouse_dev->id.product = 0x0001; rpcmouse_dev->id.version = 0x0100; rpcmouse_dev->evbit[0] = BIT_MASK(EV_KEY) | 
BIT_MASK(EV_REL); rpcmouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); rpcmouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); rpcmouse_lastx = (short) iomd_readl(IOMD_MOUSEX); rpcmouse_lasty = (short) iomd_readl(IOMD_MOUSEY); if (request_irq(IRQ_VSYNCPULSE, rpcmouse_irq, IRQF_SHARED, "rpcmouse", rpcmouse_dev)) { printk(KERN_ERR "rpcmouse: unable to allocate VSYNC interrupt\n"); err = -EBUSY; goto err_free_dev; } err = input_register_device(rpcmouse_dev); if (err) goto err_free_irq; return 0; err_free_irq: free_irq(IRQ_VSYNCPULSE, rpcmouse_dev); err_free_dev: input_free_device(rpcmouse_dev); return err; } static void __exit rpcmouse_exit(void) { free_irq(IRQ_VSYNCPULSE, rpcmouse_dev); input_unregister_device(rpcmouse_dev); } module_init(rpcmouse_init); module_exit(rpcmouse_exit);
gpl-2.0
MaxiCM/android_kernel_lge_jagnm
arch/arm/nwfpe/single_cpdo.c
15000
3563
/*
    NetWinder Floating Point Emulator
    (c) Rebel.COM, 1998,1999
    (c) Philip Blundell, 2001

    Direct questions, comments to Scott Bambrough <scottb@netwinder.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "fpa11.h"
#include "softfloat.h"
#include "fpopcode.h"

float32 float32_exp(float32 Fm);
float32 float32_ln(float32 Fm);
float32 float32_sin(float32 rFm);
float32 float32_cos(float32 rFm);
float32 float32_arcsin(float32 rFm);
float32 float32_arctan(float32 rFm);
float32 float32_log(float32 rFm);
float32 float32_tan(float32 rFm);
float32 float32_arccos(float32 rFm);
float32 float32_pow(float32 rFn, float32 rFm);
float32 float32_pol(float32 rFn, float32 rFm);

/* Reverse subtract: computes Fm - Fn instead of Fn - Fm. */
static float32 float32_rsf(struct roundingData *roundData, float32 rFn, float32 rFm)
{
	return float32_sub(roundData, rFm, rFn);
}

/* Reverse divide: computes Fm / Fn instead of Fn / Fm. */
static float32 float32_rdv(struct roundingData *roundData, float32 rFn, float32 rFm)
{
	return float32_div(roundData, rFm, rFn);
}

/*
 * Two-operand handlers, indexed by bits [23:20] of the opcode.
 * Unimplemented slots stay NULL and make SingleCPDO() bail out.
 */
static float32 (*const dyadic_single[16])(struct roundingData *,
					  float32 rFn, float32 rFm) = {
	[ADF_CODE >> 20] = float32_add,
	[MUF_CODE >> 20] = float32_mul,
	[SUF_CODE >> 20] = float32_sub,
	[RSF_CODE >> 20] = float32_rsf,
	[DVF_CODE >> 20] = float32_div,
	[RDF_CODE >> 20] = float32_rdv,
	[RMF_CODE >> 20] = float32_rem,
	[FML_CODE >> 20] = float32_mul,
	[FDV_CODE >> 20] = float32_div,
	[FRD_CODE >> 20] = float32_rdv,
};

/* Move: identity copy of the operand. */
static float32 float32_mvf(struct roundingData *roundData, float32 rFm)
{
	return rFm;
}

/* Move negated: flip the IEEE-754 single sign bit. */
static float32 float32_mnf(struct roundingData *roundData, float32 rFm)
{
	return rFm ^ 0x80000000;
}

/* Absolute value: clear the IEEE-754 single sign bit. */
static float32 float32_abs(struct roundingData *roundData, float32 rFm)
{
	return rFm & 0x7fffffff;
}

/* One-operand handlers, same [23:20] opcode indexing as dyadic_single. */
static float32 (*const monadic_single[16])(struct roundingData *,
					   float32 rFm) = {
	[MVF_CODE >> 20] = float32_mvf,
	[MNF_CODE >> 20] = float32_mnf,
	[ABS_CODE >> 20] = float32_abs,
	[RND_CODE >> 20] = float32_round_to_int,
	[URD_CODE >> 20] = float32_round_to_int,
	[SQT_CODE >> 20] = float32_sqrt,
	[NRM_CODE >> 20] = float32_mvf,
};

/*
 * Execute a single-precision CPDO instruction.
 *
 * Decodes the Fm operand (register or immediate constant), dispatches
 * through the monadic/dyadic tables, and stores the result in *rFd.
 * Returns 1 when the instruction was handled, 0 when the operands are
 * not single precision or the opcode slot is unimplemented.
 */
unsigned int SingleCPDO(struct roundingData *roundData,
			const unsigned int opcode, FPREG *rFd)
{
	FPA11 *fpa11 = GET_FPA11();
	unsigned int Fm = getFm(opcode);
	unsigned int op_index = (opcode & MASK_ARITHMETIC_OPCODE) >> 20;
	float32 operand_m;

	if (CONSTANT_FM(opcode))
		operand_m = getSingleConstant(Fm);
	else if (fpa11->fType[Fm] == typeSingle)
		operand_m = fpa11->fpreg[Fm].fSingle;
	else
		return 0;

	if (MONADIC_INSTRUCTION(opcode)) {
		float32 (*op)(struct roundingData *, float32) =
			monadic_single[op_index];

		if (!op)
			return 0;
		rFd->fSingle = op(roundData, operand_m);
	} else {
		unsigned int Fn = getFn(opcode);
		float32 (*op)(struct roundingData *, float32, float32) =
			dyadic_single[op_index];

		if (fpa11->fType[Fn] != typeSingle || !op)
			return 0;
		rFd->fSingle = op(roundData, fpa11->fpreg[Fn].fSingle,
				  operand_m);
	}

	return 1;
}
gpl-2.0
Supervenom/linux-mod_sys_call
drivers/media/i2c/adv7183.c
153
16990
/*
 * adv7183.c Analog Devices ADV7183 video decoder driver
 *
 * Copyright (c) 2011 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>

#include <media/adv7183.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>

#include "adv7183_regs.h"

/* Per-device state, embedded around the v4l2 subdev. */
struct adv7183 {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
	v4l2_std_id std; /* Current set standard */
	u32 input;       /* current input routing (ADV7183_COMPOSITE*/SVIDEO*/COMPONENT*) */
	u32 output;      /* current output bus width selection */
	unsigned reset_pin; /* GPIO driving the chip's active-low reset */
	unsigned oe_pin;    /* GPIO gating the output drivers (active low) */
	struct v4l2_mbus_framefmt fmt; /* last format set via s_mbus_fmt */
};

/* EXAMPLES USING 27 MHz CLOCK
 * Mode 1 CVBS Input (Composite Video on AIN5)
 * All standards are supported through autodetect, 8-bit, 4:2:2, ITU-R BT.656 output on P15 to P8.
 */
/* Register/value pairs written at probe; includes the ADI-recommended
 * (undocumented) programming sequence between the two ADI_CTRL writes. */
static const unsigned char adv7183_init_regs[] = {
	ADV7183_IN_CTRL, 0x04,           /* CVBS input on AIN5 */
	ADV7183_DIGI_CLAMP_CTRL_1, 0x00, /* Slow down digital clamps */
	ADV7183_SHAP_FILT_CTRL, 0x41,    /* Set CSFM to SH1 */
	ADV7183_ADC_CTRL, 0x16,          /* Power down ADC 1 and ADC 2 */
	ADV7183_CTI_DNR_CTRL_4, 0x04,    /* Set DNR threshold to 4 for flat response */
	/* ADI recommended programming sequence */
	ADV7183_ADI_CTRL, 0x80,
	ADV7183_CTI_DNR_CTRL_4, 0x20,
	0x52, 0x18,
	0x58, 0xED,
	0x77, 0xC5,
	0x7C, 0x93,
	0x7D, 0x00,
	0xD0, 0x48,
	0xD5, 0xA0,
	0xD7, 0xEA,
	ADV7183_SD_SATURATION_CR, 0x3E,
	ADV7183_PAL_V_END, 0x3E,
	ADV7183_PAL_F_TOGGLE, 0x0F,
	ADV7183_ADI_CTRL, 0x00,
};

/* Recover driver state from the embedded subdev pointer. */
static inline struct adv7183 *to_adv7183(struct v4l2_subdev *sd)
{
	return container_of(sd, struct adv7183, sd);
}

/* Recover the subdev from a control's handler. */
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
	return &container_of(ctrl->handler, struct adv7183, hdl)->sd;
}

/* SMBus byte read of a chip register; returns value or negative errno. */
static inline int adv7183_read(struct v4l2_subdev *sd, unsigned char reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return i2c_smbus_read_byte_data(client, reg);
}

/* SMBus byte write of a chip register. */
static inline int adv7183_write(struct v4l2_subdev *sd, unsigned char reg,
				unsigned char value)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return i2c_smbus_write_byte_data(client, reg, value);
}

/* Write an even-length array of (register, value) pairs in order.
 * Individual write errors are not checked here. */
static int adv7183_writeregs(struct v4l2_subdev *sd,
		const unsigned char *regs, unsigned int num)
{
	unsigned char reg, data;
	unsigned int cnt = 0;

	if (num & 0x1) {
		v4l2_err(sd, "invalid regs array\n");
		return -1;
	}

	while (cnt < num) {
		reg = *regs++;
		data = *regs++;
		cnt += 2;

		adv7183_write(sd, reg, data);
	}
	return 0;
}

/* Dump the interesting chip registers to the kernel log for debugging. */
static int adv7183_log_status(struct v4l2_subdev *sd)
{
	struct adv7183 *decoder = to_adv7183(sd);

	v4l2_info(sd, "adv7183: Input control = 0x%02x\n",
			adv7183_read(sd, ADV7183_IN_CTRL));
	v4l2_info(sd, "adv7183: Video selection = 0x%02x\n",
			adv7183_read(sd, ADV7183_VD_SEL));
	v4l2_info(sd, "adv7183: Output control = 0x%02x\n",
			adv7183_read(sd, ADV7183_OUT_CTRL));
	v4l2_info(sd, "adv7183: Extended output control = 0x%02x\n",
			adv7183_read(sd, ADV7183_EXT_OUT_CTRL));
	v4l2_info(sd, "adv7183: Autodetect enable = 0x%02x\n",
			adv7183_read(sd, ADV7183_AUTO_DET_EN));
	v4l2_info(sd, "adv7183: Contrast = 0x%02x\n",
			adv7183_read(sd, ADV7183_CONTRAST));
	v4l2_info(sd, "adv7183: Brightness = 0x%02x\n",
			adv7183_read(sd, ADV7183_BRIGHTNESS));
	v4l2_info(sd, "adv7183: Hue = 0x%02x\n",
			adv7183_read(sd, ADV7183_HUE));
	v4l2_info(sd, "adv7183: Default value Y = 0x%02x\n",
			adv7183_read(sd, ADV7183_DEF_Y));
	v4l2_info(sd, "adv7183: Default value C = 0x%02x\n",
			adv7183_read(sd, ADV7183_DEF_C));
	v4l2_info(sd, "adv7183: ADI control = 0x%02x\n",
			adv7183_read(sd, ADV7183_ADI_CTRL));
	v4l2_info(sd, "adv7183: Power Management = 0x%02x\n",
			adv7183_read(sd, ADV7183_POW_MANAGE));
	v4l2_info(sd, "adv7183: Status 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n",
			adv7183_read(sd, ADV7183_STATUS_1),
			adv7183_read(sd, ADV7183_STATUS_2),
			adv7183_read(sd, ADV7183_STATUS_3));
	v4l2_info(sd, "adv7183: Ident = 0x%02x\n",
			adv7183_read(sd, ADV7183_IDENT));
	v4l2_info(sd, "adv7183: Analog clamp control = 0x%02x\n",
			adv7183_read(sd, ADV7183_ANAL_CLAMP_CTRL));
	v4l2_info(sd, "adv7183: Digital clamp control 1 = 0x%02x\n",
			adv7183_read(sd, ADV7183_DIGI_CLAMP_CTRL_1));
	v4l2_info(sd, "adv7183: Shaping filter control 1 and 2 = 0x%02x 0x%02x\n",
			adv7183_read(sd, ADV7183_SHAP_FILT_CTRL),
			adv7183_read(sd, ADV7183_SHAP_FILT_CTRL_2));
	v4l2_info(sd, "adv7183: Comb filter control = 0x%02x\n",
			adv7183_read(sd, ADV7183_COMB_FILT_CTRL));
	v4l2_info(sd, "adv7183: ADI control 2 = 0x%02x\n",
			adv7183_read(sd, ADV7183_ADI_CTRL_2));
	v4l2_info(sd, "adv7183: Pixel delay control = 0x%02x\n",
			adv7183_read(sd, ADV7183_PIX_DELAY_CTRL));
	v4l2_info(sd, "adv7183: Misc gain control = 0x%02x\n",
			adv7183_read(sd, ADV7183_MISC_GAIN_CTRL));
	v4l2_info(sd, "adv7183: AGC mode control = 0x%02x\n",
			adv7183_read(sd, ADV7183_AGC_MODE_CTRL));
	v4l2_info(sd, "adv7183: Chroma gain control 1 and 2 = 0x%02x 0x%02x\n",
			adv7183_read(sd, ADV7183_CHRO_GAIN_CTRL_1),
			adv7183_read(sd, ADV7183_CHRO_GAIN_CTRL_2));
	v4l2_info(sd, "adv7183: Luma gain control 1 and 2 = 0x%02x 0x%02x\n",
			adv7183_read(sd, ADV7183_LUMA_GAIN_CTRL_1),
			adv7183_read(sd, ADV7183_LUMA_GAIN_CTRL_2));
	v4l2_info(sd, "adv7183: Vsync field control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n",
			adv7183_read(sd, ADV7183_VS_FIELD_CTRL_1),
			adv7183_read(sd, ADV7183_VS_FIELD_CTRL_2),
			adv7183_read(sd, ADV7183_VS_FIELD_CTRL_3));
	v4l2_info(sd, "adv7183: Hsync positon control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n",
			adv7183_read(sd, ADV7183_HS_POS_CTRL_1),
			adv7183_read(sd, ADV7183_HS_POS_CTRL_2),
			adv7183_read(sd, ADV7183_HS_POS_CTRL_3));
	v4l2_info(sd, "adv7183: Polarity = 0x%02x\n",
			adv7183_read(sd, ADV7183_POLARITY));
	v4l2_info(sd, "adv7183: ADC control = 0x%02x\n",
			adv7183_read(sd, ADV7183_ADC_CTRL));
	v4l2_info(sd, "adv7183: SD offset Cb and Cr = 0x%02x 0x%02x\n",
			adv7183_read(sd, ADV7183_SD_OFFSET_CB),
			adv7183_read(sd, ADV7183_SD_OFFSET_CR));
	v4l2_info(sd, "adv7183: SD saturation Cb and Cr = 0x%02x 0x%02x\n",
			adv7183_read(sd, ADV7183_SD_SATURATION_CB),
			adv7183_read(sd, ADV7183_SD_SATURATION_CR));
	v4l2_info(sd, "adv7183: Drive strength = 0x%02x\n",
			adv7183_read(sd, ADV7183_DRIVE_STR));
	v4l2_ctrl_handler_log_status(&decoder->hdl, sd->name);
	return 0;
}

/* Report the standard currently cached in driver state. */
static int adv7183_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
{
	struct adv7183 *decoder = to_adv7183(sd);

	*std = decoder->std;
	return 0;
}

/* Program the video-standard bits of IN_CTRL (upper nibble) and cache it.
 * Specific composite standards are matched first, then the broad families. */
static int adv7183_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct adv7183 *decoder = to_adv7183(sd);
	int reg;

	reg = adv7183_read(sd, ADV7183_IN_CTRL) & 0xF;
	if (std == V4L2_STD_PAL_60)
		reg |= 0x60;
	else if (std == V4L2_STD_NTSC_443)
		reg |= 0x70;
	else if (std == V4L2_STD_PAL_N)
		reg |= 0x90;
	else if (std == V4L2_STD_PAL_M)
		reg |= 0xA0;
	else if (std == V4L2_STD_PAL_Nc)
		reg |= 0xC0;
	else if (std & V4L2_STD_PAL)
		reg |= 0x80;
	else if (std & V4L2_STD_NTSC)
		reg |= 0x50;
	else if (std & V4L2_STD_SECAM)
		reg |= 0xE0;
	else
		return -EINVAL;
	adv7183_write(sd, ADV7183_IN_CTRL, reg);

	decoder->std = std;

	return 0;
}

/* Soft-reset the chip via the POW_MANAGE reset bit (0x80). */
static int adv7183_reset(struct v4l2_subdev *sd, u32 val)
{
	int reg;

	reg = adv7183_read(sd, ADV7183_POW_MANAGE) | 0x80;
	adv7183_write(sd, ADV7183_POW_MANAGE, reg);
	/* wait 5ms before any further i2c writes are performed */
	usleep_range(5000, 10000);
	return 0;
}

/* Select input pin mux (IN_CTRL low nibble) and output bus width
 * (OUT_CTRL low bits); only touches hardware when the value changes. */
static int adv7183_s_routing(struct v4l2_subdev *sd,
				u32 input, u32 output, u32 config)
{
	struct adv7183 *decoder = to_adv7183(sd);
	int reg;

	if ((input > ADV7183_COMPONENT1) || (output > ADV7183_16BIT_OUT))
		return -EINVAL;

	if (input != decoder->input) {
		decoder->input = input;
		reg = adv7183_read(sd, ADV7183_IN_CTRL) & 0xF0;
		switch (input) {
		case ADV7183_COMPOSITE1:
			reg |= 0x1;
			break;
		case ADV7183_COMPOSITE2:
			reg |= 0x2;
			break;
		case ADV7183_COMPOSITE3:
			reg |= 0x3;
			break;
		case ADV7183_COMPOSITE4:
			reg |= 0x4;
			break;
		case ADV7183_COMPOSITE5:
			reg |= 0x5;
			break;
		case ADV7183_COMPOSITE6:
			reg |= 0xB;
			break;
		case ADV7183_COMPOSITE7:
			reg |= 0xC;
			break;
		case ADV7183_COMPOSITE8:
			reg |= 0xD;
			break;
		case ADV7183_COMPOSITE9:
			reg |= 0xE;
			break;
		case ADV7183_COMPOSITE10:
			reg |= 0xF;
			break;
		case ADV7183_SVIDEO0:
			reg |= 0x6;
			break;
		case ADV7183_SVIDEO1:
			reg |= 0x7;
			break;
		case ADV7183_SVIDEO2:
			reg |= 0x8;
			break;
		case ADV7183_COMPONENT0:
			reg |= 0x9;
			break;
		case ADV7183_COMPONENT1:
			reg |= 0xA;
			break;
		default:
			break;
		}
		adv7183_write(sd, ADV7183_IN_CTRL, reg);
	}

	if (output != decoder->output) {
		decoder->output = output;
		reg = adv7183_read(sd, ADV7183_OUT_CTRL) & 0xC0;
		switch (output) {
		case ADV7183_16BIT_OUT:
			reg |= 0x9;
			break;
		default:
			reg |= 0xC;
			break;
		}
		adv7183_write(sd, ADV7183_OUT_CTRL, reg);
	}
	return 0;
}

/* Apply a user control to the chip.  Saturation and hue are 16-bit values
 * split across a Cb/Cr register pair.  Negative brightness is remapped to
 * the 127-val range the register expects. */
static int adv7183_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);
	int val = ctrl->val;

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		if (val < 0)
			val = 127 - val;
		adv7183_write(sd, ADV7183_BRIGHTNESS, val);
		break;
	case V4L2_CID_CONTRAST:
		adv7183_write(sd, ADV7183_CONTRAST, val);
		break;
	case V4L2_CID_SATURATION:
		adv7183_write(sd, ADV7183_SD_SATURATION_CB, val >> 8);
		adv7183_write(sd, ADV7183_SD_SATURATION_CR, (val & 0xFF));
		break;
	case V4L2_CID_HUE:
		adv7183_write(sd, ADV7183_SD_OFFSET_CB, val >> 8);
		adv7183_write(sd, ADV7183_SD_OFFSET_CR, (val & 0xFF));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Run hardware autodetection, narrow *std by the detected standard,
 * then restore the user-selected standard in IN_CTRL. */
static int adv7183_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
	struct adv7183 *decoder = to_adv7183(sd);
	int reg;

	/* enable autodetection block */
	reg = adv7183_read(sd, ADV7183_IN_CTRL) & 0xF;
	adv7183_write(sd, ADV7183_IN_CTRL, reg);

	/* wait autodetection switch */
	mdelay(10);

	/* get autodetection result */
	reg = adv7183_read(sd, ADV7183_STATUS_1);
	switch ((reg >> 0x4) & 0x7) {
	case 0:
		*std &= V4L2_STD_NTSC;
		break;
	case 1:
		*std &= V4L2_STD_NTSC_443;
		break;
	case 2:
		*std &= V4L2_STD_PAL_M;
		break;
	case 3:
		*std &= V4L2_STD_PAL_60;
		break;
	case 4:
		*std &= V4L2_STD_PAL;
		break;
	case 5:
		*std &= V4L2_STD_SECAM;
		break;
	case 6:
		*std &= V4L2_STD_PAL_Nc;
		break;
	case 7:
		*std &= V4L2_STD_SECAM;
		break;
	default:
		*std = V4L2_STD_UNKNOWN;
		break;
	}

	/* after std detection, write back user set std */
	adv7183_s_std(sd, decoder->std);
	return 0;
}

/* Signal present iff STATUS_1 bit 0 ("in lock") is set. */
static int adv7183_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
	int reg;

	*status = V4L2_IN_ST_NO_SIGNAL;
	reg = adv7183_read(sd, ADV7183_STATUS_1);
	if (reg < 0)
		return reg;
	if (reg & 0x1)
		*status = 0;
	return 0;
}

/* Only one media-bus format is supported: UYVY 8-bit. */
static int adv7183_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
				enum v4l2_mbus_pixelcode *code)
{
	if (index > 0)
		return -EINVAL;

	*code = V4L2_MBUS_FMT_UYVY8_2X8;
	return 0;
}

/* Clamp the requested format to the fixed 720x480 (60 Hz standards)
 * or 720x576 (50 Hz standards) UYVY format. */
static int adv7183_try_mbus_fmt(struct v4l2_subdev *sd,
				struct v4l2_mbus_framefmt *fmt)
{
	struct adv7183 *decoder = to_adv7183(sd);

	fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
	fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
	if (decoder->std & V4L2_STD_525_60) {
		fmt->field = V4L2_FIELD_SEQ_TB;
		fmt->width = 720;
		fmt->height = 480;
	} else {
		fmt->field = V4L2_FIELD_SEQ_BT;
		fmt->width = 720;
		fmt->height = 576;
	}
	return 0;
}

/* Set = try + cache; nothing further is programmed into the chip here. */
static int adv7183_s_mbus_fmt(struct v4l2_subdev *sd,
				struct v4l2_mbus_framefmt *fmt)
{
	struct adv7183 *decoder = to_adv7183(sd);

	adv7183_try_mbus_fmt(sd, fmt);
	decoder->fmt = *fmt;
	return 0;
}

/* Return the cached format. */
static int adv7183_g_mbus_fmt(struct v4l2_subdev *sd,
				struct v4l2_mbus_framefmt *fmt)
{
	struct adv7183 *decoder = to_adv7183(sd);

	*fmt = decoder->fmt;
	return 0;
}

/* Gate the output drivers via the OE GPIO (active low: 0 = enabled). */
static int adv7183_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct adv7183 *decoder = to_adv7183(sd);

	if (enable)
		gpio_set_value(decoder->oe_pin, 0);
	else
		gpio_set_value(decoder->oe_pin, 1);
	udelay(1);
	return 0;
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Raw register access for debugfs/VIDIOC_DBG_*; debug builds only. */
static int adv7183_g_register(struct v4l2_subdev *sd,
				struct v4l2_dbg_register *reg)
{
	reg->val = adv7183_read(sd, reg->reg & 0xff);
	reg->size = 1;
	return 0;
}

static int adv7183_s_register(struct v4l2_subdev *sd,
				const struct v4l2_dbg_register *reg)
{
	adv7183_write(sd, reg->reg & 0xff, reg->val & 0xff);
	return 0;
}
#endif

static const struct v4l2_ctrl_ops adv7183_ctrl_ops = {
	.s_ctrl = adv7183_s_ctrl,
};

static const struct v4l2_subdev_core_ops adv7183_core_ops = {
	.log_status = adv7183_log_status,
	.g_std = adv7183_g_std,
	.s_std = adv7183_s_std,
	.reset = adv7183_reset,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = adv7183_g_register,
	.s_register = adv7183_s_register,
#endif
};

static const struct v4l2_subdev_video_ops adv7183_video_ops = {
	.s_routing = adv7183_s_routing,
	.querystd = adv7183_querystd,
	.g_input_status = adv7183_g_input_status,
	.enum_mbus_fmt = adv7183_enum_mbus_fmt,
	.try_mbus_fmt = adv7183_try_mbus_fmt,
	.s_mbus_fmt = adv7183_s_mbus_fmt,
	.g_mbus_fmt = adv7183_g_mbus_fmt,
	.s_stream = adv7183_s_stream,
};

static const struct v4l2_subdev_ops adv7183_ops = {
	.core = &adv7183_core_ops,
	.video = &adv7183_video_ops,
};

/* Probe: claim the reset/OE GPIOs (numbers come via platform_data),
 * register the subdev and controls, pulse reset, load the init register
 * table, and program defaults (PAL, composite-4 in, 8-bit out). */
static int adv7183_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct adv7183 *decoder;
	struct v4l2_subdev *sd;
	struct v4l2_ctrl_handler *hdl;
	int ret;
	/* NOTE(review): only width/height are set below before s_mbus_fmt;
	 * try_mbus_fmt fills code/colorspace/field, but any other members
	 * of this stack struct are copied into decoder->fmt uninitialized
	 * — presumably harmless, but worth confirming. */
	struct v4l2_mbus_framefmt fmt;
	const unsigned *pin_array;

	/* Check if the adapter supports the needed features */
	if (!i2c_check_functionality(client->adapter,
				I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	v4l_info(client, "chip found @ 0x%02x (%s)\n",
			client->addr << 1, client->adapter->name);

	pin_array = client->dev.platform_data;
	if (pin_array == NULL)
		return -EINVAL;

	decoder = devm_kzalloc(&client->dev, sizeof(*decoder), GFP_KERNEL);
	if (decoder == NULL)
		return -ENOMEM;

	decoder->reset_pin = pin_array[0];
	decoder->oe_pin = pin_array[1];

	if (devm_gpio_request_one(&client->dev, decoder->reset_pin,
				  GPIOF_OUT_INIT_LOW, "ADV7183 Reset")) {
		v4l_err(client, "failed to request GPIO %d\n", decoder->reset_pin);
		return -EBUSY;
	}

	if (devm_gpio_request_one(&client->dev, decoder->oe_pin,
				  GPIOF_OUT_INIT_HIGH,
				  "ADV7183 Output Enable")) {
		v4l_err(client, "failed to request GPIO %d\n", decoder->oe_pin);
		return -EBUSY;
	}

	sd = &decoder->sd;
	v4l2_i2c_subdev_init(sd, client, &adv7183_ops);

	hdl = &decoder->hdl;
	v4l2_ctrl_handler_init(hdl, 4);
	v4l2_ctrl_new_std(hdl, &adv7183_ctrl_ops,
			V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
	v4l2_ctrl_new_std(hdl, &adv7183_ctrl_ops,
			V4L2_CID_CONTRAST, 0, 0xFF, 1, 0x80);
	v4l2_ctrl_new_std(hdl, &adv7183_ctrl_ops,
			V4L2_CID_SATURATION, 0, 0xFFFF, 1, 0x8080);
	v4l2_ctrl_new_std(hdl, &adv7183_ctrl_ops,
			V4L2_CID_HUE, 0, 0xFFFF, 1, 0x8080);
	/* hook the control handler into the driver */
	sd->ctrl_handler = hdl;
	if (hdl->error) {
		ret = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return ret;
	}

	/* v4l2 doesn't support an autodetect standard, pick PAL as default */
	decoder->std = V4L2_STD_PAL;
	decoder->input = ADV7183_COMPOSITE4;
	decoder->output = ADV7183_8BIT_OUT;

	/* reset chip */
	/* reset pulse width at least 5ms */
	mdelay(10);
	gpio_set_value(decoder->reset_pin, 1);
	/* wait 5ms before any further i2c writes are performed */
	mdelay(5);

	adv7183_writeregs(sd, adv7183_init_regs, ARRAY_SIZE(adv7183_init_regs));
	adv7183_s_std(sd, decoder->std);
	fmt.width = 720;
	fmt.height = 576;
	adv7183_s_mbus_fmt(sd, &fmt);

	/* initialize the hardware to the default control values */
	ret = v4l2_ctrl_handler_setup(hdl);
	if (ret) {
		v4l2_ctrl_handler_free(hdl);
		return ret;
	}

	return 0;
}

/* Remove: unregister the subdev and release the control handler.
 * GPIOs and memory are devm-managed and freed automatically. */
static int adv7183_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);

	v4l2_device_unregister_subdev(sd);
	v4l2_ctrl_handler_free(sd->ctrl_handler);
	return 0;
}

static const struct i2c_device_id adv7183_id[] = {
	{"adv7183", 0},
	{},
};

MODULE_DEVICE_TABLE(i2c, adv7183_id);

static struct i2c_driver adv7183_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "adv7183",
	},
	.probe		= adv7183_probe,
	.remove		= adv7183_remove,
	.id_table	= adv7183_id,
};

module_i2c_driver(adv7183_driver);

MODULE_DESCRIPTION("Analog Devices ADV7183 video decoder driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
MODULE_LICENSE("GPL v2");
gpl-2.0
kozmikkick/Endeavor3.1.10
lib/atomic64_test.c
665
3564
/*
 * Testsuite for atomic64_t functions
 *
 * Copyright © 2010 Luca Barbieri
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/atomic.h>

/* Reset the atomic under test and the reference value to the same constant;
 * every operation below is then checked against plain arithmetic on r. */
#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)

/*
 * Exercise every atomic64_* primitive against a shadow "long long r"
 * computed with ordinary arithmetic; any mismatch triggers BUG_ON.
 * Runs once at boot via core_initcall.
 */
static __init int test_atomic64(void)
{
	/* Bit patterns chosen to exercise both 32-bit halves. */
	long long v0 = 0xaaa31337c001d00dLL;
	long long v1 = 0xdeadbeefdeafcafeLL;
	long long v2 = 0xfaceabadf00df001LL;
	long long onestwos = 0x1111111122222222LL;
	long long one = 1LL;

	atomic64_t v = ATOMIC64_INIT(v0);
	long long r = v0;
	BUG_ON(v.counter != r);

	/* set / read */
	atomic64_set(&v, v1);
	r = v1;
	BUG_ON(v.counter != r);
	BUG_ON(atomic64_read(&v) != r);

	/* add (positive and negative deltas) */
	INIT(v0);
	atomic64_add(onestwos, &v);
	r += onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_add(-one, &v);
	r += -one;
	BUG_ON(v.counter != r);

	/* add_return must yield the post-add value */
	INIT(v0);
	r += onestwos;
	BUG_ON(atomic64_add_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r += -one;
	BUG_ON(atomic64_add_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	/* sub / sub_return */
	INIT(v0);
	atomic64_sub(onestwos, &v);
	r -= onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(-one, &v);
	r -= -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= onestwos;
	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= -one;
	BUG_ON(atomic64_sub_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	/* inc / inc_return / dec / dec_return */
	INIT(v0);
	atomic64_inc(&v);
	r += one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += one;
	BUG_ON(atomic64_inc_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_dec(&v);
	r -= one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= one;
	BUG_ON(atomic64_dec_return(&v) != r);
	BUG_ON(v.counter != r);

	/* xchg returns the old value and installs the new one */
	INIT(v0);
	BUG_ON(atomic64_xchg(&v, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	/* cmpxchg: succeeds on matching old value, no-op otherwise */
	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
	BUG_ON(v.counter != r);

	/* add_unless: false (no add) when value equals the "unless", else true */
	INIT(v0);
	BUG_ON(atomic64_add_unless(&v, one, v0));
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(!atomic64_add_unless(&v, one, v1));
	r += one;
	BUG_ON(v.counter != r);

#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
    defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM)
	/* dec_if_positive: decrements only when result stays >= 0, but
	 * always returns the would-be decremented value */
	INIT(onestwos);
	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
	r -= one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_dec_if_positive(&v) != -one);
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
	BUG_ON(v.counter != r);
#else
#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above
#endif

	/* inc_not_zero: increments unless the value is exactly zero */
	INIT(onestwos);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_inc_not_zero(&v));
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_X86
	/* On x86, report which implementation variant was exercised. */
	printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
	       "x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
	       "i586+",
#else
	       "i386+",
#endif
	       boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
	       boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
	printk(KERN_INFO "atomic64 test passed\n");
#endif

	return 0;
}

core_initcall(test_atomic64);
gpl-2.0
aospan/linux-stable-netup-universal-dvb-1.4
lib/glob.c
1689
7839
#include <linux/module.h> #include <linux/glob.h> /* * The only reason this code can be compiled as a module is because the * ATA code that depends on it can be as well. In practice, they're * both usually compiled in and the module overhead goes away. */ MODULE_DESCRIPTION("glob(7) matching"); MODULE_LICENSE("Dual MIT/GPL"); /** * glob_match - Shell-style pattern matching, like !fnmatch(pat, str, 0) * @pat: Shell-style pattern to match, e.g. "*.[ch]". * @str: String to match. The pattern must match the entire string. * * Perform shell-style glob matching, returning true (1) if the match * succeeds, or false (0) if it fails. Equivalent to !fnmatch(@pat, @str, 0). * * Pattern metacharacters are ?, *, [ and \. * (And, inside character classes, !, - and ].) * * This is small and simple implementation intended for device blacklists * where a string is matched against a number of patterns. Thus, it * does not preprocess the patterns. It is non-recursive, and run-time * is at most quadratic: strlen(@str)*strlen(@pat). * * An example of the worst case is glob_match("*aaaaa", "aaaaaaaaaa"); * it takes 6 passes over the pattern before matching the string. * * Like !fnmatch(@pat, @str, 0) and unlike the shell, this does NOT * treat / or leading . specially; it isn't actually used for pathnames. * * Note that according to glob(7) (and unlike bash), character classes * are complemented by a leading !; this does not support the regex-style * [^a-z] syntax. * * An opening bracket without a matching close is matched literally. */ bool __pure glob_match(char const *pat, char const *str) { /* * Backtrack to previous * on mismatch and retry starting one * character later in the string. Because * matches all characters * (no exception for /), it can be easily proved that there's * never a need to backtrack multiple levels. 
*/ char const *back_pat = NULL, *back_str = back_str; /* * Loop over each token (character or class) in pat, matching * it against the remaining unmatched tail of str. Return false * on mismatch, or true after matching the trailing nul bytes. */ for (;;) { unsigned char c = *str++; unsigned char d = *pat++; switch (d) { case '?': /* Wildcard: anything but nul */ if (c == '\0') return false; break; case '*': /* Any-length wildcard */ if (*pat == '\0') /* Optimize trailing * case */ return true; back_pat = pat; back_str = --str; /* Allow zero-length match */ break; case '[': { /* Character class */ bool match = false, inverted = (*pat == '!'); char const *class = pat + inverted; unsigned char a = *class++; /* * Iterate over each span in the character class. * A span is either a single character a, or a * range a-b. The first span may begin with ']'. */ do { unsigned char b = a; if (a == '\0') /* Malformed */ goto literal; if (class[0] == '-' && class[1] != ']') { b = class[1]; if (b == '\0') goto literal; class += 2; /* Any special action if a > b? */ } match |= (a <= c && c <= b); } while ((a = *class++) != ']'); if (match == inverted) goto backtrack; pat = class; } break; case '\\': d = *pat++; /*FALLTHROUGH*/ default: /* Literal character */ literal: if (c == d) { if (d == '\0') return true; break; } backtrack: if (c == '\0' || !back_pat) return false; /* No point continuing */ /* Try again from last *, one character later in str. 
*/ pat = back_pat; str = ++back_str; break; } } } EXPORT_SYMBOL(glob_match); #ifdef CONFIG_GLOB_SELFTEST #include <linux/printk.h> #include <linux/moduleparam.h> /* Boot with "glob.verbose=1" to show successful tests, too */ static bool verbose = false; module_param(verbose, bool, 0); struct glob_test { char const *pat, *str; bool expected; }; static bool __pure __init test(char const *pat, char const *str, bool expected) { bool match = glob_match(pat, str); bool success = match == expected; /* Can't get string literals into a particular section, so... */ static char const msg_error[] __initconst = KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n"; static char const msg_ok[] __initconst = KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n"; static char const mismatch[] __initconst = "mismatch"; char const *message; if (!success) message = msg_error; else if (verbose) message = msg_ok; else return success; printk(message, pat, str, mismatch + 3*match); return success; } /* * The tests are all jammed together in one array to make it simpler * to place that array in the .init.rodata section. The obvious * "array of structures containing char *" has no way to force the * pointed-to strings to be in a particular section. * * Anyway, a test consists of: * 1. Expected glob_match result: '1' or '0'. * 2. Pattern to match: null-terminated string * 3. String to match against: null-terminated string * * The list of tests is terminated with a final '\0' instead of * a glob_match result character. 
*/ static char const glob_tests[] __initconst = /* Some basic tests */ "1" "a\0" "a\0" "0" "a\0" "b\0" "0" "a\0" "aa\0" "0" "a\0" "\0" "1" "\0" "\0" "0" "\0" "a\0" /* Simple character class tests */ "1" "[a]\0" "a\0" "0" "[a]\0" "b\0" "0" "[!a]\0" "a\0" "1" "[!a]\0" "b\0" "1" "[ab]\0" "a\0" "1" "[ab]\0" "b\0" "0" "[ab]\0" "c\0" "1" "[!ab]\0" "c\0" "1" "[a-c]\0" "b\0" "0" "[a-c]\0" "d\0" /* Corner cases in character class parsing */ "1" "[a-c-e-g]\0" "-\0" "0" "[a-c-e-g]\0" "d\0" "1" "[a-c-e-g]\0" "f\0" "1" "[]a-ceg-ik[]\0" "a\0" "1" "[]a-ceg-ik[]\0" "]\0" "1" "[]a-ceg-ik[]\0" "[\0" "1" "[]a-ceg-ik[]\0" "h\0" "0" "[]a-ceg-ik[]\0" "f\0" "0" "[!]a-ceg-ik[]\0" "h\0" "0" "[!]a-ceg-ik[]\0" "]\0" "1" "[!]a-ceg-ik[]\0" "f\0" /* Simple wild cards */ "1" "?\0" "a\0" "0" "?\0" "aa\0" "0" "??\0" "a\0" "1" "?x?\0" "axb\0" "0" "?x?\0" "abx\0" "0" "?x?\0" "xab\0" /* Asterisk wild cards (backtracking) */ "0" "*??\0" "a\0" "1" "*??\0" "ab\0" "1" "*??\0" "abc\0" "1" "*??\0" "abcd\0" "0" "??*\0" "a\0" "1" "??*\0" "ab\0" "1" "??*\0" "abc\0" "1" "??*\0" "abcd\0" "0" "?*?\0" "a\0" "1" "?*?\0" "ab\0" "1" "?*?\0" "abc\0" "1" "?*?\0" "abcd\0" "1" "*b\0" "b\0" "1" "*b\0" "ab\0" "0" "*b\0" "ba\0" "1" "*b\0" "bb\0" "1" "*b\0" "abb\0" "1" "*b\0" "bab\0" "1" "*bc\0" "abbc\0" "1" "*bc\0" "bc\0" "1" "*bc\0" "bbc\0" "1" "*bc\0" "bcbc\0" /* Multiple asterisks (complex backtracking) */ "1" "*ac*\0" "abacadaeafag\0" "1" "*ac*ae*ag*\0" "abacadaeafag\0" "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0" "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0" "1" "*abcd*\0" "abcabcabcabcdefg\0" "1" "*ab*cd*\0" "abcabcabcabcdefg\0" "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0" "0" "*abcd*\0" "abcabcabcabcefg\0" "0" "*ab*cd*\0" "abcabcabcabcefg\0"; static int __init glob_init(void) { unsigned successes = 0; unsigned n = 0; char const *p = glob_tests; static char const message[] __initconst = KERN_INFO "glob: %u self-tests passed, %u failed\n"; /* * Tests are jammed together in a string. 
The first byte is '1' * or '0' to indicate the expected outcome, or '\0' to indicate the * end of the tests. Then come two null-terminated strings: the * pattern and the string to match it against. */ while (*p) { bool expected = *p++ & 1; char const *pat = p; p += strlen(p) + 1; successes += test(pat, p, expected); p += strlen(p) + 1; n++; } n -= successes; printk(message, successes, n); /* What's the errno for "kernel bug detected"? Guess... */ return n ? -ECANCELED : 0; } /* We need a dummy exit function to allow unload */ static void __exit glob_fini(void) { } module_init(glob_init); module_exit(glob_fini); #endif /* CONFIG_GLOB_SELFTEST */
gpl-2.0
phalf/kernel_mint2g
drivers/gpu/drm/nouveau/nv50_graph.c
1945
30983
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/* PGRAPH (graphics engine) support for NV50-family (Tesla) GPUs. */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_grctx.h"
#include "nouveau_dma.h"
#include "nouveau_vm.h"
#include "nouveau_ramht.h"	/* NOTE(review): duplicate include (also above) */
#include "nv50_evo.h"

/* Per-device PGRAPH engine state: built ctxprog ucode and context size. */
struct nv50_graph_engine {
	struct nouveau_exec_engine base;
	u32 ctxprog[512];	/* context-switch microcode, built at create */
	u32 ctxprog_size;	/* number of valid words in ctxprog[] */
	u32 grctx_size;		/* size in bytes of a per-channel grctx */
};

/* Gate PGRAPH FIFO access by setting/clearing bits in register 0x400500. */
static void
nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
{
	const uint32_t mask = 0x00010001;

	if (enabled)
		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
	else
		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
}

/*
 * Return the channel whose context is currently loaded on PGRAPH, or
 * NULL if no context is loaded / no matching channel is found.
 */
static struct nouveau_channel *
nv50_graph_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	/* Be sure we're not in the middle of a context switch or bad things
	 * will happen, such as unloading the wrong pgraph context.
	 */
	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
		NV_ERROR(dev, "Ctxprog is still running\n");

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return NULL;
	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;

	/* Match the loaded instance address against every channel's ramin. */
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[i];

		if (chan && chan->ramin && chan->ramin->vinst == inst)
			return chan;
	}

	return NULL;
}

/*
 * Load the graph context at instance address `inst` onto PGRAPH.
 * FIFO access is disabled (bit 0 of 0x400500) for the duration.
 */
static int
nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
{
	uint32_t fifo = nv_rd32(dev, 0x400500);

	nv_wr32(dev, 0x400500, fifo & ~1);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
	nv_wr32(dev, 0x400040, 0xffffffff);
	(void)nv_rd32(dev, 0x400040);	/* posting read before clearing */
	nv_wr32(dev, 0x400040, 0x00000000);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);

	if (nouveau_wait_for_idle(dev))
		nv_wr32(dev, 0x40032c, inst | (1<<31));
	nv_wr32(dev, 0x400500, fifo);

	return 0;
}

/*
 * Save and unload the currently loaded graph context, if any.
 * Returns 0 whether or not a context was loaded.
 */
static int
nv50_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;
	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;

	nouveau_wait_for_idle(dev);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
	nouveau_wait_for_idle(dev);

	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
	return 0;
}

/* Pulse the PGRAPH enable bits in PMC to hard-reset the engine. */
static void
nv50_graph_init_reset(struct drm_device *dev)
{
	uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
}

/* Acknowledge any pending PGRAPH interrupts and enable them all. */
static void
nv50_graph_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
}

/*
 * Program the per-unit trap-enable registers (0xc0000000 pattern) for
 * the global units and each present TP (per the unit mask at 0x1540),
 * then ack trap status and enable FIFO access.  TP register stride
 * differs pre/post-NVA0 (<<12 vs <<11).
 */
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x400804, 0xc0000000);
	nv_wr32(dev, 0x406800, 0xc0000000);
	nv_wr32(dev, 0x400c04, 0xc0000000);
	nv_wr32(dev, 0x401800, 0xc0000000);
	nv_wr32(dev, 0x405018, 0xc0000000);
	nv_wr32(dev, 0x402000, 0xc0000000);
	for (i = 0; i < 16; i++) {
		if (units & 1 << i) {
			if (dev_priv->chipset < 0xa0) {
				nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
			} else {
				nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
			}
		}
	}

	nv_wr32(dev, 0x400108, 0xffffffff);
	nv_wr32(dev, 0x400824, 0x00004000);
	nv_wr32(dev, 0x400500, 0x00010001);
}

/* Chipset-dependent zcull setup, then clear all eight zcull regions. */
static void
nv50_graph_init_zcull(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	NV_DEBUG(dev, "\n");

	switch (dev_priv->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nv_wr32(dev, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		nv_wr32(dev, 0x402cc0, 0x00000000);
		if (dev_priv->chipset == 0xa0 ||
		    dev_priv->chipset == 0xaa ||
		    dev_priv->chipset == 0xac) {
			nv_wr32(dev, 0x402ca8, 0x00000802);
		} else {
			nv_wr32(dev, 0x402cc0, 0x00000000);
			nv_wr32(dev, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
	}
}

/*
 * Upload the pre-built ctxprog ucode word-by-word and enable hardware
 * context switching, with no context initially current.
 */
static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
	struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
	int i;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
	for (i = 0; i < pgraph->ctxprog_size; i++)
		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);

	nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
	nv_wr32(dev, 0x400320, 4);
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
	return 0;
}

/* Full PGRAPH bring-up: reset, registers, zcull, ctxctl, then IRQs last. */
static int
nv50_graph_init(struct drm_device *dev, int engine)
{
	int ret;

	NV_DEBUG(dev, "\n");

	nv50_graph_init_reset(dev);
	nv50_graph_init_regs__nv(dev);
	nv50_graph_init_zcull(dev);

	ret = nv50_graph_init_ctxctl(dev);
	if (ret)
		return ret;

	nv50_graph_init_intr(dev);
	return 0;
}

/* Shut PGRAPH down: unload current context and mask interrupts. */
static int
nv50_graph_fini(struct drm_device *dev, int engine)
{
	NV_DEBUG(dev, "\n");
	nv50_graph_unload_context(dev);
	nv_wr32(dev, 0x40013c, 0x00000000);
	return 0;
}

/*
 * Allocate and initialise a per-channel graphics context, write its
 * header into the channel's ramin (header offset 0x200 on NV50, 0x20
 * otherwise), fill default context values via nv50_grctx_init, and
 * register it on the channel.
 */
static int
nv50_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *grctx = NULL;
	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
	struct nouveau_grctx ctx = {};
	int hdr, ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &grctx);
	if (ret)
		return ret;

	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	nv_wo32(ramin, hdr + 0x00, 0x00190002);
	nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
	nv_wo32(ramin, hdr + 0x08, grctx->vinst);
	nv_wo32(ramin, hdr + 0x0c, 0);
	nv_wo32(ramin, hdr + 0x10, 0);
	nv_wo32(ramin, hdr + 0x14, 0x00010000);

	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = grctx;
	nv50_grctx_init(&ctx);

	nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);

	dev_priv->engine.instmem.flush(dev);

	atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
	chan->engctx[NVOBJ_ENGINE_GR] = grctx;
	return 0;
}

/*
 * Tear down a channel's graphics context: with FIFO reassignment and
 * PGRAPH access suspended (under the context-switch lock), unload the
 * context if it is the active one, zero its ramin header, then release
 * the grctx object.
 */
static void
nv50_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *grctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	unsigned long flags;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramin)
		return;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pfifo->reassign(dev, false);
	nv50_graph_fifo_access(dev, false);

	if (nv50_graph_channel(dev) == chan)
		nv50_graph_unload_context(dev);

	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(chan->ramin, i, 0);
	dev_priv->engine.instmem.flush(dev);

	nv50_graph_fifo_access(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	nouveau_gpuobj_ref(NULL, &grctx);

	atomic_dec(&chan->vm->engref[engine]);
	chan->engctx[engine] = NULL;
}

/*
 * Create a 16-byte object of the given class in the channel's instance
 * memory and insert it into the channel's RAMHT under `handle`.
 */
static int
nv50_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;
	obj->class = class;

	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	/* RAMHT keeps its own reference; drop ours either way. */
	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}

/*
 * Service a CONTEXT_SWITCH interrupt: save the outgoing context, load
 * the one requested in CTXCTL_NEXT, and re-enable the switch IRQ.
 */
static void
nv50_graph_context_switch(struct drm_device *dev)
{
	uint32_t inst;

	nv50_graph_unload_context(dev);

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
	inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
	nv50_graph_do_load_context(dev, inst);

	nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
		NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
}

/* NVSW method 0x018c: bind the DMA object used for vblank semaphores. */
static int
nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
			   u32 class, u32 mthd, u32 data)
{
	struct nouveau_gpuobj *gpuobj;

	gpuobj = nouveau_ramht_find(chan, data);
	if (!gpuobj)
		return -ENOENT;

	if (nouveau_notifier_offset(gpuobj, NULL))
		return -EINVAL;

	chan->nvsw.vblsem = gpuobj;
	chan->nvsw.vblsem_offset = ~0;	/* ~0 = offset not yet set */
	return 0;
}

/* NVSW method 0x0400: set the (word) offset of the vblank semaphore. */
static int
nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
			      u32 class, u32 mthd, u32 data)
{
	if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
		return -ERANGE;

	chan->nvsw.vblsem_offset = data >> 2;
	return 0;
}

/* NVSW method 0x0404: set the value written on semaphore release. */
static int
nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
				   u32 class, u32 mthd, u32 data)
{
	chan->nvsw.vblsem_rval = data;
	return 0;
}

/*
 * NVSW method 0x0408: queue a semaphore release on the next vblank of
 * CRTC `data` (0 or 1).  Requires dma_vblsem and vblsem_offset to have
 * been set first.
 */
static int
nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
			       u32 class, u32 mthd, u32 data)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
		return -EINVAL;

	drm_vblank_get(dev, data);

	chan->nvsw.vblsem_head = data;
	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
	return 0;
}

/* NVSW method 0x0500: complete a queued page flip on this channel. */
static int
nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
			       u32 class, u32 mthd, u32 data)
{
	nouveau_finish_page_flip(chan, NULL);
	return 0;
}

/* TLB flush for chipsets where a plain engine flush is sufficient. */
static void
nv50_graph_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 0);
}

/*
 * TLB flush for NV84+: PGRAPH must be idled first.  With FIFO access
 * masked, poll the 0x400380/384/388 status words until no unit reports
 * state 1 (busy), up to ~2 seconds on the PTIMER clock, then flush.
 */
static void
nv84_graph_tlb_flush(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	bool idle, timeout = false;
	unsigned long flags;
	u64 start;
	u32 tmp;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x400500, 0x00000001, 0x00000000);

	start = ptimer->read(dev);
	do {
		idle = true;

		/* Each 3-bit field is one unit's state; 1 means busy. */
		for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));

	if (timeout) {
		NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
			      "0x%08x 0x%08x 0x%08x 0x%08x\n",
			 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
	}

	nv50_vm_flush_engine(dev, 0);

	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}

/* Decode tables for MP execution-trap status bits. */
static struct nouveau_enum nv50_mp_exec_error_names[] = {
	{ 3, "STACK_UNDERFLOW", NULL },
	{ 4, "QUADON_ACTIVE", NULL },
	{ 8, "TIMEOUT", NULL },
	{ 0x10, "INVALID_OPCODE", NULL },
	{ 0x40, "BREAKPOINT", NULL },
	{}
};

static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* There must be a *lot* of these. Will take some time to gather them up. */
struct nouveau_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};

/* Top-level PGRAPH interrupt status bits (register 0x400100). */
static struct nouveau_bitfield nv50_graph_intr[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};

/*
 * Handle an MP execution trap for TP `tpid`: walk the (up to 4) MPs
 * present per the unit mask, decode and optionally print their error
 * status, then ack it.  MP register base differs pre/post-NVA0.
 */
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(dev, addr + 0x20);	/* read apparently required; purpose unclear */
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_enum_print(nv50_mp_exec_error_names, status);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);	/* ack the MP trap */
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

/*
 * Handle a per-TP trap of the given `type` (6=texture, 7=MP, 8=TPDMA).
 * Walks every TP present per the unit mask at 0x1540; the per-TP status
 * register base is `ustatus_old` (<NVA0, stride <<12) or `ustatus_new`
 * (>=NVA0, stride <<11).  Decodes known bits, prints when `display`,
 * and writes 0xc0000000 to ack/re-arm each TP's status.
 */
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}

/*
 * Decode and acknowledge a PGRAPH TRAP interrupt.  Works through each
 * reporting unit in the trap status at 0x400108 (DISPATCH, M2MF,
 * VFETCH, STRMOUT, CCACHE, TEXTURE, MP, TPDMA and one unknown bit),
 * printing diagnostics when `display` is set, resetting each unit's
 * status, and clearing the serviced bits.  Returns 0 if everything was
 * fully handled (DISPATCH path), 1 otherwise.
 */
static int
nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
{
	u32 status = nv_rd32(dev, 0x400108);
	u32 ustatus;

	if (!status && display) {
		NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		nv_wr32(dev, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nv_rd32(dev, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nv_rd32(dev, 0x40080c);
			u32 datah = nv_rd32(dev, 0x400810);
			u32 class = nv_rd32(dev, 0x400814);
			u32 r848 = nv_rd32(dev, 0x400848);

			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
			if (display && (addr & 0x80000000)) {
				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
					     "subc %d class 0x%04x mthd 0x%04x "
					     "data 0x%08x%08x "
					     "400808 0x%08x 400848 0x%08x\n",
					chid, inst, subc, class, mthd, datah,
					datal, addr, r848);
			} else
			if (display) {
				NV_INFO(dev, "PGRAPH - no stuck command?\n");
			}

			nv_wr32(dev, 0x400808, 0);
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nv_rd32(dev, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nv_rd32(dev, 0x40085c);
			u32 class = nv_rd32(dev, 0x400814);

			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
					     "subc %d class 0x%04x mthd 0x%04x "
					     "data 0x%08x 40084c 0x%08x\n",
					chid, inst, subc, class, mthd,
					data, addr);
			} else
			if (display) {
				NV_INFO(dev, "PGRAPH - no stuck command?\n");
			}

			nv_wr32(dev, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
				      "0x%08x)\n", ustatus);
		}

		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_M2MF");
			nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
				nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));

		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
			nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
				nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
		}

		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
			nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
				nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));

		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
			nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
				     " %08x %08x %08x\n",
				nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
				nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
				nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
				nv_rd32(dev, 0x40501c));

		}

		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modifiction on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
				    "PGRAPH - TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
				    "PGRAPH - TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
				    "PGRAPH - TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
		nv_wr32(dev, 0x400108, status);
	}

	return 1;
}

/*
 * Map a PGRAPH instance address to its channel id.  Returns the id of
 * the matching channel, or engine.fifo.channels (i.e. one past the
 * last valid id) if no channel matched.
 */
int
nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->ramin)
			continue;

		if (inst == chan->ramin->vinst)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return i;
}

/*
 * PGRAPH interrupt service routine.  Loops until 0x400100 reads zero:
 * dispatches ILLEGAL_MTHD to software methods, performs context
 * switches, decodes DATA_ERROR and TRAP conditions, and (rate-limited)
 * logs anything left unhandled.
 */
static void
nv50_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, 0x400100))) {
		u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
		u32 chid = nv50_graph_isr_chid(dev, inst);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400814);
		u32 show = stat;

		/* ILLEGAL_MTHD: try a software method handler first. */
		if (stat & 0x00000010) {
			if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
						       mthd, data))
				show &= ~0x00000010;
		}

		/* CONTEXT_SWITCH request. */
		if (stat & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, 0x400100, 0x00001000);
			nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
			nv50_graph_context_switch(dev);
			stat &= ~0x00001000;
			show &= ~0x00001000;
		}

		show = (show && nouveau_ratelimit()) ? show : 0;

		if (show & 0x00100000) {
			u32 ecode = nv_rd32(dev, 0x400110);
			NV_INFO(dev, "PGRAPH - DATA_ERROR ");
			nouveau_enum_print(nv50_data_error_names, ecode);
			printk("\n");
		}

		if (stat & 0x00200000) {
			if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
				show &= ~0x00200000;
		}

		nv_wr32(dev, 0x400100, stat);
		nv_wr32(dev, 0x400500, 0x00010001);

		if (show) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv50_graph_intr, show);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
			nv50_fb_vm_trap(dev, 1);
		}
	}

	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}

/* Unregister the engine and its IRQ handler, and free its state. */
static void
nv50_graph_destroy(struct drm_device *dev, int engine)
{
	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);

	NVOBJ_ENGINE_DEL(dev, GR);

	nouveau_irq_unregister(dev, 12);
	kfree(pgraph);
}

/*
 * Allocate and register the NV50 PGRAPH engine: build the ctxprog
 * ucode, wire up the engine vtable (with the chipset-appropriate TLB
 * flush), hook IRQ 12, and register the software (NVSW) methods and
 * every graphics/compute object class supported by the chipset.
 */
int
nv50_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_graph_engine *pgraph;
	struct nouveau_grctx ctx = {};
	int ret;

	pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
	if (!pgraph)
		return -ENOMEM;

	ctx.dev = dev;
	ctx.mode = NOUVEAU_GRCTX_PROG;
	ctx.data = pgraph->ctxprog;
	ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);

	ret = nv50_grctx_init(&ctx);
	if (ret) {
		NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
		kfree(pgraph);
		/* NOTE(review): returns 0 (success) after a build failure;
		 * looks like it should return ret -- confirm intent. */
		return 0;
	}

	pgraph->grctx_size = ctx.ctxvals_pos * 4;
	pgraph->ctxprog_size = ctx.ctxprog_len;

	pgraph->base.destroy = nv50_graph_destroy;
	pgraph->base.init = nv50_graph_init;
	pgraph->base.fini = nv50_graph_fini;
	pgraph->base.context_new = nv50_graph_context_new;
	pgraph->base.context_del = nv50_graph_context_del;
	pgraph->base.object_new = nv50_graph_object_new;
	if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
		pgraph->base.tlb_flush = nv50_graph_tlb_flush;
	else
		pgraph->base.tlb_flush = nv84_graph_tlb_flush;

	nouveau_irq_register(dev, 12, nv50_graph_isr);

	/* NVSW really doesn't live here... */
	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);

	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */

	/* tesla */
	if (dev_priv->chipset == 0x50)
		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
	else
	if (dev_priv->chipset < 0xa0)
		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
	else {
		switch (dev_priv->chipset) {
		case 0xa0:
		case 0xaa:
		case 0xac:
			NVOBJ_CLASS(dev, 0x8397, GR);
			break;
		case 0xa3:
		case 0xa5:
		case 0xa8:
			NVOBJ_CLASS(dev, 0x8597, GR);
			break;
		case 0xaf:
			NVOBJ_CLASS(dev, 0x8697, GR);
			break;
		}
	}

	/* compute */
	NVOBJ_CLASS(dev, 0x50c0, GR);
	if (dev_priv->chipset  > 0xa0 &&
	    dev_priv->chipset != 0xaa &&
	    dev_priv->chipset != 0xac)
		NVOBJ_CLASS(dev, 0x85c0, GR);

	return 0;
}
gpl-2.0
guh/linux-imx6-3.14-tune
drivers/video/sysimgblt.c
2713
6936
/*
 * Generic 1-bit or 8-bit source to 1-32 bit destination expansion
 * for frame buffer located in system RAM with packed pixels of any depth.
 *
 * Based almost entirely on cfbimgblt.c
 *
 * Copyright (C) April 2007 Antonino Daplas <adaplas@pol.net>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/types.h>

#define DEBUG

#ifdef DEBUG
#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__func__,## args)
#else
#define DPRINTK(fmt, args...)
#endif

/*
 * Expansion tables used by fast_imageblit(): each table entry expands a
 * group of source bits (1 bit per destination pixel) into a 32-bit mask
 * with 0xff..(or 0xffff../0xffffffff) per set bit.  _be/_le variants give
 * the byte order expected by big- vs little-endian framebuffer math.
 */
static const u32 cfb_tab8_be[] = {
	0x00000000,0x000000ff,0x0000ff00,0x0000ffff,
	0x00ff0000,0x00ff00ff,0x00ffff00,0x00ffffff,
	0xff000000,0xff0000ff,0xff00ff00,0xff00ffff,
	0xffff0000,0xffff00ff,0xffffff00,0xffffffff
};

static const u32 cfb_tab8_le[] = {
	0x00000000,0xff000000,0x00ff0000,0xffff0000,
	0x0000ff00,0xff00ff00,0x00ffff00,0xffffff00,
	0x000000ff,0xff0000ff,0x00ff00ff,0xffff00ff,
	0x0000ffff,0xff00ffff,0x00ffffff,0xffffffff
};

static const u32 cfb_tab16_be[] = {
	0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff
};

static const u32 cfb_tab16_le[] = {
	0x00000000, 0xffff0000, 0x0000ffff, 0xffffffff
};

static const u32 cfb_tab32[] = {
	0x00000000, 0xffffffff
};

/*
 * color_imageblit - blit an 8-bit-deep color image (image->depth != 1).
 *
 * Each source byte is either looked up in the pseudo palette (truecolor/
 * directcolor visuals) or used directly as the pixel value, then packed
 * into 32-bit destination words bpp bits at a time.  start_index is the
 * bit offset of the first pixel within the first destination word;
 * pitch_index is the per-scanline drift of that offset when line_length
 * is not a multiple of 4 bytes.
 */
static void color_imageblit(const struct fb_image *image, struct fb_info *p,
			    void *dst1, u32 start_index, u32 pitch_index)
{
	/* Draw the penguin */
	u32 *dst, *dst2;
	u32 color = 0, val, shift;
	int i, n, bpp = p->var.bits_per_pixel;
	u32 null_bits = 32 - bpp;	/* unused bits left in a word after one pixel */
	u32 *palette = (u32 *) p->pseudo_palette;
	const u8 *src = image->data;

	dst2 = dst1;
	for (i = image->height; i--; ) {
		n = image->width;
		dst = dst1;
		shift = 0;
		val = 0;

		if (start_index) {
			/* preserve the bits before the first pixel in this word */
			u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
							 start_index));
			val = *dst & start_mask;
			shift = start_index;
		}
		while (n--) {
			if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
			    p->fix.visual == FB_VISUAL_DIRECTCOLOR )
				color = palette[*src];
			else
				color = *src;
			color <<= FB_LEFT_POS(p, bpp);
			val |= FB_SHIFT_HIGH(p, color, shift);
			if (shift >= null_bits) {
				/* word full: flush, keep the spilled-over bits */
				*dst++ = val;

				val = (shift == null_bits) ? 0 :
					FB_SHIFT_LOW(p, color, 32 - shift);
			}
			shift += bpp;
			shift &= (32 - 1);
			src++;
		}
		if (shift) {
			/* merge the partial last word with existing contents */
			u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);

			*dst &= end_mask;
			*dst |= val;
		}
		dst1 += p->fix.line_length;	/* void * arithmetic: byte step */
		if (pitch_index) {
			/*
			 * NOTE(review): dst2 is u32 *, so this advances by
			 * line_length *words*, not bytes (unlike dst1 above).
			 * Matches the ancestral cfbimgblt code — confirm
			 * intended before "fixing".
			 */
			dst2 += p->fix.line_length;
			dst1 = (u8 *)((long)dst2 & ~(sizeof(u32) - 1));

			start_index += pitch_index;
			start_index &= 32 - 1;
		}
	}
}

/*
 * slow_imageblit - general monochrome (depth 1) expansion.
 *
 * Handles any bpp and any alignment.  Walks the source bitmap one bit at
 * a time, emitting fgcolor for set bits and bgcolor for clear bits, with
 * the same word-packing scheme as color_imageblit().
 */
static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
				  void *dst1, u32 fgcolor, u32 bgcolor,
				  u32 start_index, u32 pitch_index)
{
	u32 shift, color = 0, bpp = p->var.bits_per_pixel;
	u32 *dst, *dst2;
	u32 val, pitch = p->fix.line_length;
	u32 null_bits = 32 - bpp;
	u32 spitch = (image->width+7)/8;	/* source pitch, bytes per row */
	const u8 *src = image->data, *s;
	u32 i, j, l;

	dst2 = dst1;
	fgcolor <<= FB_LEFT_POS(p, bpp);
	bgcolor <<= FB_LEFT_POS(p, bpp);

	for (i = image->height; i--; ) {
		shift = val = 0;
		l = 8;			/* bits left in current source byte */
		j = image->width;
		dst = dst1;
		s = src;

		/* write leading bits */
		if (start_index) {
			u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
							 start_index));
			val = *dst & start_mask;
			shift = start_index;
		}

		while (j--) {
			l--;
			color = (*s & (1 << l)) ? fgcolor : bgcolor;
			val |= FB_SHIFT_HIGH(p, color, shift);

			/* Did the bitshift spill bits to the next long? */
			if (shift >= null_bits) {
				*dst++ = val;
				val = (shift == null_bits) ? 0 :
					FB_SHIFT_LOW(p, color, 32 - shift);
			}
			shift += bpp;
			shift &= (32 - 1);
			if (!l) { l = 8; s++; }
		}

		/* write trailing bits */
		if (shift) {
			u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);

			*dst &= end_mask;
			*dst |= val;
		}

		dst1 += pitch;
		src += spitch;

		if (pitch_index) {
			/* NOTE(review): u32 * step, see color_imageblit() */
			dst2 += pitch;
			dst1 = (u8 *)((long)dst2 & ~(sizeof(u32) - 1));

			start_index += pitch_index;
			start_index &= 32 - 1;
		}
	}
}

/*
 * fast_imageblit - optimized monochrome color expansion
 *
 * Only if:  bits_per_pixel == 8, 16, or 32
 *           image->width is divisible by pixel/dword (ppw);
 *           fix->line_length is divisible by 4;
 *           beginning and end of a scanline is dword aligned
 */
static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
				  void *dst1, u32 fgcolor, u32 bgcolor)
{
	u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
	u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
	u32 bit_mask, end_mask, eorx, shift;
	const char *s = image->data, *src;
	u32 *dst;
	const u32 *tab = NULL;
	int i, j, k;

	switch (bpp) {
	case 8:
		tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
		break;
	case 16:
		tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
		break;
	case 32:
	default:
		tab = cfb_tab32;
		break;
	}

	/* replicate the fg/bg color into all ppw pixel lanes of a word */
	for (i = ppw-1; i--; ) {
		fgx <<= bpp;
		bgx <<= bpp;
		fgx |= fgcolor;
		bgx |= bgcolor;
	}

	bit_mask = (1 << ppw) - 1;
	eorx = fgx ^ bgx;	/* bits that differ between fg and bg words */
	k = image->width/ppw;	/* destination words per scanline */

	for (i = image->height; i--; ) {
		dst = dst1;
		shift = 8;
		src = s;

		for (j = k; j--; ) {
			shift -= ppw;
			/*
			 * Expand ppw source bits to a pixel mask, then
			 * select fg/bg per pixel: (mask & eorx) ^ bgx.
			 * (*src >> shift) may sign-extend for plain char,
			 * but & bit_mask discards the extended bits.
			 */
			end_mask = tab[(*src >> shift) & bit_mask];
			*dst++ = (end_mask & eorx) ^ bgx;

			if (!shift) {
				shift = 8;
				src++;
			}
		}
		dst1 += p->fix.line_length;
		s += spitch;
	}
}

/*
 * sys_imageblit - fb_imageblit() for framebuffers in system RAM.
 *
 * Dispatches to fast_imageblit() when the destination is dword aligned
 * and the width fills whole words, to slow_imageblit() for other
 * monochrome cases, and to color_imageblit() for depth != 1 images.
 */
void sys_imageblit(struct fb_info *p, const struct fb_image *image)
{
	u32 fgcolor, bgcolor, start_index, bitstart, pitch_index = 0;
	u32 bpl = sizeof(u32), bpp = p->var.bits_per_pixel;
	u32 width = image->width;
	u32 dx = image->dx, dy = image->dy;
	void *dst1;

	if (p->state != FBINFO_STATE_RUNNING)
		return;

	/* bit offset of the first destination pixel in the framebuffer */
	bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
	start_index = bitstart & (32 - 1);
	/* per-line drift when line_length is not a multiple of 4 bytes */
	pitch_index = (p->fix.line_length & (bpl - 1)) * 8;

	bitstart /= 8;
	bitstart &= ~(bpl - 1);		/* round down to a dword boundary */
	dst1 = (void __force *)p->screen_base + bitstart;

	if (p->fbops->fb_sync)
		p->fbops->fb_sync(p);

	if (image->depth == 1) {
		if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
		    p->fix.visual == FB_VISUAL_DIRECTCOLOR) {
			fgcolor = ((u32*)(p->pseudo_palette))[image->fg_color];
			bgcolor = ((u32*)(p->pseudo_palette))[image->bg_color];
		} else {
			fgcolor = image->fg_color;
			bgcolor = image->bg_color;
		}

		if (32 % bpp == 0 && !start_index && !pitch_index &&
		    ((width & (32/bpp-1)) == 0) &&
		    bpp >= 8 && bpp <= 32)
			fast_imageblit(image, p, dst1, fgcolor, bgcolor);
		else
			slow_imageblit(image, p, dst1, fgcolor, bgcolor,
					start_index, pitch_index);
	} else
		color_imageblit(image, p, dst1, start_index, pitch_index);
}

EXPORT_SYMBOL(sys_imageblit);

MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("1-bit/8-bit to 1-32 bit color expansion (sys-to-sys)");
MODULE_LICENSE("GPL");
gpl-2.0
dtsinc/DTS-Sound-Integration_CAF-Android-kernel
arch/mips/ralink/prom.c
2969
1501
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org> * Copyright (C) 2010 Joonas Lahtinen <joonas.lahtinen@gmail.com> * Copyright (C) 2013 John Crispin <blogic@openwrt.org> */ #include <linux/string.h> #include <linux/of_fdt.h> #include <linux/of_platform.h> #include <asm/bootinfo.h> #include <asm/addrspace.h> #include "common.h" struct ralink_soc_info soc_info; const char *get_system_type(void) { return soc_info.sys_type; } static __init void prom_init_cmdline(int argc, char **argv) { int i; pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n", (unsigned int)fw_arg0, (unsigned int)fw_arg1, (unsigned int)fw_arg2, (unsigned int)fw_arg3); argc = fw_arg0; argv = (char **) KSEG1ADDR(fw_arg1); if (!argv) { pr_debug("argv=%p is invalid, skipping\n", argv); return; } for (i = 0; i < argc; i++) { char *p = (char *) KSEG1ADDR(argv[i]); if (CPHYSADDR(p) && *p) { pr_debug("argv[%d]: %s\n", i, p); strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline)); strlcat(arcs_cmdline, p, sizeof(arcs_cmdline)); } } } void __init prom_init(void) { int argc; char **argv; prom_soc_init(&soc_info); pr_info("SoC Type: %s\n", get_system_type()); prom_init_cmdline(argc, argv); } void __init prom_free_prom_memory(void) { }
gpl-2.0
imoseyon/leanKernel-galaxy-nexus
drivers/media/dvb/frontends/tda10021.c
4249
13265
/*
    TDA10021  - Single Chip Cable Channel Receiver driver module
	       used on the Siemens DVB-C cards

    Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de>
    Copyright (C) 2004 Markus Schulz <msc@antzsystem.de>
		   Support for TDA10021

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>

#include "dvb_frontend.h"
#include "tda1002x.h"

/* Per-instance demod state, embedding the dvb_frontend handed to core. */
struct tda10021_state {
	struct i2c_adapter* i2c;
	/* configuration settings */
	const struct tda1002x_config* config;
	struct dvb_frontend frontend;

	u8 pwm;		/* PWM value written to register 0x34 */
	u8 reg0;	/* shadow of CONF register 0x00 */
};

#if 0
#define dprintk(x...) printk(x)
#else
#define dprintk(x...)
#endif

static int verbose;

#define XIN 57840000UL		/* crystal frequency, Hz */
#define FIN (XIN >> 4)

static int tda10021_inittab_size = 0x40;
/* Power-on register values for registers 0x00..0x3f (datasheet defaults). */
static u8 tda10021_inittab[0x40]=
{
	0x73, 0x6a, 0x23, 0x0a, 0x02, 0x37, 0x77, 0x1a,
	0x37, 0x6a, 0x17, 0x8a, 0x1e, 0x86, 0x43, 0x40,
	0xb8, 0x3f, 0xa1, 0x00, 0xcd, 0x01, 0x00, 0xff,
	0x11, 0x00, 0x7c, 0x31, 0x30, 0x20, 0x00, 0x00,
	0x02, 0x00, 0x00, 0x7d, 0x00, 0x00, 0x00, 0x00,
	0x07, 0x00, 0x33, 0x11, 0x0d, 0x95, 0x08, 0x58,
	0x00, 0x00, 0x80, 0x00, 0x80, 0xff, 0x00, 0x00,
	0x04, 0x2d, 0x2f, 0xff, 0x00, 0x00, 0x00, 0x00,
};

/* Write one demod register over I2C; returns 0 or -EREMOTEIO. */
static int _tda10021_writereg (struct tda10021_state* state, u8 reg, u8 data)
{
	u8 buf[] = { reg, data };
	struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 };
	int ret;

	ret = i2c_transfer (state->i2c, &msg, 1);
	if (ret != 1)
		printk("DVB: TDA10021(%d): %s, writereg error "
			"(reg == 0x%02x, val == 0x%02x, ret == %i)\n",
			state->frontend.dvb->num, __func__, reg, data, ret);

	msleep(10);
	return (ret != 1) ? -EREMOTEIO : 0;
}

/*
 * Read one demod register.  On transfer failure the stale buffer value
 * (0) is still returned; callers do not distinguish errors.
 */
static u8 tda10021_readreg (struct tda10021_state* state, u8 reg)
{
	u8 b0 [] = { reg };
	u8 b1 [] = { 0 };
	struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 },
				  { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
	int ret;

	ret = i2c_transfer (state->i2c, msg, 2);
	// Don't print an error message if the id is read.
	if (ret != 2 && reg != 0x1a)
		printk("DVB: TDA10021: %s: readreg error (ret == %i)\n",
				__func__, ret);
	return b1[0];
}

//get access to tuner
static int lock_tuner(struct tda10021_state* state)
{
	u8 buf[2] = { 0x0f, tda10021_inittab[0x0f] | 0x80 };
	struct i2c_msg msg = {.addr=state->config->demod_address, .flags=0, .buf=buf, .len=2};

	if(i2c_transfer(state->i2c, &msg, 1) != 1)
	{
		printk("tda10021: lock tuner fails\n");
		return -EREMOTEIO;
	}
	return 0;
}

//release access from tuner
static int unlock_tuner(struct tda10021_state* state)
{
	u8 buf[2] = { 0x0f, tda10021_inittab[0x0f] & 0x7f };
	struct i2c_msg msg_post={.addr=state->config->demod_address, .flags=0, .buf=buf, .len=2};

	if(i2c_transfer(state->i2c, &msg_post, 1) != 1)
	{
		printk("tda10021: unlock tuner fails\n");
		return -EREMOTEIO;
	}
	return 0;
}

/*
 * Program CONF register 0x00: merge the QAM bits into the shadowed
 * value, apply spectral inversion (bit 0x20, polarity depends on the
 * board's config->invert), and pulse bit 0 to restart acquisition.
 */
static int tda10021_setup_reg0 (struct tda10021_state* state, u8 reg0,
				fe_spectral_inversion_t inversion)
{
	reg0 |= state->reg0 & 0x63;

	if ((INVERSION_ON == inversion) ^ (state->config->invert == 0))
		reg0 &= ~0x20;
	else
		reg0 |= 0x20;

	_tda10021_writereg (state, 0x00, reg0 & 0xfe);
	_tda10021_writereg (state, 0x00, reg0 | 0x01);

	state->reg0 = reg0;
	return 0;
}

/*
 * Program the symbol-rate dependent registers (decimation NDEC, filter
 * SFIL, baud rate BDR and its inverse BDRI) per the TDA10021 datasheet
 * formulas.  Input is clamped to [500 kBd, XIN/2].
 */
static int tda10021_set_symbolrate (struct tda10021_state* state, u32 symbolrate)
{
	s32 BDR;
	s32 BDRI;
	s16 SFIL=0;
	u16 NDEC = 0;
	u32 tmp, ratio;

	if (symbolrate > XIN/2)
		symbolrate = XIN/2;
	if (symbolrate < 500000)
		symbolrate = 500000;

	if (symbolrate < XIN/16) NDEC = 1;
	if (symbolrate < XIN/32) NDEC = 2;
	if (symbolrate < XIN/64) NDEC = 3;

	if (symbolrate < (u32)(XIN/12.3)) SFIL = 1;
	if (symbolrate < (u32)(XIN/16))	 SFIL = 0;
	if (symbolrate < (u32)(XIN/24.6)) SFIL = 1;
	if (symbolrate < (u32)(XIN/32))	 SFIL = 0;
	if (symbolrate < (u32)(XIN/49.2)) SFIL = 1;
	if (symbolrate < (u32)(XIN/64))	 SFIL = 0;
	if (symbolrate < (u32)(XIN/98.4)) SFIL = 1;

	symbolrate <<= NDEC;
	/* 24-bit fixed point ratio symbolrate*16/FIN, built 8 bits at a time */
	ratio = (symbolrate << 4) / FIN;
	tmp =  ((symbolrate << 4) % FIN) << 8;
	ratio = (ratio << 8) + tmp / FIN;
	tmp = (tmp % FIN) << 8;
	ratio = (ratio << 8) + DIV_ROUND_CLOSEST(tmp, FIN);

	BDR = ratio;
	BDRI = (((XIN << 5) / symbolrate) + 1) / 2;

	if (BDRI > 0xFF)
		BDRI = 0xFF;

	SFIL = (SFIL << 4) | tda10021_inittab[0x0E];

	NDEC = (NDEC << 6) | tda10021_inittab[0x03];

	_tda10021_writereg (state, 0x03, NDEC);
	_tda10021_writereg (state, 0x0a, BDR&0xff);
	_tda10021_writereg (state, 0x0b, (BDR>> 8)&0xff);
	_tda10021_writereg (state, 0x0c, (BDR>>16)&0x3f);
	_tda10021_writereg (state, 0x0d, BDRI);
	_tda10021_writereg (state, 0x0e, SFIL);

	return 0;
}

/* Load the full init table, restore the board PWM, and enable the PLL. */
static int tda10021_init (struct dvb_frontend *fe)
{
	struct tda10021_state* state = fe->demodulator_priv;
	int i;

	dprintk("DVB: TDA10021(%d): init chip\n", fe->adapter->num);

	//_tda10021_writereg (fe, 0, 0);

	for (i=0; i<tda10021_inittab_size; i++)
		_tda10021_writereg (state, i, tda10021_inittab[i]);

	_tda10021_writereg (state, 0x34, state->pwm);

	//Comment by markus
	//0x2A[3-0] == PDIV -> P multiplaying factor (P=PDIV+1)(default 0)
	//0x2A[4] == BYPPLL -> Power down mode (default 1)
	//0x2A[5] == LCK -> PLL Lock Flag
	//0x2A[6] == POLAXIN -> Polarity of the input reference clock (default 0)

	//Activate PLL
	_tda10021_writereg(state, 0x2a, tda10021_inittab[0x2a] & 0xef);
	return 0;
}

/*
 * Tune: drive the tuner, then program symbol rate and the five
 * modulation-dependent registers indexed by the QAM constellation
 * (index 0..5 maps QAM4..QAM256 per the table comment below).
 */
static int tda10021_set_parameters (struct dvb_frontend *fe,
				    struct dvb_frontend_parameters *p)
{
	struct tda10021_state* state = fe->demodulator_priv;

	//table for QAM4-QAM256 ready  QAM4  QAM16  QAM32  QAM64 QAM128  QAM256
	//CONF
	static const u8 reg0x00 [] = { 0x14, 0x00, 0x04, 0x08, 0x0c, 0x10 };
	//AGCREF value
	static const u8 reg0x01 [] = { 0x78, 0x8c, 0x8c, 0x6a, 0x78, 0x5c };
	//LTHR value
	static const u8 reg0x05 [] = { 0x78, 0x87, 0x64, 0x46, 0x36, 0x26 };
	//MSETH
	static const u8 reg0x08 [] = { 0x8c, 0xa2, 0x74, 0x43, 0x34, 0x23 };
	//AREF
	static const u8 reg0x09 [] = { 0x96, 0x91, 0x96, 0x6a, 0x7e, 0x6b };

	int qam = p->u.qam.modulation;

	if (qam < 0 || qam > 5)
		return -EINVAL;

	if (p->inversion != INVERSION_ON && p->inversion != INVERSION_OFF)
		return -EINVAL;

	//printk("tda10021: set frequency to %d qam=%d symrate=%d\n", p->frequency,qam,p->u.qam.symbol_rate);

	if (fe->ops.tuner_ops.set_params) {
		fe->ops.tuner_ops.set_params(fe, p);
		if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
	}

	tda10021_set_symbolrate (state, p->u.qam.symbol_rate);
	_tda10021_writereg (state, 0x34, state->pwm);

	_tda10021_writereg (state, 0x01, reg0x01[qam]);
	_tda10021_writereg (state, 0x05, reg0x05[qam]);
	_tda10021_writereg (state, 0x08, reg0x08[qam]);
	_tda10021_writereg (state, 0x09, reg0x09[qam]);

	tda10021_setup_reg0 (state, reg0x00[qam], p->inversion);

	return 0;
}

/* Translate sync register 0x11 bits into FE_HAS_* status flags. */
static int tda10021_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
	struct tda10021_state* state = fe->demodulator_priv;
	int sync;

	*status = 0;
	//0x11[0] == EQALGO -> Equalizer algorithms state
	//0x11[1] == CARLOCK -> Carrier locked
	//0x11[2] == FSYNC -> Frame synchronisation
	//0x11[3] == FEL -> Front End locked
	//0x11[6] == NODVB -> DVB Mode Information
	sync = tda10021_readreg (state, 0x11);

	if (sync & 2)
		*status |= FE_HAS_SIGNAL|FE_HAS_CARRIER;

	if (sync & 4)
		*status |= FE_HAS_SYNC|FE_HAS_VITERBI;

	if (sync & 8)
		*status |= FE_HAS_LOCK;

	return 0;
}

/* Read the 20-bit error counter (0x14..0x16) and restart it. */
static int tda10021_read_ber(struct dvb_frontend* fe, u32* ber)
{
	struct tda10021_state* state = fe->demodulator_priv;

	u32 _ber = tda10021_readreg(state, 0x14) |
		(tda10021_readreg(state, 0x15) << 8) |
		((tda10021_readreg(state, 0x16) & 0x0f) << 16);
	_tda10021_writereg(state, 0x10, (tda10021_readreg(state, 0x10) & ~0xc0)
					| (tda10021_inittab[0x10] & 0xc0));
	*ber = 10 * _ber;

	return 0;
}

/* AGC gain as a 16-bit strength value (inverted when CONF bit 1 set). */
static int tda10021_read_signal_strength(struct dvb_frontend* fe, u16* strength)
{
	struct tda10021_state* state = fe->demodulator_priv;

	u8 config = tda10021_readreg(state, 0x02);
	u8 gain = tda10021_readreg(state, 0x17);
	if (config & 0x02)
		/* the agc value is inverted */
		gain = ~gain;
	*strength = (gain << 8) | gain;

	return 0;
}

/* MSE register 0x18, inverted and widened to 16 bits, as SNR proxy. */
static int tda10021_read_snr(struct dvb_frontend* fe, u16* snr)
{
	struct tda10021_state* state = fe->demodulator_priv;

	u8 quality = ~tda10021_readreg(state, 0x18);
	*snr = (quality << 8) | quality;

	return 0;
}

/* 7-bit uncorrected-block counter; 0x7f means saturated/overflow. */
static int tda10021_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
	struct tda10021_state* state = fe->demodulator_priv;

	*ucblocks = tda10021_readreg (state, 0x13) & 0x7f;
	if (*ucblocks == 0x7f)
		*ucblocks = 0xffffffff;

	/* reset uncorrected block counter */
	_tda10021_writereg (state, 0x10, tda10021_inittab[0x10] & 0xdf);
	_tda10021_writereg (state, 0x10, tda10021_inittab[0x10]);

	return 0;
}

/*
 * Report current parameters back from hardware: inversion from the reg0
 * shadow, modulation from CONF bits, and frequency corrected by the AFC
 * register when the carrier is locked.
 */
static int tda10021_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
{
	struct tda10021_state* state = fe->demodulator_priv;
	int sync;
	s8 afc = 0;

	sync = tda10021_readreg(state, 0x11);
	afc = tda10021_readreg(state, 0x19);
	if (verbose) {
		/* AFC only valid when carrier has been recovered */
		printk(sync & 2 ? "DVB: TDA10021(%d): AFC (%d) %dHz\n" :
				  "DVB: TDA10021(%d): [AFC (%d) %dHz]\n",
			state->frontend.dvb->num, afc,
		       -((s32)p->u.qam.symbol_rate * afc) >> 10);
	}

	p->inversion = ((state->reg0 & 0x20) == 0x20) ^ (state->config->invert != 0) ? INVERSION_ON : INVERSION_OFF;
	p->u.qam.modulation = ((state->reg0 >> 2) & 7) + QAM_16;

	p->u.qam.fec_inner = FEC_NONE;
	p->frequency = ((p->frequency + 31250) / 62500) * 62500;

	if (sync & 2)
		p->frequency -= ((s32)p->u.qam.symbol_rate * afc) >> 10;

	return 0;
}

/* I2C gate: route bus access to the tuner (enable) or back to us. */
static int tda10021_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
{
	struct tda10021_state* state = fe->demodulator_priv;

	if (enable) {
		lock_tuner(state);
	} else {
		unlock_tuner(state);
	}
	return 0;
}

static int tda10021_sleep(struct dvb_frontend* fe)
{
	struct tda10021_state* state = fe->demodulator_priv;

	_tda10021_writereg (state, 0x1b, 0x02);  /* pdown ADC */
	_tda10021_writereg (state, 0x00, 0x80);  /* standby */

	return 0;
}

static void tda10021_release(struct dvb_frontend* fe)
{
	struct tda10021_state* state = fe->demodulator_priv;
	kfree(state);
}

static struct dvb_frontend_ops tda10021_ops;

/*
 * Probe for a TDA10021 at config->demod_address and return an attached
 * frontend, or NULL if the chip is absent, is a TDA10023, or allocation
 * fails.  Ownership of the returned frontend's state passes to the
 * caller (freed via ops.release).
 */
struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
				     struct i2c_adapter* i2c,
				     u8 pwm)
{
	struct tda10021_state* state = NULL;
	u8 id;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct tda10021_state), GFP_KERNEL);
	if (state == NULL) goto error;

	/* setup the state */
	state->config = config;
	state->i2c = i2c;
	state->pwm = pwm;
	state->reg0 = tda10021_inittab[0];

	/* check if the demod is there */
	id = tda10021_readreg(state, 0x1a);
	if ((id & 0xf0) != 0x70) goto error;

	/* Don't claim TDA10023 */
	if (id == 0x7d)
		goto error;

	printk("TDA10021: i2c-addr = 0x%02x, id = 0x%02x\n",
	       state->config->demod_address, id);

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &tda10021_ops, sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;

error:
	kfree(state);
	return NULL;
}

static struct dvb_frontend_ops tda10021_ops = {

	.info = {
		.name = "Philips TDA10021 DVB-C",
		.type = FE_QAM,
		.frequency_stepsize = 62500,
		.frequency_min = 47000000,
		.frequency_max = 862000000,
		.symbol_rate_min = (XIN/2)/64,     /* SACLK/64 == (XIN/2)/64 */
		.symbol_rate_max = (XIN/2)/4,      /* SACLK/4 */
	#if 0
		.frequency_tolerance = ???,
		.symbol_rate_tolerance = ???,  /* ppm */  /* == 8% (spec p. 5) */
	#endif
		.caps = 0x400 | //FE_CAN_QAM_4
			FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
			FE_CAN_QAM_128 | FE_CAN_QAM_256 |
			FE_CAN_FEC_AUTO
	},

	.release = tda10021_release,

	.init = tda10021_init,
	.sleep = tda10021_sleep,
	.i2c_gate_ctrl = tda10021_i2c_gate_ctrl,

	.set_frontend = tda10021_set_parameters,
	.get_frontend = tda10021_get_frontend,

	.read_status = tda10021_read_status,
	.read_ber = tda10021_read_ber,
	.read_signal_strength = tda10021_read_signal_strength,
	.read_snr = tda10021_read_snr,
	.read_ucblocks = tda10021_read_ucblocks,
};

module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose, "print AFC offset after tuning for debugging the PWM setting");

MODULE_DESCRIPTION("Philips TDA10021 DVB-C demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Markus Schulz");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(tda10021_attach);
gpl-2.0
smipi1/elce2015-tiny-linux
arch/arm/mach-omap2/sdrc.c
4761
4067
/* * SMS/SDRC (SDRAM controller) common code for OMAP2/3 * * Copyright (C) 2005, 2008 Texas Instruments Inc. * Copyright (C) 2005, 2008 Nokia Corporation * * Tony Lindgren <tony@atomide.com> * Paul Walmsley * Richard Woodruff <r-woodruff2@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include "common.h" #include "clock.h" #include "sdrc.h" static struct omap_sdrc_params *sdrc_init_params_cs0, *sdrc_init_params_cs1; void __iomem *omap2_sdrc_base; void __iomem *omap2_sms_base; struct omap2_sms_regs { u32 sms_sysconfig; }; static struct omap2_sms_regs sms_context; /* SDRC_POWER register bits */ #define SDRC_POWER_EXTCLKDIS_SHIFT 3 #define SDRC_POWER_PWDENA_SHIFT 2 #define SDRC_POWER_PAGEPOLICY_SHIFT 0 /** * omap2_sms_save_context - Save SMS registers * * Save SMS registers that need to be restored after off mode. */ void omap2_sms_save_context(void) { sms_context.sms_sysconfig = sms_read_reg(SMS_SYSCONFIG); } /** * omap2_sms_restore_context - Restore SMS registers * * Restore SMS registers that need to be Restored after off mode. */ void omap2_sms_restore_context(void) { sms_write_reg(sms_context.sms_sysconfig, SMS_SYSCONFIG); } /** * omap2_sdrc_get_params - return SDRC register values for a given clock rate * @r: SDRC clock rate (in Hz) * @sdrc_cs0: chip select 0 ram timings ** * @sdrc_cs1: chip select 1 ram timings ** * * Return pre-calculated values for the SDRC_ACTIM_CTRLA, * SDRC_ACTIM_CTRLB, SDRC_RFR_CTRL and SDRC_MR registers in sdrc_cs[01] * structs,for a given SDRC clock rate 'r'. 
* These parameters control various timing delays in the SDRAM controller * that are expressed in terms of the number of SDRC clock cycles to * wait; hence the clock rate dependency. * * Supports 2 different timing parameters for both chip selects. * * Note 1: the sdrc_init_params_cs[01] must be sorted rate descending. * Note 2: If sdrc_init_params_cs_1 is not NULL it must be of same size * as sdrc_init_params_cs_0. * * Fills in the struct omap_sdrc_params * for each chip select. * Returns 0 upon success or -1 upon failure. */ int omap2_sdrc_get_params(unsigned long r, struct omap_sdrc_params **sdrc_cs0, struct omap_sdrc_params **sdrc_cs1) { struct omap_sdrc_params *sp0, *sp1; if (!sdrc_init_params_cs0) return -1; sp0 = sdrc_init_params_cs0; sp1 = sdrc_init_params_cs1; while (sp0->rate && sp0->rate != r) { sp0++; if (sdrc_init_params_cs1) sp1++; } if (!sp0->rate) return -1; *sdrc_cs0 = sp0; *sdrc_cs1 = sp1; return 0; } void __init omap2_set_globals_sdrc(void __iomem *sdrc, void __iomem *sms) { omap2_sdrc_base = sdrc; omap2_sms_base = sms; } /** * omap2_sdrc_init - initialize SMS, SDRC devices on boot * @sdrc_cs[01]: pointers to a null-terminated list of struct omap_sdrc_params * Support for 2 chip selects timings * * Turn on smart idle modes for SDRAM scheduler and controller. * Program a known-good configuration for the SDRC to deal with buggy * bootloaders. */ void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0, struct omap_sdrc_params *sdrc_cs1) { u32 l; l = sms_read_reg(SMS_SYSCONFIG); l &= ~(0x3 << 3); l |= (0x2 << 3); sms_write_reg(l, SMS_SYSCONFIG); l = sdrc_read_reg(SDRC_SYSCONFIG); l &= ~(0x3 << 3); l |= (0x2 << 3); sdrc_write_reg(l, SDRC_SYSCONFIG); sdrc_init_params_cs0 = sdrc_cs0; sdrc_init_params_cs1 = sdrc_cs1; /* XXX Enable SRFRONIDLEREQ here also? 
*/ /* * PWDENA should not be set due to 34xx erratum 1.150 - PWDENA * can cause random memory corruption */ l = (1 << SDRC_POWER_EXTCLKDIS_SHIFT) | (1 << SDRC_POWER_PAGEPOLICY_SHIFT); sdrc_write_reg(l, SDRC_POWER); omap2_sms_save_context(); }
gpl-2.0
Lukas1212/htc7x30-3.0
arch/ia64/kvm/kvm_fw.c
11673
16389
/* * PAL/SAL call delegation * * Copyright (c) 2004 Li Susie <susie.li@intel.com> * Copyright (c) 2005 Yu Ke <ke.yu@intel.com> * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. */ #include <linux/kvm_host.h> #include <linux/smp.h> #include <asm/sn/addrs.h> #include <asm/sn/clksupport.h> #include <asm/sn/shub_mmr.h> #include "vti.h" #include "misc.h" #include <asm/pal.h> #include <asm/sal.h> #include <asm/tlb.h> /* * Handy macros to make sure that the PAL return values start out * as something meaningful. 
*/ #define INIT_PAL_STATUS_UNIMPLEMENTED(x) \ { \ x.status = PAL_STATUS_UNIMPLEMENTED; \ x.v0 = 0; \ x.v1 = 0; \ x.v2 = 0; \ } #define INIT_PAL_STATUS_SUCCESS(x) \ { \ x.status = PAL_STATUS_SUCCESS; \ x.v0 = 0; \ x.v1 = 0; \ x.v2 = 0; \ } static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu, u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) { struct exit_ctl_data *p; if (vcpu) { p = &vcpu->arch.exit_data; if (p->exit_reason == EXIT_REASON_PAL_CALL) { *gr28 = p->u.pal_data.gr28; *gr29 = p->u.pal_data.gr29; *gr30 = p->u.pal_data.gr30; *gr31 = p->u.pal_data.gr31; return ; } } printk(KERN_DEBUG"Failed to get vcpu pal data!!!\n"); } static void set_pal_result(struct kvm_vcpu *vcpu, struct ia64_pal_retval result) { struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); if (p->exit_reason == EXIT_REASON_PAL_CALL) { p->u.pal_data.ret = result; return ; } INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret); } static void set_sal_result(struct kvm_vcpu *vcpu, struct sal_ret_values result) { struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); if (p->exit_reason == EXIT_REASON_SAL_CALL) { p->u.sal_data.ret = result; return ; } printk(KERN_WARNING"Failed to set sal result!!\n"); } struct cache_flush_args { u64 cache_type; u64 operation; u64 progress; long status; }; cpumask_t cpu_cache_coherent_map; static void remote_pal_cache_flush(void *data) { struct cache_flush_args *args = data; long status; u64 progress = args->progress; status = ia64_pal_cache_flush(args->cache_type, args->operation, &progress, NULL); if (status != 0) args->status = status; } static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu) { u64 gr28, gr29, gr30, gr31; struct ia64_pal_retval result = {0, 0, 0, 0}; struct cache_flush_args args = {0, 0, 0, 0}; long psr; gr28 = gr29 = gr30 = gr31 = 0; kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31); if (gr31 != 0) printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu); /* Always call Host Pal in int=1 */ gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS; 
args.cache_type = gr29; args.operation = gr30; smp_call_function(remote_pal_cache_flush, (void *)&args, 1); if (args.status != 0) printk(KERN_ERR"pal_cache_flush error!," "status:0x%lx\n", args.status); /* * Call Host PAL cache flush * Clear psr.ic when call PAL_CACHE_FLUSH */ local_irq_save(psr); result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1, &result.v0); local_irq_restore(psr); if (result.status != 0) printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld" "in1:%lx,in2:%lx\n", vcpu, result.status, gr29, gr30); #if 0 if (gr29 == PAL_CACHE_TYPE_COHERENT) { cpus_setall(vcpu->arch.cache_coherent_map); cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map); cpus_setall(cpu_cache_coherent_map); cpu_clear(vcpu->cpu, cpu_cache_coherent_map); } #endif return result; } struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result; PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0); return result; } static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result; PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0); /* * PAL_FREQ_BASE may not be implemented in some platforms, * call SAL instead. */ if (result.v0 == 0) { result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, &result.v0, &result.v1); result.v2 = 0; } return result; } /* * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2 * RTC is used instead. This function patches the ratios from SAL * to match the RTC before providing them to the guest. 
*/ static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result) { struct pal_freq_ratio *ratio; unsigned long sal_freq, sal_drift, factor; result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, &sal_freq, &sal_drift); ratio = (struct pal_freq_ratio *)&result->v2; factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) / sn_rtc_cycles_per_second; ratio->num = 3; ratio->den = factor; } static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result; PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0); if (vcpu->kvm->arch.is_sn2) sn2_patch_itc_freq_ratios(&result); return result; } static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result; INIT_PAL_STATUS_UNIMPLEMENTED(result); return result; } static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result; INIT_PAL_STATUS_SUCCESS(result); return result; } static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result = {0, 0, 0, 0}; long in0, in1, in2, in3; kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); result.status = ia64_pal_proc_get_features(&result.v0, &result.v1, &result.v2, in2); return result; } static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result = {0, 0, 0, 0}; long in0, in1, in2, in3; kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); result.status = ia64_pal_register_info(in1, &result.v1, &result.v2); return result; } static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu) { pal_cache_config_info_t ci; long status; unsigned long in0, in1, in2, in3, r9, r10; kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); status = ia64_pal_cache_config_info(in1, in2, &ci); r9 = ci.pcci_info_1.pcci1_data; r10 = ci.pcci_info_2.pcci2_data; return ((struct ia64_pal_retval){status, r9, r10, 0}); } #define GUEST_IMPL_VA_MSB 59 #define GUEST_RID_BITS 18 static struct 
ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu) { pal_vm_info_1_u_t vminfo1; pal_vm_info_2_u_t vminfo2; struct ia64_pal_retval result; PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0); if (!result.status) { vminfo1.pvi1_val = result.v0; vminfo1.pal_vm_info_1_s.max_itr_entry = 8; vminfo1.pal_vm_info_1_s.max_dtr_entry = 8; result.v0 = vminfo1.pvi1_val; vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB; vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS; result.v1 = vminfo2.pvi2_val; } return result; } static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result; unsigned long in0, in1, in2, in3; kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); result.status = ia64_pal_vm_info(in1, in2, (pal_tc_info_u_t *)&result.v1, &result.v2); return result; } static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu) { u64 index = 0; struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); if (p->exit_reason == EXIT_REASON_PAL_CALL) index = p->u.pal_data.gr28; return index; } static void prepare_for_halt(struct kvm_vcpu *vcpu) { vcpu->arch.timer_pending = 1; vcpu->arch.timer_fired = 0; } static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu) { long status; unsigned long in0, in1, in2, in3, r9; unsigned long pm_buffer[16]; kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); status = ia64_pal_perf_mon_info(pm_buffer, (pal_perf_mon_info_u_t *) &r9); if (status != 0) { printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status); } else { if (in1) memcpy((void *)in1, pm_buffer, sizeof(pm_buffer)); else { status = PAL_STATUS_EINVAL; printk(KERN_WARNING"Invalid parameters " "for PAL call:0x%lx!\n", in0); } } return (struct ia64_pal_retval){status, r9, 0, 0}; } static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu) { unsigned long in0, in1, in2, in3; long status; unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32) | (1UL << 61) | (1UL << 60); kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); if (in1) { 
memcpy((void *)in1, &res, sizeof(res)); status = 0; } else{ status = PAL_STATUS_EINVAL; printk(KERN_WARNING"Invalid parameters " "for PAL call:0x%lx!\n", in0); } return (struct ia64_pal_retval){status, 0, 0, 0}; } static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu) { unsigned long r9; long status; status = ia64_pal_mem_attrib(&r9); return (struct ia64_pal_retval){status, r9, 0, 0}; } static void remote_pal_prefetch_visibility(void *v) { s64 trans_type = (s64)v; ia64_pal_prefetch_visibility(trans_type); } static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result = {0, 0, 0, 0}; unsigned long in0, in1, in2, in3; kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); result.status = ia64_pal_prefetch_visibility(in1); if (result.status == 0) { /* Must be performed on all remote processors in the coherence domain. */ smp_call_function(remote_pal_prefetch_visibility, (void *)in1, 1); /* Unnecessary on remote processor for other vcpus!*/ result.status = 1; } return result; } static void remote_pal_mc_drain(void *v) { ia64_pal_mc_drain(); } static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu) { struct ia64_pal_retval result = {0, 0, 0, 0}; unsigned long in0, in1, in2, in3; kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); if (in1 == 0 && in2) { char brand_info[128]; result.status = ia64_pal_get_brand_info(brand_info); if (result.status == PAL_STATUS_SUCCESS) memcpy((void *)in2, brand_info, 128); } else { result.status = PAL_STATUS_REQUIRES_MEMORY; printk(KERN_WARNING"Invalid parameters for " "PAL call:0x%lx!\n", in0); } return result; } int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) { u64 gr28; struct ia64_pal_retval result; int ret = 1; gr28 = kvm_get_pal_call_index(vcpu); switch (gr28) { case PAL_CACHE_FLUSH: result = pal_cache_flush(vcpu); break; case PAL_MEM_ATTRIB: result = pal_mem_attrib(vcpu); break; case PAL_CACHE_SUMMARY: result = pal_cache_summary(vcpu); break; 
case PAL_PERF_MON_INFO: result = pal_perf_mon_info(vcpu); break; case PAL_HALT_INFO: result = pal_halt_info(vcpu); break; case PAL_HALT_LIGHT: { INIT_PAL_STATUS_SUCCESS(result); prepare_for_halt(vcpu); if (kvm_highest_pending_irq(vcpu) == -1) ret = kvm_emulate_halt(vcpu); } break; case PAL_PREFETCH_VISIBILITY: result = pal_prefetch_visibility(vcpu); break; case PAL_MC_DRAIN: result.status = ia64_pal_mc_drain(); /* FIXME: All vcpus likely call PAL_MC_DRAIN. That causes the congestion. */ smp_call_function(remote_pal_mc_drain, NULL, 1); break; case PAL_FREQ_RATIOS: result = pal_freq_ratios(vcpu); break; case PAL_FREQ_BASE: result = pal_freq_base(vcpu); break; case PAL_LOGICAL_TO_PHYSICAL : result = pal_logical_to_physica(vcpu); break; case PAL_VM_SUMMARY : result = pal_vm_summary(vcpu); break; case PAL_VM_INFO : result = pal_vm_info(vcpu); break; case PAL_PLATFORM_ADDR : result = pal_platform_addr(vcpu); break; case PAL_CACHE_INFO: result = pal_cache_info(vcpu); break; case PAL_PTCE_INFO: INIT_PAL_STATUS_SUCCESS(result); result.v1 = (1L << 32) | 1L; break; case PAL_REGISTER_INFO: result = pal_register_info(vcpu); break; case PAL_VM_PAGE_SIZE: result.status = ia64_pal_vm_page_size(&result.v0, &result.v1); break; case PAL_RSE_INFO: result.status = ia64_pal_rse_info(&result.v0, (pal_hints_u_t *)&result.v1); break; case PAL_PROC_GET_FEATURES: result = pal_proc_get_features(vcpu); break; case PAL_DEBUG_INFO: result.status = ia64_pal_debug_info(&result.v0, &result.v1); break; case PAL_VERSION: result.status = ia64_pal_version( (pal_version_u_t *)&result.v0, (pal_version_u_t *)&result.v1); break; case PAL_FIXED_ADDR: result.status = PAL_STATUS_SUCCESS; result.v0 = vcpu->vcpu_id; break; case PAL_BRAND_INFO: result = pal_get_brand_info(vcpu); break; case PAL_GET_PSTATE: case PAL_CACHE_SHARED_INFO: INIT_PAL_STATUS_UNIMPLEMENTED(result); break; default: INIT_PAL_STATUS_UNIMPLEMENTED(result); printk(KERN_WARNING"kvm: Unsupported pal call," " index:0x%lx\n", gr28); } 
set_pal_result(vcpu, result); return ret; } static struct sal_ret_values sal_emulator(struct kvm *kvm, long index, unsigned long in1, unsigned long in2, unsigned long in3, unsigned long in4, unsigned long in5, unsigned long in6, unsigned long in7) { unsigned long r9 = 0; unsigned long r10 = 0; long r11 = 0; long status; status = 0; switch (index) { case SAL_FREQ_BASE: status = ia64_sal_freq_base(in1, &r9, &r10); break; case SAL_PCI_CONFIG_READ: printk(KERN_WARNING"kvm: Not allowed to call here!" " SAL_PCI_CONFIG_READ\n"); break; case SAL_PCI_CONFIG_WRITE: printk(KERN_WARNING"kvm: Not allowed to call here!" " SAL_PCI_CONFIG_WRITE\n"); break; case SAL_SET_VECTORS: if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) { if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) { status = -2; } else { kvm->arch.rdv_sal_data.boot_ip = in2; kvm->arch.rdv_sal_data.boot_gp = in3; } printk("Rendvous called! iip:%lx\n\n", in2); } else printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu." "ignored...\n", in1); break; case SAL_GET_STATE_INFO: /* No more info. */ status = -5; r9 = 0; break; case SAL_GET_STATE_INFO_SIZE: /* Return a dummy size. */ status = 0; r9 = 128; break; case SAL_CLEAR_STATE_INFO: /* Noop. */ break; case SAL_MC_RENDEZ: printk(KERN_WARNING "kvm: called SAL_MC_RENDEZ. ignored...\n"); break; case SAL_MC_SET_PARAMS: printk(KERN_WARNING "kvm: called SAL_MC_SET_PARAMS.ignored!\n"); break; case SAL_CACHE_FLUSH: if (1) { /*Flush using SAL. This method is faster but has a side effect on other vcpu running on this cpu. */ status = ia64_sal_cache_flush(in1); } else { /*Maybe need to implement the method without side effect!*/ status = 0; } break; case SAL_CACHE_INIT: printk(KERN_WARNING "kvm: called SAL_CACHE_INIT. ignored...\n"); break; case SAL_UPDATE_PAL: printk(KERN_WARNING "kvm: CALLED SAL_UPDATE_PAL. ignored...\n"); break; default: printk(KERN_WARNING"kvm: called SAL_CALL with unknown index." 
" index:%ld\n", index); status = -1; break; } return ((struct sal_ret_values) {status, r9, r10, r11}); } static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1, u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){ struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); if (p->exit_reason == EXIT_REASON_SAL_CALL) { *in0 = p->u.sal_data.in0; *in1 = p->u.sal_data.in1; *in2 = p->u.sal_data.in2; *in3 = p->u.sal_data.in3; *in4 = p->u.sal_data.in4; *in5 = p->u.sal_data.in5; *in6 = p->u.sal_data.in6; *in7 = p->u.sal_data.in7; return ; } *in0 = 0; } void kvm_sal_emul(struct kvm_vcpu *vcpu) { struct sal_ret_values result; u64 index, in1, in2, in3, in4, in5, in6, in7; kvm_get_sal_call_data(vcpu, &index, &in1, &in2, &in3, &in4, &in5, &in6, &in7); result = sal_emulator(vcpu->kvm, index, in1, in2, in3, in4, in5, in6, in7); set_sal_result(vcpu, result); }
gpl-2.0
dexter93/kernel_samsung_smdk4412
drivers/mtd/maps/pmcmsp-flash.c
12185
6243
/* * Mapping of a custom board with both AMD CFI and JEDEC flash in partitions. * Config with both CFI and JEDEC device support. * * Basically physmap.c with the addition of partitions and * an array of mapping info to accommodate more than one flash type per board. * * Copyright 2005-2007 PMC-Sierra, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/slab.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #include <msp_prom.h> #include <msp_regs.h> static struct mtd_info **msp_flash; static struct mtd_partition **msp_parts; static struct map_info *msp_maps; static int fcnt; #define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__) static int __init init_msp_flash(void) { int i, j, ret = -ENOMEM; int offset, coff; char *env; int pcnt; char flash_name[] = "flash0"; char part_name[] = "flash0_0"; unsigned addr, size; /* If ELB is disabled by "ful-mux" mode, we can't get at flash */ if ((*DEV_ID_REG & DEV_ID_SINGLE_PC) && (*ELB_1PC_EN_REG & SINGLE_PCCARD)) { printk(KERN_NOTICE "Single PC Card mode: no flash access\n"); return -ENXIO; } /* examine the prom environment for flash devices */ for (fcnt = 0; (env = prom_getenv(flash_name)); fcnt++) flash_name[5] = '0' + fcnt + 1; if (fcnt < 1) return -ENXIO; printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL); if (!msp_flash) return -ENOMEM; msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); if (!msp_parts) goto free_msp_flash; msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); if (!msp_maps) goto free_msp_parts; /* loop over the flash devices, initializing each */ for (i = 0; i < fcnt; i++) { /* examine the prom environment for flash partititions */ part_name[5] = '0' + i; part_name[7] = '0'; for (pcnt = 0; (env = prom_getenv(part_name)); pcnt++) part_name[7] = '0' + pcnt + 1; if (pcnt == 0) { printk(KERN_NOTICE "Skipping flash device %d " "(no partitions defined)\n", i); continue; } msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition), GFP_KERNEL); if (!msp_parts[i]) goto cleanup_loop; /* now initialize the devices proper */ flash_name[5] = '0' + i; env = prom_getenv(flash_name); if 
(sscanf(env, "%x:%x", &addr, &size) < 2) { ret = -ENXIO; kfree(msp_parts[i]); goto cleanup_loop; } addr = CPHYSADDR(addr); printk(KERN_NOTICE "MSP flash device \"%s\": 0x%08x at 0x%08x\n", flash_name, size, addr); /* This must matchs the actual size of the flash chip */ msp_maps[i].size = size; msp_maps[i].phys = addr; /* * Platforms have a specific limit of the size of memory * which may be mapped for flash: */ if (size > CONFIG_MSP_FLASH_MAP_LIMIT) size = CONFIG_MSP_FLASH_MAP_LIMIT; msp_maps[i].virt = ioremap(addr, size); if (msp_maps[i].virt == NULL) { ret = -ENXIO; kfree(msp_parts[i]); goto cleanup_loop; } msp_maps[i].bankwidth = 1; msp_maps[i].name = kmalloc(7, GFP_KERNEL); if (!msp_maps[i].name) { iounmap(msp_maps[i].virt); kfree(msp_parts[i]); goto cleanup_loop; } msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7); for (j = 0; j < pcnt; j++) { part_name[5] = '0' + i; part_name[7] = '0' + j; env = prom_getenv(part_name); if (sscanf(env, "%x:%x:%n", &offset, &size, &coff) < 2) { ret = -ENXIO; kfree(msp_maps[i].name); iounmap(msp_maps[i].virt); kfree(msp_parts[i]); goto cleanup_loop; } msp_parts[i][j].size = size; msp_parts[i][j].offset = offset; msp_parts[i][j].name = env + coff; } /* now probe and add the device */ simple_map_init(&msp_maps[i]); msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]); if (msp_flash[i]) { msp_flash[i]->owner = THIS_MODULE; mtd_device_register(msp_flash[i], msp_parts[i], pcnt); } else { printk(KERN_ERR "map probe failed for flash\n"); ret = -ENXIO; kfree(msp_maps[i].name); iounmap(msp_maps[i].virt); kfree(msp_parts[i]); goto cleanup_loop; } } return 0; cleanup_loop: while (i--) { mtd_device_unregister(msp_flash[i]); map_destroy(msp_flash[i]); kfree(msp_maps[i].name); iounmap(msp_maps[i].virt); kfree(msp_parts[i]); } kfree(msp_maps); free_msp_parts: kfree(msp_parts); free_msp_flash: kfree(msp_flash); return ret; } static void __exit cleanup_msp_flash(void) { int i; for (i = 0; i < fcnt; i++) { 
mtd_device_unregister(msp_flash[i]); map_destroy(msp_flash[i]); iounmap((void *)msp_maps[i].virt); /* free the memory */ kfree(msp_maps[i].name); kfree(msp_parts[i]); } kfree(msp_flash); kfree(msp_parts); kfree(msp_maps); } MODULE_AUTHOR("PMC-Sierra, Inc"); MODULE_DESCRIPTION("MTD map driver for PMC-Sierra MSP boards"); MODULE_LICENSE("GPL"); module_init(init_msp_flash); module_exit(cleanup_msp_flash);
gpl-2.0
hgl888/linux
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
154
1635
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"
#include "gf100.h"

/* GK20A's FB is similar to GF100's, but without the ability to allocate VRAM */

/*
 * FB subdev function table for GK20A: every hook is borrowed verbatim from
 * the GF100 implementation; only the constructor wrapper below is specific
 * to this chip.
 */
static const struct nvkm_fb_func
gk20a_fb = {
	.dtor = gf100_fb_dtor,
	.oneinit = gf100_fb_oneinit,
	.init = gf100_fb_init,
	.init_page = gf100_fb_init_page,
	.intr = gf100_fb_intr,
	.memtype_valid = gf100_fb_memtype_valid,
};

/*
 * Construct the GK20A FB subdev by instantiating the GF100 object with the
 * table above.  Returns 0 on success or a negative errno from the GF100
 * constructor.
 */
int
gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
	return gf100_fb_new_(&gk20a_fb, device, index, pfb);
}
gpl-2.0
ugers/linux-sunxi
fs/nfs/dns_resolve.c
922
8011
/*
 * linux/fs/nfs/dns_resolve.c
 *
 * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
 *
 * Resolves DNS hostnames into valid ip addresses
 */

#ifdef CONFIG_NFS_USE_KERNEL_DNS

#include <linux/sunrpc/clnt.h>
#include <linux/dns_resolver.h>

/*
 * Resolve @name through the kernel keyring-based DNS resolver and write
 * the resulting address into @sa.  Returns the sockaddr length parsed by
 * rpc_pton() on success, or -ESRCH when the query yields no address.
 */
ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
		struct sockaddr *sa, size_t salen)
{
	ssize_t ret;
	char *ip_addr = NULL;
	int ip_len;

	ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL);
	if (ip_len > 0)
		ret = rpc_pton(ip_addr, ip_len, sa, salen);
	else
		ret = -ESRCH;
	/* dns_query() allocated ip_addr; kfree(NULL) is a no-op on failure */
	kfree(ip_addr);
	return ret;
}

#else

/*
 * Userspace-assisted resolver: hostnames are cached in a sunrpc cache and
 * filled in by an upcall to userspace (rpc.idmapd style).
 */
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/seq_file.h>
#include <linux/inet.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/svcauth.h>

#include "dns_resolve.h"
#include "cache_lib.h"

#define NFS_DNS_HASHBITS 4
#define NFS_DNS_HASHTBL_SIZE (1 << NFS_DNS_HASHBITS)

static struct cache_head *nfs_dns_table[NFS_DNS_HASHTBL_SIZE];

/* One cache entry: hostname key plus the resolved sockaddr (if any). */
struct nfs_dns_ent {
	struct cache_head h;

	char *hostname;		/* owned, kfree'd with the entry */
	size_t namelen;

	struct sockaddr_storage addr;
	size_t addrlen;
};

/* sunrpc cache 'update' hook: copy the resolved address into an entry. */
static void nfs_dns_ent_update(struct cache_head *cnew,
		struct cache_head *ckey)
{
	struct nfs_dns_ent *new;
	struct nfs_dns_ent *key;

	new = container_of(cnew, struct nfs_dns_ent, h);
	key = container_of(ckey, struct nfs_dns_ent, h);

	memcpy(&new->addr, &key->addr, key->addrlen);
	new->addrlen = key->addrlen;
}

/*
 * sunrpc cache 'init' hook: duplicate the key's hostname into the new
 * entry.  On allocation failure the entry is left empty (namelen 0), so
 * nfs_dns_match() will never match it.
 */
static void nfs_dns_ent_init(struct cache_head *cnew,
		struct cache_head *ckey)
{
	struct nfs_dns_ent *new;
	struct nfs_dns_ent *key;

	new = container_of(cnew, struct nfs_dns_ent, h);
	key = container_of(ckey, struct nfs_dns_ent, h);

	kfree(new->hostname);
	new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
	if (new->hostname) {
		new->namelen = key->namelen;
		nfs_dns_ent_update(cnew, ckey);
	} else {
		new->namelen = 0;
		new->addrlen = 0;
	}
}

/* kref release: free the hostname copy, then the entry itself. */
static void nfs_dns_ent_put(struct kref *ref)
{
	struct nfs_dns_ent *item;

	item = container_of(ref, struct nfs_dns_ent, h.ref);
	kfree(item->hostname);
	kfree(item);
}

/* Allocate an empty cache entry for the sunrpc cache layer. */
static struct cache_head *nfs_dns_ent_alloc(void)
{
	struct nfs_dns_ent *item = kmalloc(sizeof(*item), GFP_KERNEL);

	if (item != NULL) {
		item->hostname = NULL;
		item->namelen = 0;
		item->addrlen = 0;
		return &item->h;
	}
	return NULL;
};

/* Bucket index for an entry, derived from the hostname string. */
static unsigned int nfs_dns_hash(const struct nfs_dns_ent *key)
{
	return hash_str(key->hostname, NFS_DNS_HASHBITS);
}

/* Format an upcall request line ("<hostname>\n") into the pipe buffer. */
static void nfs_dns_request(struct cache_detail *cd,
		struct cache_head *ch,
		char **bpp, int *blen)
{
	struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);

	qword_add(bpp, blen, key->hostname);
	(*bpp)[-1] = '\n';
}

/*
 * Kick off an upcall for @ch: try the helper binary first, and fall back
 * to the cache pipe if that fails.
 */
static int nfs_dns_upcall(struct cache_detail *cd,
		struct cache_head *ch)
{
	struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
	int ret;

	ret = nfs_cache_upcall(cd, key->hostname);
	if (ret)
		ret = sunrpc_cache_pipe_upcall(cd, ch, nfs_dns_request);
	return ret;
}

/* Two entries match iff their hostnames are equal (empty keys never match). */
static int nfs_dns_match(struct cache_head *ca,
		struct cache_head *cb)
{
	struct nfs_dns_ent *a;
	struct nfs_dns_ent *b;

	a = container_of(ca, struct nfs_dns_ent, h);
	b = container_of(cb, struct nfs_dns_ent, h);

	if (a->namelen == 0 || a->namelen != b->namelen)
		return 0;
	return memcmp(a->hostname, b->hostname, a->namelen) == 0;
}

/* /proc cache content formatter: one "address hostname ttl" line per entry. */
static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
		struct cache_head *h)
{
	struct nfs_dns_ent *item;
	long ttl;

	if (h == NULL) {
		seq_puts(m, "# ip address hostname ttl\n");
		return 0;
	}
	item = container_of(h, struct nfs_dns_ent, h);
	ttl = item->h.expiry_time - seconds_since_boot();
	if (ttl < 0)
		ttl = 0;

	if (!test_bit(CACHE_NEGATIVE, &h->flags)) {
		char buf[INET6_ADDRSTRLEN+IPV6_SCOPE_ID_LEN+1];

		rpc_ntop((struct sockaddr *)&item->addr, buf, sizeof(buf));
		seq_printf(m, "%15s ", buf);
	} else
		seq_puts(m, "<none> ");
	seq_printf(m, "%15s %ld\n", item->hostname, ttl);
	return 0;
}

/* Look up (or insert a pending entry for) @key in the cache. */
static struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
		struct nfs_dns_ent *key)
{
	struct cache_head *ch;

	ch = sunrpc_cache_lookup(cd,
			&key->h,
			nfs_dns_hash(key));
	if (!ch)
		return NULL;
	return container_of(ch, struct nfs_dns_ent, h);
}

/* Replace @key's cache entry with the data in @new. */
static struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
		struct nfs_dns_ent *new,
		struct nfs_dns_ent *key)
{
	struct cache_head *ch;

	ch = sunrpc_cache_update(cd,
			&new->h, &key->h,
			nfs_dns_hash(key));
	if (!ch)
		return NULL;
	return container_of(ch, struct nfs_dns_ent, h);
}

/*
 * Parse a downcall line of the form "<ipaddr> <hostname> <ttl>\n" written
 * by userspace, and install the result in the cache.  An unparseable
 * address (addrlen == 0) produces a negative entry.  Returns 0 on success
 * or a negative errno.
 */
static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
{
	char buf1[NFS_DNS_HOSTNAME_MAXLEN+1];
	struct nfs_dns_ent key, *item;
	unsigned int ttl;
	ssize_t len;
	int ret = -EINVAL;

	if (buf[buflen-1] != '\n')
		goto out;
	buf[buflen-1] = '\0';

	len = qword_get(&buf, buf1, sizeof(buf1));
	if (len <= 0)
		goto out;
	key.addrlen = rpc_pton(buf1, len,
			(struct sockaddr *)&key.addr,
			sizeof(key.addr));

	len = qword_get(&buf, buf1, sizeof(buf1));
	if (len <= 0)
		goto out;

	key.hostname = buf1;
	key.namelen = len;
	memset(&key.h, 0, sizeof(key.h));

	if (get_uint(&buf, &ttl) < 0)
		goto out;
	if (ttl == 0)
		goto out;
	key.h.expiry_time = ttl + seconds_since_boot();

	ret = -ENOMEM;
	item = nfs_dns_lookup(cd, &key);
	if (item == NULL)
		goto out;

	if (key.addrlen == 0)
		set_bit(CACHE_NEGATIVE, &key.h.flags);

	item = nfs_dns_update(cd, &key, item);
	if (item == NULL)
		goto out;

	ret = 0;
	cache_put(&item->h, cd);
out:
	return ret;
}

static struct cache_detail nfs_dns_resolve = {
	.owner = THIS_MODULE,
	.hash_size = NFS_DNS_HASHTBL_SIZE,
	.hash_table = nfs_dns_table,
	.name = "dns_resolve",
	.cache_put = nfs_dns_ent_put,
	.cache_upcall = nfs_dns_upcall,
	.cache_parse = nfs_dns_parse,
	.cache_show = nfs_dns_show,
	.match = nfs_dns_match,
	.init = nfs_dns_ent_init,
	.update = nfs_dns_ent_update,
	.alloc = nfs_dns_ent_alloc,
};

/*
 * Single cache lookup attempt; may return -EAGAIN (via cache_check) when
 * an upcall is pending, in which case @dreq is registered for deferral.
 */
static int do_cache_lookup(struct cache_detail *cd,
		struct nfs_dns_ent *key,
		struct nfs_dns_ent **item,
		struct nfs_cache_defer_req *dreq)
{
	int ret = -ENOMEM;

	*item = nfs_dns_lookup(cd, key);
	if (*item) {
		ret = cache_check(cd, &(*item)->h, &dreq->req);
		if (ret)
			*item = NULL;
	}
	return ret;
}

/*
 * Non-blocking re-check after a deferred upcall completed: succeed only
 * if the entry is now valid, unexpired and not flushed; -ENOENT for a
 * negative entry, -ETIMEDOUT otherwise.
 */
static int do_cache_lookup_nowait(struct cache_detail *cd,
		struct nfs_dns_ent *key,
		struct nfs_dns_ent **item)
{
	int ret = -ENOMEM;

	*item = nfs_dns_lookup(cd, key);
	if (!*item)
		goto out_err;
	ret = -ETIMEDOUT;
	if (!test_bit(CACHE_VALID, &(*item)->h.flags)
			|| (*item)->h.expiry_time < seconds_since_boot()
			|| cd->flush_time > (*item)->h.last_refresh)
		goto out_put;
	ret = -ENOENT;
	if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
		goto out_put;
	return 0;
out_put:
	cache_put(&(*item)->h, cd);
out_err:
	*item = NULL;
	return ret;
}

/*
 * Blocking lookup: try the cache, and if the answer is pending (-EAGAIN)
 * wait for the userspace upcall to complete before re-checking.
 */
static int do_cache_lookup_wait(struct cache_detail *cd,
		struct nfs_dns_ent *key,
		struct nfs_dns_ent **item)
{
	struct nfs_cache_defer_req *dreq;
	int ret = -ENOMEM;

	dreq = nfs_cache_defer_req_alloc();
	if (!dreq)
		goto out;
	ret = do_cache_lookup(cd, key, item, dreq);
	if (ret == -EAGAIN) {
		ret = nfs_cache_wait_for_upcall(dreq);
		if (!ret)
			ret = do_cache_lookup_nowait(cd, key, item);
	}
	nfs_cache_defer_req_put(dreq);
out:
	return ret;
}

/*
 * Resolve @name via the cache/upcall machinery above.  On success the
 * address is copied into @sa and its length returned; -EOVERFLOW when
 * @salen is too small, -ESRCH when the name does not resolve.
 */
ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
		struct sockaddr *sa, size_t salen)
{
	struct nfs_dns_ent key = {
		.hostname = name,
		.namelen = namelen,
	};
	struct nfs_dns_ent *item = NULL;
	ssize_t ret;

	ret = do_cache_lookup_wait(&nfs_dns_resolve, &key, &item);
	if (ret == 0) {
		if (salen >= item->addrlen) {
			memcpy(sa, &item->addr, item->addrlen);
			ret = item->addrlen;
		} else
			ret = -EOVERFLOW;
		cache_put(&item->h, &nfs_dns_resolve);
	} else if (ret == -ENOENT)
		ret = -ESRCH;
	return ret;
}

/* Register the dns_resolve cache with the sunrpc cache infrastructure. */
int nfs_dns_resolver_init(void)
{
	return nfs_cache_register(&nfs_dns_resolve);
}

/* Unregister the dns_resolve cache. */
void nfs_dns_resolver_destroy(void)
{
	nfs_cache_unregister(&nfs_dns_resolve);
}

#endif
gpl-2.0
yytang2012/linux-kvm-arm
drivers/dma/intel_mid_dma.c
922
40616
/* * intel_mid_dma.c - Intel Langwell DMA Drivers * * Copyright (C) 2008-10 Intel Corp * Author: Vinod Koul <vinod.koul@intel.com> * The driver design is based on dw_dmac driver * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * */ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include <linux/intel_mid_dma.h> #include <linux/module.h> #include "dmaengine.h" #define MAX_CHAN 4 /*max ch across controllers*/ #include "intel_mid_dma_regs.h" #define INTEL_MID_DMAC1_ID 0x0814 #define INTEL_MID_DMAC2_ID 0x0813 #define INTEL_MID_GP_DMAC2_ID 0x0827 #define INTEL_MFLD_DMAC1_ID 0x0830 #define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008 #define LNW_PERIPHRAL_MASK_SIZE 0x10 #define LNW_PERIPHRAL_STATUS 0x0 #define LNW_PERIPHRAL_MASK 0x8 struct intel_mid_dma_probe_info { u8 max_chan; u8 ch_base; u16 block_size; u32 pimr_mask; }; #define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \ ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \ .max_chan = (_max_chan), \ .ch_base = (_ch_base), \ .block_size = (_block_size), \ .pimr_mask = (_pimr_mask), \ }) /***************************************************************************** Utility Functions*/ /** * get_ch_index - convert status to channel * 
@status: status mask * @base: dma ch base value * * Modify the status mask and return the channel index needing * attention (or -1 if neither) */ static int get_ch_index(int *status, unsigned int base) { int i; for (i = 0; i < MAX_CHAN; i++) { if (*status & (1 << (i + base))) { *status = *status & ~(1 << (i + base)); pr_debug("MDMA: index %d New status %x\n", i, *status); return i; } } return -1; } /** * get_block_ts - calculates dma transaction length * @len: dma transfer length * @tx_width: dma transfer src width * @block_size: dma controller max block size * * Based on src width calculate the DMA trsaction length in data items * return data items or FFFF if exceeds max length for block */ static int get_block_ts(int len, int tx_width, int block_size) { int byte_width = 0, block_ts = 0; switch (tx_width) { case DMA_SLAVE_BUSWIDTH_1_BYTE: byte_width = 1; break; case DMA_SLAVE_BUSWIDTH_2_BYTES: byte_width = 2; break; case DMA_SLAVE_BUSWIDTH_4_BYTES: default: byte_width = 4; break; } block_ts = len/byte_width; if (block_ts > block_size) block_ts = 0xFFFF; return block_ts; } /***************************************************************************** DMAC1 interrupt Functions*/ /** * dmac1_mask_periphral_intr - mask the periphral interrupt * @mid: dma device for which masking is required * * Masks the DMA periphral interrupt * this is valid for DMAC1 family controllers only * This controller should have periphral mask registers already mapped */ static void dmac1_mask_periphral_intr(struct middma_device *mid) { u32 pimr; if (mid->pimr_mask) { pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); pimr |= mid->pimr_mask; writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); } return; } /** * dmac1_unmask_periphral_intr - unmask the periphral interrupt * @midc: dma channel for which masking is required * * UnMasks the DMA periphral interrupt, * this is valid for DMAC1 family controllers only * This controller should have periphral mask registers already mapped */ static void 
dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc) { u32 pimr; struct middma_device *mid = to_middma_device(midc->chan.device); if (mid->pimr_mask) { pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); pimr &= ~mid->pimr_mask; writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); } return; } /** * enable_dma_interrupt - enable the periphral interrupt * @midc: dma channel for which enable interrupt is required * * Enable the DMA periphral interrupt, * this is valid for DMAC1 family controllers only * This controller should have periphral mask registers already mapped */ static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) { dmac1_unmask_periphral_intr(midc); /*en ch interrupts*/ iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); return; } /** * disable_dma_interrupt - disable the periphral interrupt * @midc: dma channel for which disable interrupt is required * * Disable the DMA periphral interrupt, * this is valid for DMAC1 family controllers only * This controller should have periphral mask registers already mapped */ static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) { /*Check LPE PISR, make sure fwd is disabled*/ iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); return; } /***************************************************************************** DMA channel helper Functions*/ /** * mid_desc_get - get a descriptor * @midc: dma channel for which descriptor is required * * Obtain a descriptor for the channel. Returns NULL if none are free. 
* Once the descriptor is returned it is private until put on another * list or freed */ static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc) { struct intel_mid_dma_desc *desc, *_desc; struct intel_mid_dma_desc *ret = NULL; spin_lock_bh(&midc->lock); list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { if (async_tx_test_ack(&desc->txd)) { list_del(&desc->desc_node); ret = desc; break; } } spin_unlock_bh(&midc->lock); return ret; } /** * mid_desc_put - put a descriptor * @midc: dma channel for which descriptor is required * @desc: descriptor to put * * Return a descriptor from lwn_desc_get back to the free pool */ static void midc_desc_put(struct intel_mid_dma_chan *midc, struct intel_mid_dma_desc *desc) { if (desc) { spin_lock_bh(&midc->lock); list_add_tail(&desc->desc_node, &midc->free_list); spin_unlock_bh(&midc->lock); } } /** * midc_dostart - begin a DMA transaction * @midc: channel for which txn is to be started * @first: first descriptor of series * * Load a transaction into the engine. This must be called with midc->lock * held and bh disabled. */ static void midc_dostart(struct intel_mid_dma_chan *midc, struct intel_mid_dma_desc *first) { struct middma_device *mid = to_middma_device(midc->chan.device); /* channel is idle */ if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { /*error*/ pr_err("ERR_MDMA: channel is busy in start\n"); /* The tasklet will hopefully advance the queue... 
*/ return; } midc->busy = true; /*write registers and en*/ iowrite32(first->sar, midc->ch_regs + SAR); iowrite32(first->dar, midc->ch_regs + DAR); iowrite32(first->lli_phys, midc->ch_regs + LLP); iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH); pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", (int)first->sar, (int)first->dar, first->cfg_hi, first->cfg_lo, first->ctl_hi, first->ctl_lo); first->status = DMA_IN_PROGRESS; iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); } /** * midc_descriptor_complete - process completed descriptor * @midc: channel owning the descriptor * @desc: the descriptor itself * * Process a completed descriptor and perform any callbacks upon * the completion. The completion handling drops the lock during the * callbacks but must be called with the lock held. */ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, struct intel_mid_dma_desc *desc) __releases(&midc->lock) __acquires(&midc->lock) { struct dma_async_tx_descriptor *txd = &desc->txd; dma_async_tx_callback callback_txd = NULL; struct intel_mid_dma_lli *llitem; void *param_txd = NULL; dma_cookie_complete(txd); callback_txd = txd->callback; param_txd = txd->callback_param; if (desc->lli != NULL) { /*clear the DONE bit of completed LLI in memory*/ llitem = desc->lli + desc->current_lli; llitem->ctl_hi &= CLEAR_DONE; if (desc->current_lli < desc->lli_length-1) (desc->current_lli)++; else desc->current_lli = 0; } spin_unlock_bh(&midc->lock); if (callback_txd) { pr_debug("MDMA: TXD callback set ... 
calling\n"); callback_txd(param_txd); } if (midc->raw_tfr) { desc->status = DMA_COMPLETE; if (desc->lli != NULL) { pci_pool_free(desc->lli_pool, desc->lli, desc->lli_phys); pci_pool_destroy(desc->lli_pool); desc->lli = NULL; } list_move(&desc->desc_node, &midc->free_list); midc->busy = false; } spin_lock_bh(&midc->lock); } /** * midc_scan_descriptors - check the descriptors in channel * mark completed when tx is completete * @mid: device * @midc: channel to scan * * Walk the descriptor chain for the device and process any entries * that are complete. */ static void midc_scan_descriptors(struct middma_device *mid, struct intel_mid_dma_chan *midc) { struct intel_mid_dma_desc *desc = NULL, *_desc = NULL; /*tx is complete*/ list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { if (desc->status == DMA_IN_PROGRESS) midc_descriptor_complete(midc, desc); } return; } /** * midc_lli_fill_sg - Helper function to convert * SG list to Linked List Items. *@midc: Channel *@desc: DMA descriptor *@sglist: Pointer to SG list *@sglen: SG list length *@flags: DMA transaction flags * * Walk through the SG list and convert the SG list into Linked * List Items (LLI). 
*/ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, struct intel_mid_dma_desc *desc, struct scatterlist *sglist, unsigned int sglen, unsigned int flags) { struct intel_mid_dma_slave *mids; struct scatterlist *sg; dma_addr_t lli_next, sg_phy_addr; struct intel_mid_dma_lli *lli_bloc_desc; union intel_mid_dma_ctl_lo ctl_lo; union intel_mid_dma_ctl_hi ctl_hi; int i; pr_debug("MDMA: Entered midc_lli_fill_sg\n"); mids = midc->mid_slave; lli_bloc_desc = desc->lli; lli_next = desc->lli_phys; ctl_lo.ctl_lo = desc->ctl_lo; ctl_hi.ctl_hi = desc->ctl_hi; for_each_sg(sglist, sg, sglen, i) { /*Populate CTL_LOW and LLI values*/ if (i != sglen - 1) { lli_next = lli_next + sizeof(struct intel_mid_dma_lli); } else { /*Check for circular list, otherwise terminate LLI to ZERO*/ if (flags & DMA_PREP_CIRCULAR_LIST) { pr_debug("MDMA: LLI is configured in circular mode\n"); lli_next = desc->lli_phys; } else { lli_next = 0; ctl_lo.ctlx.llp_dst_en = 0; ctl_lo.ctlx.llp_src_en = 0; } } /*Populate CTL_HI values*/ ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg), desc->width, midc->dma->block_size); /*Populate SAR and DAR values*/ sg_phy_addr = sg_dma_address(sg); if (desc->dirn == DMA_MEM_TO_DEV) { lli_bloc_desc->sar = sg_phy_addr; lli_bloc_desc->dar = mids->dma_slave.dst_addr; } else if (desc->dirn == DMA_DEV_TO_MEM) { lli_bloc_desc->sar = mids->dma_slave.src_addr; lli_bloc_desc->dar = sg_phy_addr; } /*Copy values into block descriptor in system memroy*/ lli_bloc_desc->llp = lli_next; lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; lli_bloc_desc++; } /*Copy very first LLI values to descriptor*/ desc->ctl_lo = desc->lli->ctl_lo; desc->ctl_hi = desc->lli->ctl_hi; desc->sar = desc->lli->sar; desc->dar = desc->lli->dar; return 0; } /***************************************************************************** DMA engine callback Functions*/ /** * intel_mid_dma_tx_submit - callback to submit DMA transaction * @tx: dma engine descriptor * * Submit the DMA 
transaction for this descriptor, start if ch idle */ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) { struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx); struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); dma_cookie_t cookie; spin_lock_bh(&midc->lock); cookie = dma_cookie_assign(tx); if (list_empty(&midc->active_list)) list_add_tail(&desc->desc_node, &midc->active_list); else list_add_tail(&desc->desc_node, &midc->queue); midc_dostart(midc, desc); spin_unlock_bh(&midc->lock); return cookie; } /** * intel_mid_dma_issue_pending - callback to issue pending txn * @chan: chan where pending trascation needs to be checked and submitted * * Call for scan to issue pending descriptors */ static void intel_mid_dma_issue_pending(struct dma_chan *chan) { struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); spin_lock_bh(&midc->lock); if (!list_empty(&midc->queue)) midc_scan_descriptors(to_middma_device(chan->device), midc); spin_unlock_bh(&midc->lock); } /** * intel_mid_dma_tx_status - Return status of txn * @chan: chan for where status needs to be checked * @cookie: cookie for txn * @txstate: DMA txn state * * Return status of DMA txn */ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); if (ret != DMA_COMPLETE) { spin_lock_bh(&midc->lock); midc_scan_descriptors(to_middma_device(chan->device), midc); spin_unlock_bh(&midc->lock); ret = dma_cookie_status(chan, cookie, txstate); } return ret; } static int dma_slave_control(struct dma_chan *chan, unsigned long arg) { struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); struct dma_slave_config *slave = (struct dma_slave_config *)arg; struct intel_mid_dma_slave *mid_slave; BUG_ON(!midc); BUG_ON(!slave); pr_debug("MDMA: slave control called\n"); mid_slave = 
to_intel_mid_dma_slave(slave); BUG_ON(!mid_slave); midc->mid_slave = mid_slave; return 0; } /** * intel_mid_dma_device_control - DMA device control * @chan: chan for DMA control * @cmd: control cmd * @arg: cmd arg value * * Perform DMA control command */ static int intel_mid_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); struct middma_device *mid = to_middma_device(chan->device); struct intel_mid_dma_desc *desc, *_desc; union intel_mid_dma_cfg_lo cfg_lo; if (cmd == DMA_SLAVE_CONFIG) return dma_slave_control(chan, arg); if (cmd != DMA_TERMINATE_ALL) return -ENXIO; spin_lock_bh(&midc->lock); if (midc->busy == false) { spin_unlock_bh(&midc->lock); return 0; } /*Suspend and disable the channel*/ cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); cfg_lo.cfgx.ch_susp = 1; iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); midc->busy = false; /* Disable interrupts */ disable_dma_interrupt(midc); midc->descs_allocated = 0; spin_unlock_bh(&midc->lock); list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { if (desc->lli != NULL) { pci_pool_free(desc->lli_pool, desc->lli, desc->lli_phys); pci_pool_destroy(desc->lli_pool); desc->lli = NULL; } list_move(&desc->desc_node, &midc->free_list); } return 0; } /** * intel_mid_dma_prep_memcpy - Prep memcpy txn * @chan: chan for DMA transfer * @dest: destn address * @src: src address * @len: DMA transfer len * @flags: DMA flags * * Perform a DMA memcpy. 
Note we support slave periphral DMA transfers only * The periphral txn details should be filled in slave structure properly * Returns the descriptor for this txn */ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct intel_mid_dma_chan *midc; struct intel_mid_dma_desc *desc = NULL; struct intel_mid_dma_slave *mids; union intel_mid_dma_ctl_lo ctl_lo; union intel_mid_dma_ctl_hi ctl_hi; union intel_mid_dma_cfg_lo cfg_lo; union intel_mid_dma_cfg_hi cfg_hi; enum dma_slave_buswidth width; pr_debug("MDMA: Prep for memcpy\n"); BUG_ON(!chan); if (!len) return NULL; midc = to_intel_mid_dma_chan(chan); BUG_ON(!midc); mids = midc->mid_slave; BUG_ON(!mids); pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", midc->dma->pci_id, midc->ch_id, len); pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", mids->cfg_mode, mids->dma_slave.direction, mids->hs_mode, mids->dma_slave.src_addr_width); /*calculate CFG_LO*/ if (mids->hs_mode == LNW_DMA_SW_HS) { cfg_lo.cfg_lo = 0; cfg_lo.cfgx.hs_sel_dst = 1; cfg_lo.cfgx.hs_sel_src = 1; } else if (mids->hs_mode == LNW_DMA_HW_HS) cfg_lo.cfg_lo = 0x00000; /*calculate CFG_HI*/ if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { /*SW HS only*/ cfg_hi.cfg_hi = 0; } else { cfg_hi.cfg_hi = 0; if (midc->dma->pimr_mask) { cfg_hi.cfgx.protctl = 0x0; /*default value*/ cfg_hi.cfgx.fifo_mode = 1; if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { cfg_hi.cfgx.src_per = 0; if (mids->device_instance == 0) cfg_hi.cfgx.dst_per = 3; if (mids->device_instance == 1) cfg_hi.cfgx.dst_per = 1; } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { if (mids->device_instance == 0) cfg_hi.cfgx.src_per = 2; if (mids->device_instance == 1) cfg_hi.cfgx.src_per = 0; cfg_hi.cfgx.dst_per = 0; } } else { cfg_hi.cfgx.protctl = 0x1; /*default value*/ cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per = midc->ch_id - midc->dma->chan_base; } } /*calculate CTL_HI*/ 
ctl_hi.ctlx.reser = 0; ctl_hi.ctlx.done = 0; width = mids->dma_slave.src_addr_width; ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); pr_debug("MDMA:calc len %d for block size %d\n", ctl_hi.ctlx.block_ts, midc->dma->block_size); /*calculate CTL_LO*/ ctl_lo.ctl_lo = 0; ctl_lo.ctlx.int_en = 1; ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; /* * Here we need some translation from "enum dma_slave_buswidth" * to the format for our dma controller * standard intel_mid_dmac's format * 1 Byte 0b000 * 2 Bytes 0b001 * 4 Bytes 0b010 */ ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { ctl_lo.ctlx.tt_fc = 0; ctl_lo.ctlx.sinc = 0; ctl_lo.ctlx.dinc = 0; } else { if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { ctl_lo.ctlx.sinc = 0; ctl_lo.ctlx.dinc = 2; ctl_lo.ctlx.tt_fc = 1; } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { ctl_lo.ctlx.sinc = 2; ctl_lo.ctlx.dinc = 0; ctl_lo.ctlx.tt_fc = 2; } } pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); enable_dma_interrupt(midc); desc = midc_desc_get(midc); if (desc == NULL) goto err_desc_get; desc->sar = src; desc->dar = dest ; desc->len = len; desc->cfg_hi = cfg_hi.cfg_hi; desc->cfg_lo = cfg_lo.cfg_lo; desc->ctl_lo = ctl_lo.ctl_lo; desc->ctl_hi = ctl_hi.ctl_hi; desc->width = width; desc->dirn = mids->dma_slave.direction; desc->lli_phys = 0; desc->lli = NULL; desc->lli_pool = NULL; return &desc->txd; err_desc_get: pr_err("ERR_MDMA: Failed to get desc\n"); midc_desc_put(midc, desc); return NULL; } /** * intel_mid_dma_prep_slave_sg - Prep slave sg txn * @chan: chan for DMA transfer * @sgl: scatter gather list * @sg_len: length of sg txn * @direction: DMA transfer dirtn * @flags: DMA flags * @context: transfer context (ignored) * * Prepares LLI based 
periphral transfer */ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct intel_mid_dma_chan *midc = NULL; struct intel_mid_dma_slave *mids = NULL; struct intel_mid_dma_desc *desc = NULL; struct dma_async_tx_descriptor *txd = NULL; union intel_mid_dma_ctl_lo ctl_lo; pr_debug("MDMA: Prep for slave SG\n"); if (!sg_len) { pr_err("MDMA: Invalid SG length\n"); return NULL; } midc = to_intel_mid_dma_chan(chan); BUG_ON(!midc); mids = midc->mid_slave; BUG_ON(!mids); if (!midc->dma->pimr_mask) { /* We can still handle sg list with only one item */ if (sg_len == 1) { txd = intel_mid_dma_prep_memcpy(chan, mids->dma_slave.dst_addr, mids->dma_slave.src_addr, sg_dma_len(sgl), flags); return txd; } else { pr_warn("MDMA: SG list is not supported by this controller\n"); return NULL; } } pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", sg_len, direction, flags); txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags); if (NULL == txd) { pr_err("MDMA: Prep memcpy failed\n"); return NULL; } desc = to_intel_mid_dma_desc(txd); desc->dirn = direction; ctl_lo.ctl_lo = desc->ctl_lo; ctl_lo.ctlx.llp_dst_en = 1; ctl_lo.ctlx.llp_src_en = 1; desc->ctl_lo = ctl_lo.ctl_lo; desc->lli_length = sg_len; desc->current_lli = 0; /* DMA coherent memory pool for LLI descriptors*/ desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", midc->dma->pdev, (sizeof(struct intel_mid_dma_lli)*sg_len), 32, 0); if (NULL == desc->lli_pool) { pr_err("MID_DMA:LLI pool create failed\n"); return NULL; } desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); if (!desc->lli) { pr_err("MID_DMA: LLI alloc failed\n"); pci_pool_destroy(desc->lli_pool); return NULL; } midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); if (flags & DMA_PREP_INTERRUPT) { iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + 
MASK_BLOCK); pr_debug("MDMA:Enabled Block interrupt\n"); } return &desc->txd; } /** * intel_mid_dma_free_chan_resources - Frees dma resources * @chan: chan requiring attention * * Frees the allocated resources on this DMA chan */ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) { struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); struct middma_device *mid = to_middma_device(chan->device); struct intel_mid_dma_desc *desc, *_desc; if (true == midc->busy) { /*trying to free ch in use!!!!!*/ pr_err("ERR_MDMA: trying to free ch in use\n"); } spin_lock_bh(&midc->lock); midc->descs_allocated = 0; list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { list_del(&desc->desc_node); pci_pool_free(mid->dma_pool, desc, desc->txd.phys); } list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { list_del(&desc->desc_node); pci_pool_free(mid->dma_pool, desc, desc->txd.phys); } list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) { list_del(&desc->desc_node); pci_pool_free(mid->dma_pool, desc, desc->txd.phys); } spin_unlock_bh(&midc->lock); midc->in_use = false; midc->busy = false; /* Disable CH interrupts */ iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); pm_runtime_put(&mid->pdev->dev); } /** * intel_mid_dma_alloc_chan_resources - Allocate dma resources * @chan: chan requiring attention * * Allocates DMA resources on this chan * Return the descriptors allocated */ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) { struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); struct middma_device *mid = to_middma_device(chan->device); struct intel_mid_dma_desc *desc; dma_addr_t phys; int i = 0; pm_runtime_get_sync(&mid->pdev->dev); if (mid->state == SUSPENDED) { if (dma_resume(&mid->pdev->dev)) { pr_err("ERR_MDMA: resume failed"); return -EFAULT; } } /* ASSERT: channel is idle */ if (test_ch_en(mid->dma_base, 
midc->ch_id)) { /*ch is not idle*/ pr_err("ERR_MDMA: ch not idle\n"); pm_runtime_put(&mid->pdev->dev); return -EIO; } dma_cookie_init(chan); spin_lock_bh(&midc->lock); while (midc->descs_allocated < DESCS_PER_CHANNEL) { spin_unlock_bh(&midc->lock); desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); if (!desc) { pr_err("ERR_MDMA: desc failed\n"); pm_runtime_put(&mid->pdev->dev); return -ENOMEM; /*check*/ } dma_async_tx_descriptor_init(&desc->txd, chan); desc->txd.tx_submit = intel_mid_dma_tx_submit; desc->txd.flags = DMA_CTRL_ACK; desc->txd.phys = phys; spin_lock_bh(&midc->lock); i = ++midc->descs_allocated; list_add_tail(&desc->desc_node, &midc->free_list); } spin_unlock_bh(&midc->lock); midc->in_use = true; midc->busy = false; pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); return i; } /** * midc_handle_error - Handle DMA txn error * @mid: controller where error occurred * @midc: chan where error occurred * * Scan the descriptor for error */ static void midc_handle_error(struct middma_device *mid, struct intel_mid_dma_chan *midc) { midc_scan_descriptors(mid, midc); } /** * dma_tasklet - DMA interrupt tasklet * @data: tasklet arg (the controller structure) * * Scan the controller for interrupts for completion/error * Clear the interrupt and call for handling completion/error */ static void dma_tasklet(unsigned long data) { struct middma_device *mid = NULL; struct intel_mid_dma_chan *midc = NULL; u32 status, raw_tfr, raw_block; int i; mid = (struct middma_device *)data; if (mid == NULL) { pr_err("ERR_MDMA: tasklet Null param\n"); return; } pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); raw_tfr = ioread32(mid->dma_base + RAW_TFR); raw_block = ioread32(mid->dma_base + RAW_BLOCK); status = raw_tfr | raw_block; status &= mid->intr_mask; while (status) { /*txn interrupt*/ i = get_ch_index(&status, mid->chan_base); if (i < 0) { pr_err("ERR_MDMA:Invalid ch index %x\n", i); return; } midc = &mid->ch[i]; if (midc == NULL) { pr_err("ERR_MDMA:Null 
param midc\n"); return; } pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", status, midc->ch_id, i); midc->raw_tfr = raw_tfr; midc->raw_block = raw_block; spin_lock_bh(&midc->lock); /*clearing this interrupts first*/ iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); if (raw_block) { iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK); } midc_scan_descriptors(mid, midc); pr_debug("MDMA:Scan of desc... complete, unmasking\n"); iowrite32(UNMASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_TFR); if (raw_block) { iowrite32(UNMASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); } spin_unlock_bh(&midc->lock); } status = ioread32(mid->dma_base + RAW_ERR); status &= mid->intr_mask; while (status) { /*err interrupt*/ i = get_ch_index(&status, mid->chan_base); if (i < 0) { pr_err("ERR_MDMA:Invalid ch index %x\n", i); return; } midc = &mid->ch[i]; if (midc == NULL) { pr_err("ERR_MDMA:Null param midc\n"); return; } pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", status, midc->ch_id, i); iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR); spin_lock_bh(&midc->lock); midc_handle_error(mid, midc); iowrite32(UNMASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); spin_unlock_bh(&midc->lock); } pr_debug("MDMA:Exiting takslet...\n"); return; } static void dma_tasklet1(unsigned long data) { pr_debug("MDMA:in takslet1...\n"); return dma_tasklet(data); } static void dma_tasklet2(unsigned long data) { pr_debug("MDMA:in takslet2...\n"); return dma_tasklet(data); } /** * intel_mid_dma_interrupt - DMA ISR * @irq: IRQ where interrupt occurred * @data: ISR cllback data (the controller structure) * * See if this is our interrupt if so then schedule the tasklet * otherwise ignore */ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) { struct middma_device *mid = data; u32 tfr_status, err_status; int call_tasklet = 0; tfr_status = ioread32(mid->dma_base + RAW_TFR); err_status = ioread32(mid->dma_base + RAW_ERR); if (!tfr_status && 
!err_status) return IRQ_NONE; /*DMA Interrupt*/ pr_debug("MDMA:Got an interrupt on irq %d\n", irq); pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); tfr_status &= mid->intr_mask; if (tfr_status) { /*need to disable intr*/ iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); pr_debug("MDMA: Calling tasklet %x\n", tfr_status); call_tasklet = 1; } err_status &= mid->intr_mask; if (err_status) { iowrite32((err_status << INT_MASK_WE), mid->dma_base + MASK_ERR); call_tasklet = 1; } if (call_tasklet) tasklet_schedule(&mid->tasklet); return IRQ_HANDLED; } static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) { return intel_mid_dma_interrupt(irq, data); } static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) { return intel_mid_dma_interrupt(irq, data); } /** * mid_setup_dma - Setup the DMA controller * @pdev: Controller PCI device structure * * Initialize the DMA controller, channels, registers with DMA engine, * ISR. Initialize DMA controller channels. 
*/ static int mid_setup_dma(struct pci_dev *pdev) { struct middma_device *dma = pci_get_drvdata(pdev); int err, i; /* DMA coherent memory pool for DMA descriptor allocations */ dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, sizeof(struct intel_mid_dma_desc), 32, 0); if (NULL == dma->dma_pool) { pr_err("ERR_MDMA:pci_pool_create failed\n"); err = -ENOMEM; goto err_dma_pool; } INIT_LIST_HEAD(&dma->common.channels); dma->pci_id = pdev->device; if (dma->pimr_mask) { dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, LNW_PERIPHRAL_MASK_SIZE); if (dma->mask_reg == NULL) { pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); err = -ENOMEM; goto err_ioremap; } } else dma->mask_reg = NULL; pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); /*init CH structures*/ dma->intr_mask = 0; dma->state = RUNNING; for (i = 0; i < dma->max_chan; i++) { struct intel_mid_dma_chan *midch = &dma->ch[i]; midch->chan.device = &dma->common; dma_cookie_init(&midch->chan); midch->ch_id = dma->chan_base + i; pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); midch->dma_base = dma->dma_base; midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; midch->dma = dma; dma->intr_mask |= 1 << (dma->chan_base + i); spin_lock_init(&midch->lock); INIT_LIST_HEAD(&midch->active_list); INIT_LIST_HEAD(&midch->queue); INIT_LIST_HEAD(&midch->free_list); /*mask interrupts*/ iowrite32(MASK_INTR_REG(midch->ch_id), dma->dma_base + MASK_BLOCK); iowrite32(MASK_INTR_REG(midch->ch_id), dma->dma_base + MASK_SRC_TRAN); iowrite32(MASK_INTR_REG(midch->ch_id), dma->dma_base + MASK_DST_TRAN); iowrite32(MASK_INTR_REG(midch->ch_id), dma->dma_base + MASK_ERR); iowrite32(MASK_INTR_REG(midch->ch_id), dma->dma_base + MASK_TFR); disable_dma_interrupt(midch); list_add_tail(&midch->chan.device_node, &dma->common.channels); } pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); /*init dma structure*/ dma_cap_zero(dma->common.cap_mask); dma_cap_set(DMA_MEMCPY, 
dma->common.cap_mask); dma_cap_set(DMA_SLAVE, dma->common.cap_mask); dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); dma->common.dev = &pdev->dev; dma->common.device_alloc_chan_resources = intel_mid_dma_alloc_chan_resources; dma->common.device_free_chan_resources = intel_mid_dma_free_chan_resources; dma->common.device_tx_status = intel_mid_dma_tx_status; dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; dma->common.device_issue_pending = intel_mid_dma_issue_pending; dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; dma->common.device_control = intel_mid_dma_device_control; /*enable dma cntrl*/ iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); /*register irq */ if (dma->pimr_mask) { pr_debug("MDMA:Requesting irq shared for DMAC1\n"); err = request_irq(pdev->irq, intel_mid_dma_interrupt1, IRQF_SHARED, "INTEL_MID_DMAC1", dma); if (0 != err) goto err_irq; } else { dma->intr_mask = 0x03; pr_debug("MDMA:Requesting irq for DMAC2\n"); err = request_irq(pdev->irq, intel_mid_dma_interrupt2, IRQF_SHARED, "INTEL_MID_DMAC2", dma); if (0 != err) goto err_irq; } /*register device w/ engine*/ err = dma_async_device_register(&dma->common); if (0 != err) { pr_err("ERR_MDMA:device_register failed: %d\n", err); goto err_engine; } if (dma->pimr_mask) { pr_debug("setting up tasklet1 for DMAC1\n"); tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma); } else { pr_debug("setting up tasklet2 for DMAC2\n"); tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma); } return 0; err_engine: free_irq(pdev->irq, dma); err_irq: if (dma->mask_reg) iounmap(dma->mask_reg); err_ioremap: pci_pool_destroy(dma->dma_pool); err_dma_pool: pr_err("ERR_MDMA:setup_dma failed: %d\n", err); return err; } /** * middma_shutdown - Shutdown the DMA controller * @pdev: Controller PCI device structure * * Called by remove * Unregister DMa controller, clear all structures and free interrupt */ static void middma_shutdown(struct pci_dev *pdev) { struct middma_device *device = 
pci_get_drvdata(pdev); dma_async_device_unregister(&device->common); pci_pool_destroy(device->dma_pool); if (device->mask_reg) iounmap(device->mask_reg); if (device->dma_base) iounmap(device->dma_base); free_irq(pdev->irq, device); return; } /** * intel_mid_dma_probe - PCI Probe * @pdev: Controller PCI device structure * @id: pci device id structure * * Initialize the PCI device, map BARs, query driver data. * Call setup_dma to complete contoller and chan initilzation */ static int intel_mid_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct middma_device *device; u32 base_addr, bar_size; struct intel_mid_dma_probe_info *info; int err; pr_debug("MDMA: probe for %x\n", pdev->device); info = (void *)id->driver_data; pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n", info->max_chan, info->ch_base, info->block_size, info->pimr_mask); err = pci_enable_device(pdev); if (err) goto err_enable_device; err = pci_request_regions(pdev, "intel_mid_dmac"); if (err) goto err_request_regions; err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) goto err_set_dma_mask; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) goto err_set_dma_mask; device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) { pr_err("ERR_MDMA:kzalloc failed probe\n"); err = -ENOMEM; goto err_kzalloc; } device->pdev = pci_dev_get(pdev); base_addr = pci_resource_start(pdev, 0); bar_size = pci_resource_len(pdev, 0); device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE); if (!device->dma_base) { pr_err("ERR_MDMA:ioremap failed\n"); err = -ENOMEM; goto err_ioremap; } pci_set_drvdata(pdev, device); pci_set_master(pdev); device->max_chan = info->max_chan; device->chan_base = info->ch_base; device->block_size = info->block_size; device->pimr_mask = info->pimr_mask; err = mid_setup_dma(pdev); if (err) goto err_dma; pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); return 0; err_dma: iounmap(device->dma_base); err_ioremap: 
pci_dev_put(pdev); kfree(device); err_kzalloc: err_set_dma_mask: pci_release_regions(pdev); pci_disable_device(pdev); err_request_regions: err_enable_device: pr_err("ERR_MDMA:Probe failed %d\n", err); return err; } /** * intel_mid_dma_remove - PCI remove * @pdev: Controller PCI device structure * * Free up all resources and data * Call shutdown_dma to complete contoller and chan cleanup */ static void intel_mid_dma_remove(struct pci_dev *pdev) { struct middma_device *device = pci_get_drvdata(pdev); pm_runtime_get_noresume(&pdev->dev); pm_runtime_forbid(&pdev->dev); middma_shutdown(pdev); pci_dev_put(pdev); kfree(device); pci_release_regions(pdev); pci_disable_device(pdev); } /* Power Management */ /* * dma_suspend - PCI suspend function * * @pci: PCI device structure * @state: PM message * * This function is called by OS when a power event occurs */ static int dma_suspend(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); int i; struct middma_device *device = pci_get_drvdata(pci); pr_debug("MDMA: dma_suspend called\n"); for (i = 0; i < device->max_chan; i++) { if (device->ch[i].in_use) return -EAGAIN; } dmac1_mask_periphral_intr(device); device->state = SUSPENDED; pci_save_state(pci); pci_disable_device(pci); pci_set_power_state(pci, PCI_D3hot); return 0; } /** * dma_resume - PCI resume function * * @pci: PCI device structure * * This function is called by OS when a power event occurs */ int dma_resume(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); int ret; struct middma_device *device = pci_get_drvdata(pci); pr_debug("MDMA: dma_resume called\n"); pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); ret = pci_enable_device(pci); if (ret) { pr_err("MDMA: device can't be enabled for %x\n", pci->device); return ret; } device->state = RUNNING; iowrite32(REG_BIT0, device->dma_base + DMA_CFG); return 0; } static int dma_runtime_suspend(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct middma_device *device = 
pci_get_drvdata(pci_dev); device->state = SUSPENDED; return 0; } static int dma_runtime_resume(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct middma_device *device = pci_get_drvdata(pci_dev); device->state = RUNNING; iowrite32(REG_BIT0, device->dma_base + DMA_CFG); return 0; } static int dma_runtime_idle(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct middma_device *device = pci_get_drvdata(pdev); int i; for (i = 0; i < device->max_chan; i++) { if (device->ch[i].in_use) return -EAGAIN; } return 0; } /****************************************************************************** * PCI stuff */ static struct pci_device_id intel_mid_dma_ids[] = { { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)}, { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)}, { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)}, { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)}, { 0, } }; MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); static const struct dev_pm_ops intel_mid_dma_pm = { .runtime_suspend = dma_runtime_suspend, .runtime_resume = dma_runtime_resume, .runtime_idle = dma_runtime_idle, .suspend = dma_suspend, .resume = dma_resume, }; static struct pci_driver intel_mid_dma_pci_driver = { .name = "Intel MID DMA", .id_table = intel_mid_dma_ids, .probe = intel_mid_dma_probe, .remove = intel_mid_dma_remove, #ifdef CONFIG_PM .driver = { .pm = &intel_mid_dma_pm, }, #endif }; static int __init intel_mid_dma_init(void) { pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", INTEL_MID_DMA_DRIVER_VERSION); return pci_register_driver(&intel_mid_dma_pci_driver); } fs_initcall(intel_mid_dma_init); static void __exit intel_mid_dma_exit(void) { pci_unregister_driver(&intel_mid_dma_pci_driver); } module_exit(intel_mid_dma_exit); MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); MODULE_DESCRIPTION("Intel (R) MID DMAC Driver"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
gpl-2.0
varchild/android_kernel_htc_msm8660
fs/nfs/mount_clnt.c
922
11963
/*
 * In-kernel MOUNT protocol client
 *
 * Copyright (C) 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs_fs.h>
#include "internal.h"

#ifdef RPC_DEBUG
# define NFSDBG_FACILITY	NFSDBG_MOUNT
#endif

/*
 * Maximum MOUNT pathname length.
 * Defined by RFC 1094, section A.3; and RFC 1813, section 5.1.4
 */
#define MNTPATHLEN		(1024)

/*
 * XDR data type sizes (in 32-bit XDR words)
 */
#define encode_dirpath_sz	(1 + XDR_QUADLEN(MNTPATHLEN))
#define MNT_status_sz		(1)
#define MNT_fhs_status_sz	(1)
#define MNT_fhandle_sz		XDR_QUADLEN(NFS2_FHSIZE)
#define MNT_fhandle3_sz		(1 + XDR_QUADLEN(NFS3_FHSIZE))
#define MNT_authflav3_sz	(1 + NFS_MAX_SECFLAVORS)

/*
 * XDR argument and result sizes
 */
#define MNT_enc_dirpath_sz	encode_dirpath_sz
#define MNT_dec_mountres_sz	(MNT_status_sz + MNT_fhandle_sz)
#define MNT_dec_mountres3_sz	(MNT_status_sz + MNT_fhandle_sz + \
				 MNT_authflav3_sz)

/*
 * MOUNT version 1 procedure numbers.
 * Defined by RFC 1094, section A.5
 */
enum {
	MOUNTPROC_NULL		= 0,
	MOUNTPROC_MNT		= 1,
	MOUNTPROC_DUMP		= 2,
	MOUNTPROC_UMNT		= 3,
	MOUNTPROC_UMNTALL	= 4,
	MOUNTPROC_EXPORT	= 5,
};

/*
 * MOUNT version 3 procedure numbers.
 * Defined by RFC 1813, section 5.2
 */
enum {
	MOUNTPROC3_NULL		= 0,
	MOUNTPROC3_MNT		= 1,
	MOUNTPROC3_DUMP		= 2,
	MOUNTPROC3_UMNT		= 3,
	MOUNTPROC3_UMNTALL	= 4,
	MOUNTPROC3_EXPORT	= 5,
};

/* Forward declaration; full definition is at the bottom of this file. */
static struct rpc_program	mnt_program;

/*
 * MOUNTv1 status codes.
 * Defined by OpenGroup XNFS Version 3W, chapter 8
 */
enum mountstat {
	MNT_OK			= 0,
	MNT_EPERM		= 1,
	MNT_ENOENT		= 2,
	MNT_EACCES		= 13,
	MNT_EINVAL		= 22,
};

/* Maps an on-the-wire MOUNTv1 status code to a local errno value. */
static struct {
	u32 status;
	int errno;
} mnt_errtbl[] = {
	{ .status = MNT_OK,		.errno = 0,		},
	{ .status = MNT_EPERM,		.errno = -EPERM,	},
	{ .status = MNT_ENOENT,		.errno = -ENOENT,	},
	{ .status = MNT_EACCES,		.errno = -EACCES,	},
	{ .status = MNT_EINVAL,		.errno = -EINVAL,	},
};

/*
 * MOUNTv3 status codes.
 * Defined by RFC 1813, section 5.1.5
 */
enum mountstat3 {
	MNT3_OK			= 0,	/* no error */
	MNT3ERR_PERM		= 1,	/* Not owner */
	MNT3ERR_NOENT		= 2,	/* No such file or directory */
	MNT3ERR_IO		= 5,	/* I/O error */
	MNT3ERR_ACCES		= 13,	/* Permission denied */
	MNT3ERR_NOTDIR		= 20,	/* Not a directory */
	MNT3ERR_INVAL		= 22,	/* Invalid argument */
	MNT3ERR_NAMETOOLONG	= 63,	/* Filename too long */
	MNT3ERR_NOTSUPP		= 10004, /* Operation not supported */
	MNT3ERR_SERVERFAULT	= 10006, /* A failure on the server */
};

/* Maps an on-the-wire MOUNTv3 status code to a local errno value. */
static struct {
	u32 status;
	int errno;
} mnt3_errtbl[] = {
	{ .status = MNT3_OK,			.errno = 0,		},
	{ .status = MNT3ERR_PERM,		.errno = -EPERM,	},
	{ .status = MNT3ERR_NOENT,		.errno = -ENOENT,	},
	{ .status = MNT3ERR_IO,			.errno = -EIO,		},
	{ .status = MNT3ERR_ACCES,		.errno = -EACCES,	},
	{ .status = MNT3ERR_NOTDIR,		.errno = -ENOTDIR,	},
	{ .status = MNT3ERR_INVAL,		.errno = -EINVAL,	},
	{ .status = MNT3ERR_NAMETOOLONG,	.errno = -ENAMETOOLONG,	},
	{ .status = MNT3ERR_NOTSUPP,		.errno = -ENOTSUPP,	},
	{ .status = MNT3ERR_SERVERFAULT,	.errno = -EREMOTEIO,	},
};

/* Decoded result of a MNT call: mapped errno, file handle, auth flavors. */
struct mountres {
	int errno;
	struct nfs_fh *fh;
	unsigned int *auth_count;
	rpc_authflavor_t *auth_flavors;
};

struct mnt_fhstatus {
	u32 status;
	struct nfs_fh *fh;
};

/**
 * nfs_mount - Obtain an NFS file handle for the given host and path
 * @info: pointer to mount request arguments
 *
 * Uses default timeout parameters specified by underlying transport.
 *
 * Returns zero and fills in @info->fh (and the auth flavor list for v3)
 * on success; otherwise a negative errno from client creation, the RPC
 * call, or the server's mapped MNT status.
 */
int nfs_mount(struct nfs_mount_request *info)
{
	struct mountres	result = {
		.fh		= info->fh,
		.auth_count	= info->auth_flav_len,
		.auth_flavors	= info->auth_flavs,
	};
	struct rpc_message msg	= {
		.rpc_argp	= info->dirpath,
		.rpc_resp	= &result,
	};
	struct rpc_create_args args = {
		.net		= &init_net,
		.protocol	= info->protocol,
		.address	= info->sap,
		.addrsize	= info->salen,
		.servername	= info->hostname,
		.program	= &mnt_program,
		.version	= info->version,
		.authflavor	= RPC_AUTH_UNIX,
	};
	struct rpc_clnt		*mnt_clnt;
	int			status;

	dprintk("NFS: sending MNT request for %s:%s\n",
		(info->hostname ? info->hostname : "server"),
			info->dirpath);

	if (info->noresvport)
		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;

	mnt_clnt = rpc_create(&args);
	if (IS_ERR(mnt_clnt))
		goto out_clnt_err;

	/* Select the MNT procedure matching the requested MOUNT version */
	if (info->version == NFS_MNT3_VERSION)
		msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT];
	else
		msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT];

	/* The client is shut down whether or not the call succeeded */
	status = rpc_call_sync(mnt_clnt, &msg, RPC_TASK_SOFT|RPC_TASK_TIMEOUT);
	rpc_shutdown_client(mnt_clnt);
	if (status < 0)
		goto out_call_err;
	if (result.errno != 0)
		goto out_mnt_err;

	dprintk("NFS: MNT request succeeded\n");
	status = 0;

out:
	return status;

out_clnt_err:
	status = PTR_ERR(mnt_clnt);
	dprintk("NFS: failed to create MNT RPC client, status=%d\n", status);
	goto out;

out_call_err:
	dprintk("NFS: MNT request failed, status=%d\n", status);
	goto out;

out_mnt_err:
	dprintk("NFS: MNT server returned result %d\n", result.errno);
	status = result.errno;
	goto out;
}

/**
 * nfs_umount - Notify a server that we have unmounted this export
 * @info: pointer to umount request arguments
 *
 * MOUNTPROC_UMNT is advisory, so we set a short timeout, and always
 * use UDP.  All failures are logged via dprintk and otherwise ignored.
 */
void nfs_umount(const struct nfs_mount_request *info)
{
	static const struct rpc_timeout nfs_umnt_timeout = {
		.to_initval = 1 * HZ,
		.to_maxval = 3 * HZ,
		.to_retries = 2,
	};
	struct rpc_create_args args = {
		.net		= &init_net,
		.protocol	= IPPROTO_UDP,
		.address	= info->sap,
		.addrsize	= info->salen,
		.timeout	= &nfs_umnt_timeout,
		.servername	= info->hostname,
		.program	= &mnt_program,
		.version	= info->version,
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING,
	};
	struct rpc_message msg	= {
		.rpc_argp	= info->dirpath,
	};
	struct rpc_clnt *clnt;
	int status;

	if (info->noresvport)
		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;

	clnt = rpc_create(&args);
	if (IS_ERR(clnt))
		goto out_clnt_err;

	dprintk("NFS: sending UMNT request for %s:%s\n",
		(info->hostname ? info->hostname : "server"), info->dirpath);

	if (info->version == NFS_MNT3_VERSION)
		msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC3_UMNT];
	else
		msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC_UMNT];

	status = rpc_call_sync(clnt, &msg, 0);
	rpc_shutdown_client(clnt);

	if (unlikely(status < 0))
		goto out_call_err;

	return;

out_clnt_err:
	dprintk("NFS: failed to create UMNT RPC client, status=%ld\n",
			PTR_ERR(clnt));
	return;

out_call_err:
	dprintk("NFS: UMNT request failed, status=%d\n", status);
}

/*
 * XDR encode/decode functions for MOUNT
 */

/* Encode @pathname as an XDR opaque (length-prefixed byte string). */
static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
{
	const u32 pathname_len = strlen(pathname);
	__be32 *p;

	BUG_ON(pathname_len > MNTPATHLEN);
	p = xdr_reserve_space(xdr, 4 + pathname_len);
	xdr_encode_opaque(p, pathname, pathname_len);
}

static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr,
				const char *dirpath)
{
	encode_mntdirpath(xdr, dirpath);
}

/*
 * RFC 1094: "A non-zero status indicates some sort of error.  In this
 * case, the status is a UNIX error number."  This can be problematic
 * if the server and client use different errno values for the same
 * error.
 *
 * However, the OpenGroup XNFS spec provides a simple mapping that is
 * independent of local errno values on the server and the client.
 */
static int decode_status(struct xdr_stream *xdr, struct mountres *res)
{
	unsigned int i;
	u32 status;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return -EIO;
	status = be32_to_cpup(p);

	for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
		if (mnt_errtbl[i].status == status) {
			res->errno = mnt_errtbl[i].errno;
			return 0;
		}
	}

	/* Unknown status codes are treated as a permission failure */
	dprintk("NFS: unrecognized MNT status code: %u\n", status);
	res->errno = -EACCES;
	return 0;
}

/* Decode a fixed-size NFSv2 file handle (NFS2_FHSIZE bytes, no length). */
static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res)
{
	struct nfs_fh *fh = res->fh;
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS2_FHSIZE);
	if (unlikely(p == NULL))
		return -EIO;

	fh->size = NFS2_FHSIZE;
	memcpy(fh->data, p, NFS2_FHSIZE);
	return 0;
}

static int mnt_xdr_dec_mountres(struct rpc_rqst *req,
				struct xdr_stream *xdr,
				struct mountres *res)
{
	int status;

	status = decode_status(xdr, res);
	if (unlikely(status != 0 || res->errno != 0))
		return status;
	return decode_fhandle(xdr, res);
}

/* Like decode_status(), but for the MOUNTv3 status code table. */
static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
{
	unsigned int i;
	u32 status;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return -EIO;
	status = be32_to_cpup(p);

	for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
		if (mnt3_errtbl[i].status == status) {
			res->errno = mnt3_errtbl[i].errno;
			return 0;
		}
	}

	dprintk("NFS: unrecognized MNT3 status code: %u\n", status);
	res->errno = -EACCES;
	return 0;
}

/* Decode a variable-length NFSv3 file handle (length word + data). */
static int decode_fhandle3(struct xdr_stream *xdr, struct mountres *res)
{
	struct nfs_fh *fh = res->fh;
	u32 size;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return -EIO;

	size = be32_to_cpup(p);
	/* Reject handles larger than the buffer, and empty handles */
	if (size > NFS3_FHSIZE || size == 0)
		return -EIO;

	p = xdr_inline_decode(xdr, size);
	if (unlikely(p == NULL))
		return -EIO;

	fh->size = size;
	memcpy(fh->data, p, size);
	return 0;
}

/*
 * Decode the auth_flavors list following a MNT3 reply.  At most
 * min(*res->auth_count, NFS_MAX_SECFLAVORS) entries are stored;
 * *res->auth_count is updated to the number actually copied.
 */
static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
{
	rpc_authflavor_t *flavors = res->auth_flavors;
	unsigned int *count = res->auth_count;
	u32 entries, i;
	__be32 *p;

	/* Caller did not ask for any flavors; skip decoding entirely */
	if (*count == 0)
		return 0;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return -EIO;
	entries = be32_to_cpup(p);
	dprintk("NFS: received %u auth flavors\n", entries);
	if (entries > NFS_MAX_SECFLAVORS)
		entries = NFS_MAX_SECFLAVORS;

	p = xdr_inline_decode(xdr, 4 * entries);
	if (unlikely(p == NULL))
		return -EIO;

	if (entries > *count)
		entries = *count;

	for (i = 0; i < entries; i++) {
		flavors[i] = be32_to_cpup(p++);
		dprintk("NFS:   auth flavor[%u]: %d\n", i, flavors[i]);
	}
	*count = i;

	return 0;
}

static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
				 struct xdr_stream *xdr,
				 struct mountres *res)
{
	int status;

	status = decode_fhs_status(xdr, res);
	if (unlikely(status != 0 || res->errno != 0))
		return status;
	status = decode_fhandle3(xdr, res);
	if (unlikely(status != 0)) {
		/* A garbled handle is reported to the caller, not as -EIO */
		res->errno = -EBADHANDLE;
		return 0;
	}
	return decode_auth_flavors(xdr, res);
}

/* MOUNTv1 procedure table: only MNT and UMNT are used by this client. */
static struct rpc_procinfo mnt_procedures[] = {
	[MOUNTPROC_MNT] = {
		.p_proc		= MOUNTPROC_MNT,
		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
		.p_decode	= (kxdrdproc_t)mnt_xdr_dec_mountres,
		.p_arglen	= MNT_enc_dirpath_sz,
		.p_replen	= MNT_dec_mountres_sz,
		.p_statidx	= MOUNTPROC_MNT,
		.p_name		= "MOUNT",
	},
	[MOUNTPROC_UMNT] = {
		.p_proc		= MOUNTPROC_UMNT,
		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
		.p_arglen	= MNT_enc_dirpath_sz,
		.p_statidx	= MOUNTPROC_UMNT,
		.p_name		= "UMOUNT",
	},
};

/* MOUNTv3 procedure table: only MNT and UMNT are used by this client. */
static struct rpc_procinfo mnt3_procedures[] = {
	[MOUNTPROC3_MNT] = {
		.p_proc		= MOUNTPROC3_MNT,
		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
		.p_decode	= (kxdrdproc_t)mnt_xdr_dec_mountres3,
		.p_arglen	= MNT_enc_dirpath_sz,
		.p_replen	= MNT_dec_mountres3_sz,
		.p_statidx	= MOUNTPROC3_MNT,
		.p_name		= "MOUNT",
	},
	[MOUNTPROC3_UMNT] = {
		.p_proc		= MOUNTPROC3_UMNT,
		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
		.p_arglen	= MNT_enc_dirpath_sz,
		.p_statidx	= MOUNTPROC3_UMNT,
		.p_name		= "UMOUNT",
	},
};

static struct rpc_version mnt_version1 = {
	.number		= 1,
	.nrprocs	= ARRAY_SIZE(mnt_procedures),
	.procs		= mnt_procedures,
};

static struct rpc_version mnt_version3 = {
	.number		= 3,
	.nrprocs	= ARRAY_SIZE(mnt3_procedures),
	.procs		= mnt3_procedures,
};

/* Indexed by protocol version; versions 0 and 2 are not implemented */
static struct rpc_version *mnt_version[] = {
	NULL,
	&mnt_version1,
	NULL,
	&mnt_version3,
};

static struct rpc_stat mnt_stats;

static struct rpc_program mnt_program = {
	.name		= "mount",
	.number		= NFS_MNT_PROGRAM,
	.nrvers		= ARRAY_SIZE(mnt_version),
	.version	= mnt_version,
	.stats		= &mnt_stats,
};
gpl-2.0
chadouming/canuck-3.10
drivers/mfd/wcd9xxx-slimslave.c
1178
14128
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>

/* Per-codec register base offsets for the SLIMbus slave interface device */
struct wcd9xxx_slim_sch {
	u16 rx_port_ch_reg_base;
	u16 port_tx_cfg_reg_base;
	u16 port_rx_cfg_reg_base;
};

static struct wcd9xxx_slim_sch sh_ch;

static int wcd9xxx_alloc_slim_sh_ch(struct wcd9xxx *wcd9xxx,
				    u8 wcd9xxx_pgd_la, u32 cnt,
				    struct wcd9xxx_ch *channels, u32 path);
static int wcd9xxx_dealloc_slim_sh_ch(struct slim_device *slim,
				      u32 cnt, struct wcd9xxx_ch *channels);

/*
 * Select the register base offsets in sh_ch according to the codec's
 * slave-address type (Tabla vs. later codecs such as Taiko).
 */
static int wcd9xxx_configure_ports(struct wcd9xxx *wcd9xxx)
{
	if (wcd9xxx->codec_type->slim_slave_type ==
	    WCD9XXX_SLIM_SLAVE_ADDR_TYPE_TABLA) {
		sh_ch.rx_port_ch_reg_base = 0x180;
		sh_ch.port_rx_cfg_reg_base = 0x040;
		sh_ch.port_tx_cfg_reg_base = 0x040;
	} else {
		sh_ch.rx_port_ch_reg_base =
			0x180 - (TAIKO_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS * 4);
		sh_ch.port_rx_cfg_reg_base =
			0x040 - TAIKO_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS;
		sh_ch.port_tx_cfg_reg_base = 0x050;
	}

	return 0;
}

/*
 * Initialize the SLIMbus slave: record the RX/TX channel numbers supplied
 * by the machine driver and pre-allocate the slave port handles for each.
 * A failure on one direction frees that direction's channel array but does
 * not undo the other; the function still returns 0 in that case (only a
 * wcd9xxx_configure_ports() failure propagates an error).
 */
int wcd9xxx_init_slimslave(struct wcd9xxx *wcd9xxx, u8 wcd9xxx_pgd_la,
			   unsigned int tx_num, unsigned int *tx_slot,
			   unsigned int rx_num, unsigned int *rx_slot)
{
	int ret = 0;
	int i;

	ret = wcd9xxx_configure_ports(wcd9xxx);
	if (ret) {
		pr_err("%s: Failed to configure register address offset\n",
		       __func__);
		goto err;
	}

	if (wcd9xxx->rx_chs) {
		wcd9xxx->num_rx_port = rx_num;
		for (i = 0; i < rx_num; i++) {
			wcd9xxx->rx_chs[i].ch_num = rx_slot[i];
			INIT_LIST_HEAD(&wcd9xxx->rx_chs[i].list);
		}
		ret = wcd9xxx_alloc_slim_sh_ch(wcd9xxx, wcd9xxx_pgd_la,
						wcd9xxx->num_rx_port,
						wcd9xxx->rx_chs,
						SLIM_SINK);
		if (ret) {
			pr_err("%s: Failed to alloc %d rx slimbus channels\n",
				__func__, wcd9xxx->num_rx_port);
			kfree(wcd9xxx->rx_chs);
			wcd9xxx->rx_chs = NULL;
			wcd9xxx->num_rx_port = 0;
		}
	} else {
		pr_err("Not able to allocate memory for %d slimbus rx ports\n",
			wcd9xxx->num_rx_port);
	}

	if (wcd9xxx->tx_chs) {
		wcd9xxx->num_tx_port = tx_num;
		for (i = 0; i < tx_num; i++) {
			wcd9xxx->tx_chs[i].ch_num = tx_slot[i];
			INIT_LIST_HEAD(&wcd9xxx->tx_chs[i].list);
		}
		ret = wcd9xxx_alloc_slim_sh_ch(wcd9xxx, wcd9xxx_pgd_la,
						wcd9xxx->num_tx_port,
						wcd9xxx->tx_chs,
						SLIM_SRC);
		if (ret) {
			pr_err("%s: Failed to alloc %d tx slimbus channels\n",
				__func__, wcd9xxx->num_tx_port);
			kfree(wcd9xxx->tx_chs);
			wcd9xxx->tx_chs = NULL;
			wcd9xxx->num_tx_port = 0;
		}
	} else {
		pr_err("Not able to allocate memory for %d slimbus tx ports\n",
			wcd9xxx->num_tx_port);
	}
	return 0;
err:
	return ret;
}

/* Release all SLIMbus channel handles allocated by wcd9xxx_init_slimslave(). */
int wcd9xxx_deinit_slimslave(struct wcd9xxx *wcd9xxx)
{
	if (wcd9xxx->num_rx_port) {
		wcd9xxx_dealloc_slim_sh_ch(wcd9xxx->slim,
					   wcd9xxx->num_rx_port,
					   wcd9xxx->rx_chs);
		wcd9xxx->num_rx_port = 0;
	}
	if (wcd9xxx->num_tx_port) {
		wcd9xxx_dealloc_slim_sh_ch(wcd9xxx->slim,
					   wcd9xxx->num_tx_port,
					   wcd9xxx->tx_chs);
		wcd9xxx->num_tx_port = 0;
	}
	return 0;
}

/*
 * Query slave port and channel handles for @cnt channels up front.
 * @path is SLIM_SINK for RX or SLIM_SRC for TX.  On failure, handles
 * obtained so far are left allocated for the caller to clean up.
 */
static int wcd9xxx_alloc_slim_sh_ch(struct wcd9xxx *wcd9xxx,
				    u8 wcd9xxx_pgd_la, u32 cnt,
				    struct wcd9xxx_ch *channels, u32 path)
{
	int ret = 0;
	u32 ch_idx ;

	/* The slimbus channel allocation seem take longer time
	 * so do the allocation up front to avoid delay in start of
	 * playback
	 */
	pr_debug("%s: pgd_la[%d]\n", __func__, wcd9xxx_pgd_la);
	for (ch_idx = 0; ch_idx < cnt; ch_idx++) {
		ret = slim_get_slaveport(wcd9xxx_pgd_la,
					 channels[ch_idx].port,
					 &channels[ch_idx].sph, path);
		pr_debug("%s: pgd_la[%d] channels[%d].port[%d]\n"
			 "channels[%d].sph[%d] path[%d]\n",
			 __func__, wcd9xxx_pgd_la, ch_idx,
			 channels[ch_idx].port,
			 ch_idx, channels[ch_idx].sph, path);
		if (ret < 0) {
			pr_err("%s: slave port failure id[%d] ret[%d]\n",
				__func__, channels[ch_idx].ch_num, ret);
			goto err;
		}

		ret = slim_query_ch(wcd9xxx->slim,
				    channels[ch_idx].ch_num,
				    &channels[ch_idx].ch_h);
		if (ret < 0) {
			pr_err("%s: slim_query_ch failed ch-num[%d] ret[%d]\n",
				__func__, channels[ch_idx].ch_num, ret);
			goto err;
		}
	}
err:
	return ret;
}

/*
 * Deallocate the channel handles for @cnt channels.  Individual failures
 * are logged; the return value is that of the last slim_dealloc_ch() call.
 */
static int wcd9xxx_dealloc_slim_sh_ch(struct slim_device *slim,
				      u32 cnt, struct wcd9xxx_ch *channels)
{
	int idx = 0;
	int ret = 0;
	/* slim_dealloc_ch */
	for (idx = 0; idx < cnt; idx++) {
		ret = slim_dealloc_ch(slim, channels[idx].ch_h);
		if (ret < 0) {
			pr_err("%s: slim_dealloc_ch fail ret[%d] ch_h[%d]\n",
				__func__, ret, channels[idx].ch_h);
		}
	}
	return ret;
}

/* Enable slimbus slave device for RX path */
int wcd9xxx_cfg_slim_sch_rx(struct wcd9xxx *wcd9xxx,
			    struct list_head *wcd9xxx_ch_list,
			    unsigned int rate, unsigned int bit_width,
			    u16 *grph)
{
	u8 ch_cnt = 0;
	u16 ch_h[SLIM_MAX_RX_PORTS] = {0};
	u8  payload = 0;
	u16 codec_port = 0;
	int ret;
	struct slim_ch prop;
	struct wcd9xxx_ch *rx;

	/* Configure slave interface device */
	list_for_each_entry(rx, wcd9xxx_ch_list, list) {
		payload |= 1 << rx->shift;
		ch_h[ch_cnt] = rx->ch_h;
		ch_cnt++;
		pr_debug("list ch->ch_h %d ch->sph %d\n", rx->ch_h, rx->sph);
	}
	pr_debug("%s: ch_cnt[%d] rate=%d WATER_MARK_VAL %d\n",
		 __func__, ch_cnt, rate, WATER_MARK_VAL);
	/* slim_define_ch api */
	prop.prot = SLIM_AUTO_ISO;
	prop.baser = SLIM_RATE_4000HZ;
	prop.dataf = SLIM_CH_DATAF_NOT_DEFINED;
	prop.auxf = SLIM_CH_AUXF_NOT_APPLICABLE;
	prop.ratem = (rate/4000);
	prop.sampleszbits = bit_width;

	pr_debug("Before slim_define_ch:\n"
		 "ch_cnt %d,ch_h[0] %d ch_h[1] %d, grph %d\n",
		 ch_cnt, ch_h[0], ch_h[1], *grph);
	ret = slim_define_ch(wcd9xxx->slim, &prop, ch_h, ch_cnt,
			     true, grph);
	if (ret < 0) {
		pr_err("%s: slim_define_ch failed ret[%d]\n",
		       __func__, ret);
		goto err;
	}

	list_for_each_entry(rx, wcd9xxx_ch_list, list) {
		codec_port = rx->port;
		pr_debug("%s: codec_port %d rx 0x%x, payload %d\n"
			 "sh_ch.rx_port_ch_reg_base0 0x%x\n"
			 "sh_ch.port_rx_cfg_reg_base 0x%x\n",
			 __func__, codec_port, (u32)rx, payload,
			 sh_ch.rx_port_ch_reg_base,
			 sh_ch.port_rx_cfg_reg_base);

		/* look for the valid port range and chose the
		 * payload accordingly
		 */
		/* write to interface device */
		ret = wcd9xxx_interface_reg_write(wcd9xxx,
				SB_PGD_RX_PORT_MULTI_CHANNEL_0(
				sh_ch.rx_port_ch_reg_base, codec_port),
				payload);

		if (ret < 0) {
			pr_err("%s:Intf-dev fail reg[%d] payload[%d] ret[%d]\n",
				__func__,
				SB_PGD_RX_PORT_MULTI_CHANNEL_0(
				sh_ch.rx_port_ch_reg_base, codec_port),
				payload, ret);
			goto err;
		}
		/* configure the slave port for water mark and enable*/
		ret = wcd9xxx_interface_reg_write(wcd9xxx,
				SB_PGD_PORT_CFG_BYTE_ADDR(
				sh_ch.port_rx_cfg_reg_base, codec_port),
				WATER_MARK_VAL);
		if (ret < 0) {
			pr_err("%s:watermark set failure for port[%d] ret[%d]",
				__func__, codec_port, ret);
		}

		ret = slim_connect_sink(wcd9xxx->slim, &rx->sph, 1, rx->ch_h);
		if (ret < 0) {
			pr_err("%s: slim_connect_sink failed ret[%d]\n",
				__func__, ret);
			goto err_close_slim_sch;
		}
	}
	/* slim_control_ch */
	ret = slim_control_ch(wcd9xxx->slim, *grph, SLIM_CH_ACTIVATE,
			      true);
	if (ret < 0) {
		pr_err("%s: slim_control_ch failed ret[%d]\n",
			__func__, ret);
		goto err_close_slim_sch;
	}
	return 0;

err_close_slim_sch:
	/*  release all acquired handles */
	wcd9xxx_close_slim_sch_rx(wcd9xxx, wcd9xxx_ch_list, *grph);
err:
	return ret;
}
EXPORT_SYMBOL_GPL(wcd9xxx_cfg_slim_sch_rx);

/* Enable slimbus slave device for RX path */
/* NOTE(review): comment above says RX but this configures the TX path;
 * sample size is fixed at 16 bits here regardless of @bit_width. */
int wcd9xxx_cfg_slim_sch_tx(struct wcd9xxx *wcd9xxx,
			    struct list_head *wcd9xxx_ch_list,
			    unsigned int rate, unsigned int bit_width,
			    u16 *grph)
{
	u16 ch_cnt = 0;
	u16 payload = 0;
	u16 ch_h[SLIM_MAX_TX_PORTS] = {0};
	u16 codec_port;
	int ret = 0;
	struct wcd9xxx_ch *tx;

	struct slim_ch prop;

	list_for_each_entry(tx, wcd9xxx_ch_list, list) {
		payload |= 1 << tx->shift;
		ch_h[ch_cnt] = tx->ch_h;
		ch_cnt++;
	}

	/* slim_define_ch api */
	prop.prot = SLIM_AUTO_ISO;
	prop.baser = SLIM_RATE_4000HZ;
	prop.dataf = SLIM_CH_DATAF_NOT_DEFINED;
	prop.auxf = SLIM_CH_AUXF_NOT_APPLICABLE;
	prop.ratem = (rate/4000);
	prop.sampleszbits = 16;
	ret = slim_define_ch(wcd9xxx->slim, &prop, ch_h, ch_cnt,
			     true, grph);
	if (ret < 0) {
		pr_err("%s: slim_define_ch failed ret[%d]\n",
		       __func__, ret);
		goto err;
	}

	pr_debug("%s: ch_cnt[%d] rate[%d]\n", __func__, ch_cnt, rate);
	list_for_each_entry(tx, wcd9xxx_ch_list, list) {
		codec_port = tx->port;
		pr_debug("%s: codec_port %d rx 0x%x, payload 0x%x\n",
			 __func__, codec_port, (u32)tx, payload);
		/* write to interface device */
		ret = wcd9xxx_interface_reg_write(wcd9xxx,
				SB_PGD_TX_PORT_MULTI_CHANNEL_0(codec_port),
				payload & 0x00FF);
		if (ret < 0) {
			pr_err("%s:Intf-dev fail reg[%d] payload[%d] ret[%d]\n",
				__func__,
				SB_PGD_TX_PORT_MULTI_CHANNEL_0(codec_port),
				payload, ret);
			goto err;
		}
		/* ports 8,9 */
		ret = wcd9xxx_interface_reg_write(wcd9xxx,
				SB_PGD_TX_PORT_MULTI_CHANNEL_1(codec_port),
				(payload & 0xFF00)>>8);
		if (ret < 0) {
			pr_err("%s:Intf-dev fail reg[%d] payload[%d] ret[%d]\n",
				__func__,
				SB_PGD_TX_PORT_MULTI_CHANNEL_1(codec_port),
				payload, ret);
			goto err;
		}
		/* configure the slave port for water mark and enable*/
		ret = wcd9xxx_interface_reg_write(wcd9xxx,
				SB_PGD_PORT_CFG_BYTE_ADDR(
				sh_ch.port_tx_cfg_reg_base, codec_port),
				WATER_MARK_VAL);
		if (ret < 0) {
			pr_err("%s:watermark set failure for port[%d] ret[%d]",
				__func__, codec_port, ret);
		}

		ret = slim_connect_src(wcd9xxx->slim, tx->sph, tx->ch_h);

		if (ret < 0) {
			pr_err("%s: slim_connect_src failed ret[%d]\n",
			       __func__, ret);
			goto err;
		}
	}
	/* slim_control_ch */
	ret = slim_control_ch(wcd9xxx->slim, *grph, SLIM_CH_ACTIVATE,
			      true);
	if (ret < 0) {
		pr_err("%s: slim_control_ch failed ret[%d]\n",
			__func__, ret);
		goto err;
	}
	return 0;
err:
	/* release all acquired handles */
	wcd9xxx_close_slim_sch_tx(wcd9xxx, wcd9xxx_ch_list, *grph);
	return ret;
}
EXPORT_SYMBOL_GPL(wcd9xxx_cfg_slim_sch_tx);

/* Deactivate (SLIM_CH_REMOVE) the RX channel group @grph. */
int wcd9xxx_close_slim_sch_rx(struct wcd9xxx *wcd9xxx,
			      struct list_head *wcd9xxx_ch_list, u16 grph)
{
	u32 sph[SLIM_MAX_RX_PORTS] = {0};
	int ch_cnt = 0 ;
	int ret = 0;
	struct wcd9xxx_ch *rx;

	list_for_each_entry(rx, wcd9xxx_ch_list, list)
		sph[ch_cnt++] = rx->sph;

	pr_debug("%s ch_cht %d, sph[0] %d sph[1] %d\n", __func__, ch_cnt,
		sph[0], sph[1]);

	/* slim_control_ch (REMOVE) */
	pr_debug("%s before slim_control_ch grph %d\n", __func__, grph);
	ret = slim_control_ch(wcd9xxx->slim, grph, SLIM_CH_REMOVE, true);
	if (ret < 0) {
		pr_err("%s: slim_control_ch failed ret[%d]\n", __func__, ret);
		goto err;
	}
err:
	return ret;
}
EXPORT_SYMBOL_GPL(wcd9xxx_close_slim_sch_rx);

/* Deactivate (SLIM_CH_REMOVE) the TX channel group @grph. */
int wcd9xxx_close_slim_sch_tx(struct wcd9xxx *wcd9xxx,
			      struct list_head *wcd9xxx_ch_list, u16 grph)
{
	u32 sph[SLIM_MAX_TX_PORTS] = {0};
	int ret = 0;
	int ch_cnt = 0 ;
	struct wcd9xxx_ch *tx;

	pr_debug("%s\n", __func__);
	list_for_each_entry(tx, wcd9xxx_ch_list, list)
		sph[ch_cnt++] = tx->sph;

	pr_debug("%s ch_cht %d, sph[0] %d sph[1] %d\n",
		__func__, ch_cnt, sph[0], sph[1]);
	/* slim_control_ch (REMOVE) */
	ret = slim_control_ch(wcd9xxx->slim, grph, SLIM_CH_REMOVE, true);
	if (ret < 0) {
		pr_err("%s: slim_control_ch failed ret[%d]\n",
			__func__, ret);
		goto err;
	}
err:
	return ret;
}
EXPORT_SYMBOL_GPL(wcd9xxx_close_slim_sch_tx);

/*
 * Translate a channel number to its slave port index by subtracting
 * BASE_CH_NUM; returns -EINVAL for channel numbers below the base.
 */
int wcd9xxx_get_slave_port(unsigned int ch_num)
{
	int ret = 0;

	ret = (ch_num - BASE_CH_NUM);
	pr_debug("%s: ch_num[%d] slave port[%d]\n", __func__, ch_num, ret);
	if (ret < 0) {
		pr_err("%s: Error:- Invalid slave port found = %d\n",
			__func__, ret);
		return -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(wcd9xxx_get_slave_port);

/* Disconnect every slave port handle on @wcd9xxx_ch_list in one call. */
int wcd9xxx_disconnect_port(struct wcd9xxx *wcd9xxx,
			    struct list_head *wcd9xxx_ch_list, u16 grph)
{
	u32 sph[SLIM_MAX_TX_PORTS + SLIM_MAX_RX_PORTS] = {0};
	int ch_cnt = 0 ;
	int ret = 0;
	struct wcd9xxx_ch *slim_ch;

	list_for_each_entry(slim_ch, wcd9xxx_ch_list, list)
		sph[ch_cnt++] = slim_ch->sph;

	/* slim_disconnect_port */
	ret = slim_disconnect_ports(wcd9xxx->slim, sph, ch_cnt);
	if (ret < 0) {
		pr_err("%s: slim_disconnect_ports failed ret[%d]\n",
			__func__, ret);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(wcd9xxx_disconnect_port);

/* This function is called with mutex acquired */
/* Returns -EINVAL if @port_id is already present on @codec_dai_list. */
int wcd9xxx_rx_vport_validation(u32 port_id,
				struct list_head *codec_dai_list)
{
	struct wcd9xxx_ch *ch;
	int ret = 0;

	pr_debug("%s: port_id %u\n", __func__, port_id);

	list_for_each_entry(ch,
			    codec_dai_list, list) {
		pr_debug("%s: ch->port %u\n", __func__, ch->port);
		if (ch->port == port_id) {
			ret = -EINVAL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(wcd9xxx_rx_vport_validation);

/* This function is called with mutex acquired */
/*
 * For every DAI index set in @vtable, reject (-EINVAL) if @port_id is
 * already claimed by that DAI's channel list.
 */
int wcd9xxx_tx_vport_validation(u32 vtable, u32 port_id,
				struct wcd9xxx_codec_dai_data *codec_dai)
{
	struct wcd9xxx_ch *ch;
	int ret = 0;
	u32 index;
	u32 size = sizeof(vtable) * 8;

	pr_debug("%s: vtable 0x%x port_id %u size %d\n", __func__,
		 vtable, port_id, size);
	for_each_set_bit(index, (unsigned long *)&vtable, size) {
		list_for_each_entry(ch,
				    &codec_dai[index].wcd9xxx_ch_list,
				    list) {
			pr_debug("%s: index %u ch->port %u vtable 0x%x\n",
				 __func__, index, ch->port, vtable);
			if (ch->port == port_id) {
				pr_err("%s: TX%u is used by AIF%u_CAP Mixer\n",
				       __func__, port_id + 1,
				       (index + 1)/2);
				ret = -EINVAL;
				break;
			}
		}
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(wcd9xxx_tx_vport_validation);
gpl-2.0
jpoirier/linux
drivers/w1/w1_family.c
1690
3286
/*
 * w1_family.c
 *
 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched.h>	/* schedule_timeout() */
#include <linux/delay.h>
#include <linux/export.h>

#include "w1_family.h"
#include "w1.h"

/* Protects w1_families and the per-family refcount manipulation helpers */
DEFINE_SPINLOCK(w1_flock);
static LIST_HEAD(w1_families);

/**
 * w1_register_family() - register a device family driver
 * @newf:	family to register
 *
 * Returns 0 on success or -EEXIST if a family with the same fid is
 * already registered.  Existing slaves are rescanned against the new
 * driver set in either case.
 */
int w1_register_family(struct w1_family *newf)
{
	struct list_head *ent, *n;
	struct w1_family *f;
	int ret = 0;

	spin_lock(&w1_flock);
	list_for_each_safe(ent, n, &w1_families) {
		f = list_entry(ent, struct w1_family, family_entry);

		if (f->fid == newf->fid) {
			ret = -EEXIST;
			break;
		}
	}

	if (!ret) {
		atomic_set(&newf->refcnt, 0);
		list_add_tail(&newf->family_entry, &w1_families);
	}
	spin_unlock(&w1_flock);

	/* check default devices against the new set of drivers */
	w1_reconnect_slaves(newf, 1);

	return ret;
}

/**
 * w1_unregister_family() - unregister a device family driver
 * @fent:	family to unregister
 *
 * Removes the family from the list, detaches its slaves, then busy-waits
 * (sleeping one second per iteration) until the refcount drops to zero.
 */
void w1_unregister_family(struct w1_family *fent)
{
	struct list_head *ent, *n;
	struct w1_family *f;

	spin_lock(&w1_flock);
	list_for_each_safe(ent, n, &w1_families) {
		f = list_entry(ent, struct w1_family, family_entry);

		if (f->fid == fent->fid) {
			list_del(&fent->family_entry);
			break;
		}
	}
	spin_unlock(&w1_flock);

	/* deatch devices using this family code */
	w1_reconnect_slaves(fent, 0);

	while (atomic_read(&fent->refcnt)) {
		pr_info("Waiting for family %u to become free: refcnt=%d.\n",
				fent->fid, atomic_read(&fent->refcnt));

		if (msleep_interruptible(1000))
			flush_signals(current);
	}
}

/*
 * Look up a registered family by its family id; returns NULL if not found.
 * Should be called under w1_flock held.
 */
struct w1_family * w1_family_registered(u8 fid)
{
	struct list_head *ent, *n;
	struct w1_family *f = NULL;
	int ret = 0;

	list_for_each_safe(ent, n, &w1_families) {
		f = list_entry(ent, struct w1_family, family_entry);

		if (f->fid == fid) {
			ret = 1;
			break;
		}
	}

	return (ret) ? f : NULL;
}

/* Drop one reference; caller holds w1_flock. */
static void __w1_family_put(struct w1_family *f)
{
	atomic_dec(&f->refcnt);
}

/* Drop one reference, taking w1_flock internally. */
void w1_family_put(struct w1_family *f)
{
	spin_lock(&w1_flock);
	__w1_family_put(f);
	spin_unlock(&w1_flock);
}

#if 0
void w1_family_get(struct w1_family *f)
{
	spin_lock(&w1_flock);
	__w1_family_get(f);
	spin_unlock(&w1_flock);
}

#endif  /*  0  */

/* Take one reference; caller holds w1_flock.  Barriers order the
 * increment with respect to surrounding accesses. */
void __w1_family_get(struct w1_family *f)
{
	smp_mb__before_atomic();
	atomic_inc(&f->refcnt);
	smp_mb__after_atomic();
}

EXPORT_SYMBOL(w1_unregister_family);
EXPORT_SYMBOL(w1_register_family);
gpl-2.0
visi0nary/android_kernel_alps_k05ts_a
drivers/clk/clk-fixed-rate.c
2202
2928
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Fixed rate clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/of.h>

/*
 * DOC: basic fixed-rate clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parents are prepared
 * enable - clk_enable only ensures parents are enabled
 * rate - rate is always a fixed value.  No clk_set_rate support
 * parent - fixed parent.  No clk_set_parent support
 */

#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)

/* Rate never changes: report the stored fixed_rate, ignoring the parent. */
static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	return to_clk_fixed_rate(hw)->fixed_rate;
}

/* Only .recalc_rate is implemented; all other clk ops are unsupported. */
const struct clk_ops clk_fixed_rate_ops = {
	.recalc_rate = clk_fixed_rate_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);

/**
 * clk_register_fixed_rate - register fixed-rate clock with the clock framework
 * @dev: device that is registering this clock
 * @name: name of this clock
 * @parent_name: name of clock's parent
 * @flags: framework-specific flags
 * @fixed_rate: non-adjustable clock rate
 *
 * Returns the new struct clk, or an ERR_PTR on allocation/registration
 * failure.  The clk_fixed_rate wrapper is freed if clk_register() fails.
 */
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		unsigned long fixed_rate)
{
	struct clk_fixed_rate *fixed;
	struct clk *clk;
	struct clk_init_data init;

	/* allocate fixed-rate clock */
	fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
	if (!fixed) {
		pr_err("%s: could not allocate fixed clk\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	init.ops = &clk_fixed_rate_ops;
	init.flags = flags | CLK_IS_BASIC;
	/* zero or one parent; &parent_name is only read during clk_register */
	init.parent_names = (parent_name ? &parent_name: NULL);
	init.num_parents = (parent_name ? 1 : 0);

	/* struct clk_fixed_rate assignments */
	fixed->fixed_rate = fixed_rate;
	fixed->hw.init = &init;

	/* register the clock */
	clk = clk_register(dev, &fixed->hw);

	if (IS_ERR(clk))
		kfree(fixed);

	return clk;
}

#ifdef CONFIG_OF
/**
 * of_fixed_clk_setup() - Setup function for simple fixed rate clock
 *
 * Reads "clock-frequency" (required; silently returns if absent) and an
 * optional "clock-output-names" override, then registers a root clock.
 */
void of_fixed_clk_setup(struct device_node *node)
{
	struct clk *clk;
	const char *clk_name = node->name;
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return;

	of_property_read_string(node, "clock-output-names", &clk_name);

	clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
	if (!IS_ERR(clk))
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
EXPORT_SYMBOL_GPL(of_fixed_clk_setup);
CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
#endif
gpl-2.0
myjang0507/Alphabet
arch/arm/mach-omap2/board-zoom.c
2202
4534
/*
 * Copyright (C) 2009-2010 Texas Instruments Inc.
 * Mikkel Christensen <mlc@ti.com>
 * Felipe Balbi <balbi@ti.com>
 *
 * Modified from mach-omap2/board-ldp.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio.h>
#include <linux/i2c/twl.h>
#include <linux/mtd/nand.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include "common.h"
#include "board-zoom.h"
#include "board-flash.h"
#include "mux.h"
#include "sdram-micron-mt46h32m32lf-6.h"
#include "sdram-hynix-h8mbx00u0mer-0em.h"

/* GPIO that drives the reset line of the EHCI PHY on Zoom3 only */
#define ZOOM3_EHCI_RESET_GPIO 64

#ifdef CONFIG_OMAP_MUX
/* Pad mux overrides for the WLAN interface (IRQ, power enable, MMC3 SDIO) */
static struct omap_board_mux board_mux[] __initdata = {
	/* WLAN IRQ - GPIO 162 */
	OMAP3_MUX(MCBSP1_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
	/* WLAN POWER ENABLE - GPIO 101 */
	OMAP3_MUX(CAM_D2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
	/* WLAN SDIO: MMC3 CMD */
	OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE3 | OMAP_PIN_INPUT_PULLUP),
	/* WLAN SDIO: MMC3 CLK */
	OMAP3_MUX(ETK_CLK, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
	/* WLAN SDIO: MMC3 DAT[0-3] */
	OMAP3_MUX(ETK_D3, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
	OMAP3_MUX(ETK_D4, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
	OMAP3_MUX(ETK_D5, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
	OMAP3_MUX(ETK_D6, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
	{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif

static struct mtd_partition zoom_nand_partitions[] = {
	/* All the partition sizes are listed in terms of NAND block size */
	{
		.name		= "X-Loader-NAND",
		.offset		= 0,
		.size		= 4 * (64 * 2048),	/* 512KB, 0x80000 */
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	},
	{
		.name		= "U-Boot-NAND",
		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x80000 */
		.size		= 10 * (64 * 2048),	/* 1.25MB, 0x140000 */
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	},
	{
		.name		= "Boot Env-NAND",
		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x1c0000 */
		.size		= 2 * (64 * 2048),	/* 256KB, 0x40000 */
	},
	{
		.name		= "Kernel-NAND",
		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x0200000*/
		.size		= 240 * (64 * 2048),	/* 30M, 0x1E00000 */
	},
	{
		.name		= "system",
		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x2000000 */
		.size		= 3328 * (64 * 2048),	/* 416M, 0x1A000000 */
	},
	{
		.name		= "userdata",
		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x1C000000*/
		.size		= 256 * (64 * 2048),	/* 32M, 0x2000000 */
	},
	{
		.name		= "cache",
		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x1E000000*/
		.size		= 256 * (64 * 2048),	/* 32M, 0x2000000 */
	},
};

/* EHCI PHY on USB host port 2; no VCC control GPIO on this board */
static struct usbhs_phy_data phy_data[] __initdata = {
	{
		.port = 2,
		.reset_gpio = ZOOM3_EHCI_RESET_GPIO,
		.vcc_gpio = -EINVAL,
	},
};

/*
 * NOTE(review): port_mode[1] presumably selects the mode for the port
 * numbered 2 in phy_data above (index = port - 1) — verify against the
 * usbhs_omap driver before changing either side.
 */
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
	.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
};

/*
 * Board init shared by Zoom2 and Zoom3: pick the package-specific pad
 * mux (CBB vs CBP), bring up EHCI only on Zoom3, then NAND, debug board,
 * peripherals, the machine-specific SDRC timings, and the display.
 */
static void __init omap_zoom_init(void)
{
	if (machine_is_omap_zoom2()) {
		omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
	} else if (machine_is_omap_zoom3()) {
		omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
		omap_mux_init_gpio(ZOOM3_EHCI_RESET_GPIO, OMAP_PIN_OUTPUT);

		usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
		usbhs_init(&usbhs_bdata);
	}

	board_nand_init(zoom_nand_partitions,
			ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS,
			NAND_BUSWIDTH_16, nand_default_timings);
	zoom_debugboard_init();
	zoom_peripherals_init();

	/* Zoom2 uses Micron SDRAM, Zoom3 uses Hynix - different timings */
	if (machine_is_omap_zoom2())
		omap_sdrc_init(mt46h32m32lf6_sdrc_params,
				mt46h32m32lf6_sdrc_params);
	else if (machine_is_omap_zoom3())
		omap_sdrc_init(h8mbx00u0mer0em_sdrc_params,
				h8mbx00u0mer0em_sdrc_params);

	zoom_display_init();
}

MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
	.atag_offset	= 0x100,
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= omap3430_init_early,
	.init_irq	= omap3_init_irq,
	.handle_irq	= omap3_intc_handle_irq,
	.init_machine	= omap_zoom_init,
	.init_late	= omap3430_init_late,
	.init_time	= omap3_sync32k_timer_init,
	.restart	= omap3xxx_restart,
MACHINE_END

MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
	.atag_offset	= 0x100,
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= omap3630_init_early,
	.init_irq	= omap3_init_irq,
	.handle_irq	= omap3_intc_handle_irq,
	.init_machine	= omap_zoom_init,
	.init_late	= omap3630_init_late,
	.init_time	= omap3_sync32k_timer_init,
	.restart	= omap3xxx_restart,
MACHINE_END
gpl-2.0
flzyup/gnexus_kernel
drivers/acpi/acpica/exstorob.c
3226
7221
/******************************************************************************
 *
 * Module Name: exstorob - AML Interpreter object store support, store to object
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2011, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exstorob")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_store_buffer_to_buffer
 *
 * PARAMETERS:  source_desc         - Source object to copy
 *              target_desc         - Destination object of the copy
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Copy a buffer object to another buffer object.  Reallocates
 *              the target when it is zero-length or backed by static (ACPI
 *              table) storage; otherwise copies in place, truncating the
 *              source if it does not fit.
 *
 ******************************************************************************/
acpi_status
acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
			       union acpi_operand_object *target_desc)
{
	u32 length;
	u8 *buffer;

	ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);

	/* If Source and Target are the same, just return */

	if (source_desc == target_desc) {
		return_ACPI_STATUS(AE_OK);
	}

	/* We know that source_desc is a buffer by now */

	buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
	length = source_desc->buffer.length;

	/*
	 * If target is a buffer of length zero or is a static buffer,
	 * allocate a new buffer of the proper length
	 */
	if ((target_desc->buffer.length == 0) ||
	    (target_desc->common.flags & AOPOBJ_STATIC_POINTER)) {
		target_desc->buffer.pointer = ACPI_ALLOCATE(length);
		if (!target_desc->buffer.pointer) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		target_desc->buffer.length = length;
	}

	/* Copy source buffer to target buffer */

	if (length <= target_desc->buffer.length) {

		/* Clear existing buffer and copy in the new one */

		ACPI_MEMSET(target_desc->buffer.pointer, 0,
			    target_desc->buffer.length);
		ACPI_MEMCPY(target_desc->buffer.pointer, buffer, length);

#ifdef ACPI_OBSOLETE_BEHAVIOR
		/*
		 * NOTE: ACPI versions up to 3.0 specified that the buffer must be
		 * truncated if the string is smaller than the buffer. However, "other"
		 * implementations of ACPI never did this and thus became the defacto
		 * standard. ACPI 3.0_a changes this behavior such that the buffer
		 * is no longer truncated.
		 */

		/*
		 * OBSOLETE BEHAVIOR:
		 * If the original source was a string, we must truncate the buffer,
		 * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer
		 * copy must not truncate the original buffer.
		 */
		if (original_src_type == ACPI_TYPE_STRING) {

			/* Set the new length of the target */

			target_desc->buffer.length = length;
		}
#endif
	} else {
		/* Truncate the source, copy only what will fit */

		ACPI_MEMCPY(target_desc->buffer.pointer, buffer,
			    target_desc->buffer.length);

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Truncating source buffer from %X to %X\n",
				  length, target_desc->buffer.length));
	}

	/* Copy flags */

	target_desc->buffer.flags = source_desc->buffer.flags;
	target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_store_string_to_string
 *
 * PARAMETERS:  source_desc         - Source object to copy
 *              target_desc         - Destination object of the copy
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Copy a String object to another String object.  Reuses the
 *              existing target storage when the source (plus terminator)
 *              fits and the storage is not static; otherwise frees any
 *              non-static buffer and allocates a fresh zeroed one.
 *
 ******************************************************************************/
acpi_status
acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
			       union acpi_operand_object *target_desc)
{
	u32 length;
	u8 *buffer;

	ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);

	/* If Source and Target are the same, just return */

	if (source_desc == target_desc) {
		return_ACPI_STATUS(AE_OK);
	}

	/* We know that source_desc is a string by now */

	buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
	length = source_desc->string.length;

	/*
	 * Replace existing string value if it will fit and the string
	 * pointer is not a static pointer (part of an ACPI table)
	 */
	if ((length < target_desc->string.length) &&
	    (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
		/*
		 * String will fit in existing non-static buffer.
		 * Clear old string and copy in the new one
		 */
		ACPI_MEMSET(target_desc->string.pointer, 0,
			    (acpi_size) target_desc->string.length + 1);
		ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
	} else {
		/*
		 * Free the current buffer, then allocate a new buffer
		 * large enough to hold the value
		 */
		if (target_desc->string.pointer &&
		    (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {

			/* Only free if not a pointer into the DSDT */

			ACPI_FREE(target_desc->string.pointer);
		}

		/* +1 for the NUL terminator; buffer is pre-zeroed */

		target_desc->string.pointer =
		    ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);

		if (!target_desc->string.pointer) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
		ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
	}

	/* Set the new target length */

	target_desc->string.length = length;
	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
TeamRegular/android_kernel_lge_msm8916
drivers/memory/emif.c
4506
56039
/* * EMIF driver * * Copyright (C) 2012 Texas Instruments, Inc. * * Aneesh V <aneesh@ti.com> * Santosh Shilimkar <santosh.shilimkar@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/platform_data/emif_plat.h> #include <linux/io.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/module.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/pm.h> #include <memory/jedec_ddr.h> #include "emif.h" #include "of_memory.h" /** * struct emif_data - Per device static data for driver's use * @duplicate: Whether the DDR devices attached to this EMIF * instance are exactly same as that on EMIF1. In * this case we can save some memory and processing * @temperature_level: Maximum temperature of LPDDR2 devices attached * to this EMIF - read from MR4 register. If there * are two devices attached to this EMIF, this * value is the maximum of the two temperature * levels. * @node: node in the device list * @base: base address of memory-mapped IO registers. * @dev: device pointer. * @addressing table with addressing information from the spec * @regs_cache: An array of 'struct emif_regs' that stores * calculated register values for different * frequencies, to avoid re-calculating them on * each DVFS transition. * @curr_regs: The set of register values used in the last * frequency change (i.e. corresponding to the * frequency in effect at the moment) * @plat_data: Pointer to saved platform data. 
* @debugfs_root: dentry to the root folder for EMIF in debugfs * @np_ddr: Pointer to ddr device tree node */ struct emif_data { u8 duplicate; u8 temperature_level; u8 lpmode; struct list_head node; unsigned long irq_state; void __iomem *base; struct device *dev; const struct lpddr2_addressing *addressing; struct emif_regs *regs_cache[EMIF_MAX_NUM_FREQUENCIES]; struct emif_regs *curr_regs; struct emif_platform_data *plat_data; struct dentry *debugfs_root; struct device_node *np_ddr; }; static struct emif_data *emif1; static spinlock_t emif_lock; static unsigned long irq_state; static u32 t_ck; /* DDR clock period in ps */ static LIST_HEAD(device_list); #ifdef CONFIG_DEBUG_FS static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif, struct emif_regs *regs) { u32 type = emif->plat_data->device_info->type; u32 ip_rev = emif->plat_data->ip_rev; seq_printf(s, "EMIF register cache dump for %dMHz\n", regs->freq/1000000); seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw); seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw); seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw); seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw); if (ip_rev == EMIF_4D) { seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n", regs->read_idle_ctrl_shdw_normal); seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n", regs->read_idle_ctrl_shdw_volt_ramp); } else if (ip_rev == EMIF_4D5) { seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n", regs->dll_calib_ctrl_shdw_normal); seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n", regs->dll_calib_ctrl_shdw_volt_ramp); } if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) { seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n", regs->ref_ctrl_shdw_derated); seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n", regs->sdram_tim1_shdw_derated); seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n", regs->sdram_tim3_shdw_derated); } } static int 
emif_regdump_show(struct seq_file *s, void *unused) { struct emif_data *emif = s->private; struct emif_regs **regs_cache; int i; if (emif->duplicate) regs_cache = emif1->regs_cache; else regs_cache = emif->regs_cache; for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) { do_emif_regdump_show(s, emif, regs_cache[i]); seq_printf(s, "\n"); } return 0; } static int emif_regdump_open(struct inode *inode, struct file *file) { return single_open(file, emif_regdump_show, inode->i_private); } static const struct file_operations emif_regdump_fops = { .open = emif_regdump_open, .read = seq_read, .release = single_release, }; static int emif_mr4_show(struct seq_file *s, void *unused) { struct emif_data *emif = s->private; seq_printf(s, "MR4=%d\n", emif->temperature_level); return 0; } static int emif_mr4_open(struct inode *inode, struct file *file) { return single_open(file, emif_mr4_show, inode->i_private); } static const struct file_operations emif_mr4_fops = { .open = emif_mr4_open, .read = seq_read, .release = single_release, }; static int __init_or_module emif_debugfs_init(struct emif_data *emif) { struct dentry *dentry; int ret; dentry = debugfs_create_dir(dev_name(emif->dev), NULL); if (!dentry) { ret = -ENOMEM; goto err0; } emif->debugfs_root = dentry; dentry = debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif, &emif_regdump_fops); if (!dentry) { ret = -ENOMEM; goto err1; } dentry = debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif, &emif_mr4_fops); if (!dentry) { ret = -ENOMEM; goto err1; } return 0; err1: debugfs_remove_recursive(emif->debugfs_root); err0: return ret; } static void __exit emif_debugfs_exit(struct emif_data *emif) { debugfs_remove_recursive(emif->debugfs_root); emif->debugfs_root = NULL; } #else static inline int __init_or_module emif_debugfs_init(struct emif_data *emif) { return 0; } static inline void __exit emif_debugfs_exit(struct emif_data *emif) { } #endif /* * Calculate the period of DDR clock from 
frequency value */ static void set_ddr_clk_period(u32 freq) { /* Divide 10^12 by frequency to get period in ps */ t_ck = (u32)DIV_ROUND_UP_ULL(1000000000000ull, freq); } /* * Get bus width used by EMIF. Note that this may be different from the * bus width of the DDR devices used. For instance two 16-bit DDR devices * may be connected to a given CS of EMIF. In this case bus width as far * as EMIF is concerned is 32, where as the DDR bus width is 16 bits. */ static u32 get_emif_bus_width(struct emif_data *emif) { u32 width; void __iomem *base = emif->base; width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK) >> NARROW_MODE_SHIFT; width = width == 0 ? 32 : 16; return width; } /* * Get the CL from SDRAM_CONFIG register */ static u32 get_cl(struct emif_data *emif) { u32 cl; void __iomem *base = emif->base; cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT; return cl; } static void set_lpmode(struct emif_data *emif, u8 lpmode) { u32 temp; void __iomem *base = emif->base; /* * Workaround for errata i743 - LPDDR2 Power-Down State is Not * Efficient * * i743 DESCRIPTION: * The EMIF supports power-down state for low power. The EMIF * automatically puts the SDRAM into power-down after the memory is * not accessed for a defined number of cycles and the * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4. * As the EMIF supports automatic output impedance calibration, a ZQ * calibration long command is issued every time it exits active * power-down and precharge power-down modes. The EMIF waits and * blocks any other command during this calibration. * The EMIF does not allow selective disabling of ZQ calibration upon * exit of power-down mode. Due to very short periods of power-down * cycles, ZQ calibration overhead creates bandwidth issues and * increases overall system power consumption. On the other hand, * issuing ZQ calibration long commands when exiting self-refresh is * still required. 
* * WORKAROUND * Because there is no power consumption benefit of the power-down due * to the calibration and there is a performance risk, the guideline * is to not allow power-down state and, therefore, to not have set * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4. */ if ((emif->plat_data->ip_rev == EMIF_4D) && (EMIF_LP_MODE_PWR_DN == lpmode)) { WARN_ONCE(1, "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by" "erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n"); /* rollback LP_MODE to Self-refresh mode */ lpmode = EMIF_LP_MODE_SELF_REFRESH; } temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL); temp &= ~LP_MODE_MASK; temp |= (lpmode << LP_MODE_SHIFT); writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL); } static void do_freq_update(void) { struct emif_data *emif; /* * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE * * i728 DESCRIPTION: * The EMIF automatically puts the SDRAM into self-refresh mode * after the EMIF has not performed accesses during * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set * to 0x2. If during a small window the following three events * occur: * - The SR_TIMING counter expires * - And frequency change is requested * - And OCP access is requested * Then it causes instable clock on the DDR interface. * * WORKAROUND * To avoid the occurrence of the three events, the workaround * is to disable the self-refresh when requesting a frequency * change. Before requesting a frequency change the software must * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. 
When the * frequency change has been done, the software can reprogram * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2 */ list_for_each_entry(emif, &device_list, node) { if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH) set_lpmode(emif, EMIF_LP_MODE_DISABLE); } /* * TODO: Do FREQ_UPDATE here when an API * is available for this as part of the new * clock framework */ list_for_each_entry(emif, &device_list, node) { if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH) set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH); } } /* Find addressing table entry based on the device's type and density */ static const struct lpddr2_addressing *get_addressing_table( const struct ddr_device_info *device_info) { u32 index, type, density; type = device_info->type; density = device_info->density; switch (type) { case DDR_TYPE_LPDDR2_S4: index = density - 1; break; case DDR_TYPE_LPDDR2_S2: switch (density) { case DDR_DENSITY_1Gb: case DDR_DENSITY_2Gb: index = density + 3; break; default: index = density - 1; } break; default: return NULL; } return &lpddr2_jedec_addressing_table[index]; } /* * Find the the right timing table from the array of timing * tables of the device using DDR clock frequency */ static const struct lpddr2_timings *get_timings_table(struct emif_data *emif, u32 freq) { u32 i, min, max, freq_nearest; const struct lpddr2_timings *timings = NULL; const struct lpddr2_timings *timings_arr = emif->plat_data->timings; struct device *dev = emif->dev; /* Start with a very high frequency - 1GHz */ freq_nearest = 1000000000; /* * Find the timings table such that: * 1. the frequency range covers the required frequency(safe) AND * 2. 
the max_freq is closest to the required frequency(optimal) */ for (i = 0; i < emif->plat_data->timings_arr_size; i++) { max = timings_arr[i].max_freq; min = timings_arr[i].min_freq; if ((freq >= min) && (freq <= max) && (max < freq_nearest)) { freq_nearest = max; timings = &timings_arr[i]; } } if (!timings) dev_err(dev, "%s: couldn't find timings for - %dHz\n", __func__, freq); dev_dbg(dev, "%s: timings table: freq %d, speed bin freq %d\n", __func__, freq, freq_nearest); return timings; } static u32 get_sdram_ref_ctrl_shdw(u32 freq, const struct lpddr2_addressing *addressing) { u32 ref_ctrl_shdw = 0, val = 0, freq_khz, t_refi; /* Scale down frequency and t_refi to avoid overflow */ freq_khz = freq / 1000; t_refi = addressing->tREFI_ns / 100; /* * refresh rate to be set is 'tREFI(in us) * freq in MHz * division by 10000 to account for change in units */ val = t_refi * freq_khz / 10000; ref_ctrl_shdw |= val << REFRESH_RATE_SHIFT; return ref_ctrl_shdw; } static u32 get_sdram_tim_1_shdw(const struct lpddr2_timings *timings, const struct lpddr2_min_tck *min_tck, const struct lpddr2_addressing *addressing) { u32 tim1 = 0, val = 0; val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1; tim1 |= val << T_WTR_SHIFT; if (addressing->num_banks == B8) val = DIV_ROUND_UP(timings->tFAW, t_ck*4); else val = max(min_tck->tRRD, DIV_ROUND_UP(timings->tRRD, t_ck)); tim1 |= (val - 1) << T_RRD_SHIFT; val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab, t_ck) - 1; tim1 |= val << T_RC_SHIFT; val = max(min_tck->tRASmin, DIV_ROUND_UP(timings->tRAS_min, t_ck)); tim1 |= (val - 1) << T_RAS_SHIFT; val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1; tim1 |= val << T_WR_SHIFT; val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD, t_ck)) - 1; tim1 |= val << T_RCD_SHIFT; val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab, t_ck)) - 1; tim1 |= val << T_RP_SHIFT; return tim1; } static u32 get_sdram_tim_1_shdw_derated(const struct lpddr2_timings *timings, const struct 
lpddr2_min_tck *min_tck, const struct lpddr2_addressing *addressing) { u32 tim1 = 0, val = 0; val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1; tim1 = val << T_WTR_SHIFT; /* * tFAW is approximately 4 times tRRD. So add 1875*4 = 7500ps * to tFAW for de-rating */ if (addressing->num_banks == B8) { val = DIV_ROUND_UP(timings->tFAW + 7500, 4 * t_ck) - 1; } else { val = DIV_ROUND_UP(timings->tRRD + 1875, t_ck); val = max(min_tck->tRRD, val) - 1; } tim1 |= val << T_RRD_SHIFT; val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab + 1875, t_ck); tim1 |= (val - 1) << T_RC_SHIFT; val = DIV_ROUND_UP(timings->tRAS_min + 1875, t_ck); val = max(min_tck->tRASmin, val) - 1; tim1 |= val << T_RAS_SHIFT; val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1; tim1 |= val << T_WR_SHIFT; val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD + 1875, t_ck)); tim1 |= (val - 1) << T_RCD_SHIFT; val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab + 1875, t_ck)); tim1 |= (val - 1) << T_RP_SHIFT; return tim1; } static u32 get_sdram_tim_2_shdw(const struct lpddr2_timings *timings, const struct lpddr2_min_tck *min_tck, const struct lpddr2_addressing *addressing, u32 type) { u32 tim2 = 0, val = 0; val = min_tck->tCKE - 1; tim2 |= val << T_CKE_SHIFT; val = max(min_tck->tRTP, DIV_ROUND_UP(timings->tRTP, t_ck)) - 1; tim2 |= val << T_RTP_SHIFT; /* tXSNR = tRFCab_ps + 10 ns(tRFCab_ps for LPDDR2). */ val = DIV_ROUND_UP(addressing->tRFCab_ps + 10000, t_ck) - 1; tim2 |= val << T_XSNR_SHIFT; /* XSRD same as XSNR for LPDDR2 */ tim2 |= val << T_XSRD_SHIFT; val = max(min_tck->tXP, DIV_ROUND_UP(timings->tXP, t_ck)) - 1; tim2 |= val << T_XP_SHIFT; return tim2; } static u32 get_sdram_tim_3_shdw(const struct lpddr2_timings *timings, const struct lpddr2_min_tck *min_tck, const struct lpddr2_addressing *addressing, u32 type, u32 ip_rev, u32 derated) { u32 tim3 = 0, val = 0, t_dqsck; val = timings->tRAS_max_ns / addressing->tREFI_ns - 1; val = val > 0xF ? 
0xF : val; tim3 |= val << T_RAS_MAX_SHIFT; val = DIV_ROUND_UP(addressing->tRFCab_ps, t_ck) - 1; tim3 |= val << T_RFC_SHIFT; t_dqsck = (derated == EMIF_DERATED_TIMINGS) ? timings->tDQSCK_max_derated : timings->tDQSCK_max; if (ip_rev == EMIF_4D5) val = DIV_ROUND_UP(t_dqsck + 1000, t_ck) - 1; else val = DIV_ROUND_UP(t_dqsck, t_ck) - 1; tim3 |= val << T_TDQSCKMAX_SHIFT; val = DIV_ROUND_UP(timings->tZQCS, t_ck) - 1; tim3 |= val << ZQ_ZQCS_SHIFT; val = DIV_ROUND_UP(timings->tCKESR, t_ck); val = max(min_tck->tCKESR, val) - 1; tim3 |= val << T_CKESR_SHIFT; if (ip_rev == EMIF_4D5) { tim3 |= (EMIF_T_CSTA - 1) << T_CSTA_SHIFT; val = DIV_ROUND_UP(EMIF_T_PDLL_UL, 128) - 1; tim3 |= val << T_PDLL_UL_SHIFT; } return tim3; } static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing, bool cs1_used, bool cal_resistors_per_cs) { u32 zq = 0, val = 0; val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns; zq |= val << ZQ_REFINTERVAL_SHIFT; val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1; zq |= val << ZQ_ZQCL_MULT_SHIFT; val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1; zq |= val << ZQ_ZQINIT_MULT_SHIFT; zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT; if (cal_resistors_per_cs) zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT; else zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT; zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */ val = cs1_used ? 
1 : 0; zq |= val << ZQ_CS1EN_SHIFT; return zq; } static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing, const struct emif_custom_configs *custom_configs, bool cs1_used, u32 sdram_io_width, u32 emif_bus_width) { u32 alert = 0, interval, devcnt; if (custom_configs && (custom_configs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)) interval = custom_configs->temp_alert_poll_interval_ms; else interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS; interval *= 1000000; /* Convert to ns */ interval /= addressing->tREFI_ns; /* Convert to refresh cycles */ alert |= (interval << TA_REFINTERVAL_SHIFT); /* * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width * also to this form and subtract to get TA_DEVCNT, which is * in log2(x) form. */ emif_bus_width = __fls(emif_bus_width) - 1; devcnt = emif_bus_width - sdram_io_width; alert |= devcnt << TA_DEVCNT_SHIFT; /* DEVWDT is in 'log2(x) - 3' form */ alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT; alert |= 1 << TA_SFEXITEN_SHIFT; alert |= 1 << TA_CS0EN_SHIFT; alert |= (cs1_used ? 
1 : 0) << TA_CS1EN_SHIFT; return alert; } static u32 get_read_idle_ctrl_shdw(u8 volt_ramp) { u32 idle = 0, val = 0; /* * Maximum value in normal conditions and increased frequency * when voltage is ramping */ if (volt_ramp) val = READ_IDLE_INTERVAL_DVFS / t_ck / 64 - 1; else val = 0x1FF; /* * READ_IDLE_CTRL register in EMIF4D has same offset and fields * as DLL_CALIB_CTRL in EMIF4D5, so use the same shifts */ idle |= val << DLL_CALIB_INTERVAL_SHIFT; idle |= EMIF_READ_IDLE_LEN_VAL << ACK_WAIT_SHIFT; return idle; } static u32 get_dll_calib_ctrl_shdw(u8 volt_ramp) { u32 calib = 0, val = 0; if (volt_ramp == DDR_VOLTAGE_RAMPING) val = DLL_CALIB_INTERVAL_DVFS / t_ck / 16 - 1; else val = 0; /* Disabled when voltage is stable */ calib |= val << DLL_CALIB_INTERVAL_SHIFT; calib |= DLL_CALIB_ACK_WAIT_VAL << ACK_WAIT_SHIFT; return calib; } static u32 get_ddr_phy_ctrl_1_attilaphy_4d(const struct lpddr2_timings *timings, u32 freq, u8 RL) { u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY, val = 0; val = RL + DIV_ROUND_UP(timings->tDQSCK_max, t_ck) - 1; phy |= val << READ_LATENCY_SHIFT_4D; if (freq <= 100000000) val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY; else if (freq <= 200000000) val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY; else val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY; phy |= val << DLL_SLAVE_DLY_CTRL_SHIFT_4D; return phy; } static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl) { u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY, half_delay; /* * DLL operates at 266 MHz. 
If DDR frequency is near 266 MHz, * half-delay is not needed else set half-delay */ if (freq >= 265000000 && freq < 267000000) half_delay = 0; else half_delay = 1; phy |= half_delay << DLL_HALF_DELAY_SHIFT_4D5; phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS, t_ck) - 1) << READ_LATENCY_SHIFT_4D5); return phy; } static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void) { u32 fifo_we_slave_ratio; fifo_we_slave_ratio = DIV_ROUND_CLOSEST( EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck); return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 | fifo_we_slave_ratio << 22; } static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void) { u32 fifo_we_slave_ratio; fifo_we_slave_ratio = DIV_ROUND_CLOSEST( EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck); return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 | fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23; } static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void) { u32 fifo_we_slave_ratio; fifo_we_slave_ratio = DIV_ROUND_CLOSEST( EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck); return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 | fifo_we_slave_ratio << 13; } static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev) { u32 pwr_mgmt_ctrl = 0, timeout; u32 lpmode = EMIF_LP_MODE_SELF_REFRESH; u32 timeout_perf = EMIF_LP_MODE_TIMEOUT_PERFORMANCE; u32 timeout_pwr = EMIF_LP_MODE_TIMEOUT_POWER; u32 freq_threshold = EMIF_LP_MODE_FREQ_THRESHOLD; u32 mask; u8 shift; struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs; if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) { lpmode = cust_cfgs->lpmode; timeout_perf = cust_cfgs->lpmode_timeout_performance; timeout_pwr = cust_cfgs->lpmode_timeout_power; freq_threshold = cust_cfgs->lpmode_freq_threshold; } /* Timeout based on DDR frequency */ timeout = freq >= freq_threshold ? 
timeout_perf : timeout_pwr; /* * The value to be set in register is "log2(timeout) - 3" * if timeout < 16 load 0 in register * if timeout is not a power of 2, round to next highest power of 2 */ if (timeout < 16) { timeout = 0; } else { if (timeout & (timeout - 1)) timeout <<= 1; timeout = __fls(timeout) - 3; } switch (lpmode) { case EMIF_LP_MODE_CLOCK_STOP: shift = CS_TIM_SHIFT; mask = CS_TIM_MASK; break; case EMIF_LP_MODE_SELF_REFRESH: /* Workaround for errata i735 */ if (timeout < 6) timeout = 6; shift = SR_TIM_SHIFT; mask = SR_TIM_MASK; break; case EMIF_LP_MODE_PWR_DN: shift = PD_TIM_SHIFT; mask = PD_TIM_MASK; break; case EMIF_LP_MODE_DISABLE: default: mask = 0; shift = 0; break; } /* Round to maximum in case of overflow, BUT warn! */ if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) { pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n", lpmode, timeout_perf, timeout_pwr, freq_threshold); WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n", timeout, mask >> shift); timeout = mask >> shift; } /* Setup required timing */ pwr_mgmt_ctrl = (timeout << shift) & mask; /* setup a default mask for rest of the modes */ pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) & ~mask; /* No CS_TIM in EMIF_4D5 */ if (ip_rev == EMIF_4D5) pwr_mgmt_ctrl &= ~CS_TIM_MASK; pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT; return pwr_mgmt_ctrl; } /* * Get the temperature level of the EMIF instance: * Reads the MR4 register of attached SDRAM parts to find out the temperature * level. If there are two parts attached(one on each CS), then the temperature * level for the EMIF instance is the higher of the two temperatures. 
 */
static void get_temperature_level(struct emif_data *emif)
{
	u32		temp, temperature_level;
	void __iomem	*base;

	base = emif->base;

	/* Read mode register 4 */
	writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
	temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
	temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
				MR4_SDRAM_REF_RATE_SHIFT;

	if (emif->plat_data->device_info->cs1_used) {
		/* Repeat the MR4 read for the device on CS1 */
		writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
		temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
		temp = (temp & MR4_SDRAM_REF_RATE_MASK)
				>> MR4_SDRAM_REF_RATE_SHIFT;
		/* the instance is rated by its hotter device */
		temperature_level = max(temp, temperature_level);
	}

	/* treat everything less than nominal(3) in MR4 as nominal */
	if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
		temperature_level = SDRAM_TEMP_NOMINAL;

	/* if we get reserved value in MR4 persist with the existing value */
	if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
		emif->temperature_level = temperature_level;
}

/*
 * Program EMIF shadow registers that are not dependent on temperature
 * or voltage
 */
static void setup_registers(struct emif_data *emif, struct emif_regs *regs)
{
	void __iomem	*base = emif->base;

	writel(regs->sdram_tim2_shdw, base + EMIF_SDRAM_TIMING_2_SHDW);
	writel(regs->phy_ctrl_1_shdw, base + EMIF_DDR_PHY_CTRL_1_SHDW);
	writel(regs->pwr_mgmt_ctrl_shdw,
	       base + EMIF_POWER_MANAGEMENT_CTRL_SHDW);

	/* Settings specific for EMIF4D5 */
	if (emif->plat_data->ip_rev != EMIF_4D5)
		return;
	writel(regs->ext_phy_ctrl_2_shdw, base + EMIF_EXT_PHY_CTRL_2_SHDW);
	writel(regs->ext_phy_ctrl_3_shdw, base + EMIF_EXT_PHY_CTRL_3_SHDW);
	writel(regs->ext_phy_ctrl_4_shdw, base + EMIF_EXT_PHY_CTRL_4_SHDW);
}

/*
 * When voltage ramps dll calibration and forced read idle should
 * happen more often
 */
static void setup_volt_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs, u32 volt_state)
{
	u32		calib_ctrl;
	void __iomem	*base = emif->base;

	/*
	 * EMIF_READ_IDLE_CTRL in EMIF4D refers to the same register as
	 * EMIF_DLL_CALIB_CTRL in EMIF4D5 and dll_calib_ctrl_shadow_*
	 * is an alias of the respective read_idle_ctrl_shdw_* (members of
	 * a union). So, the below code takes care of both cases
	 */
	if (volt_state == DDR_VOLTAGE_RAMPING)
		calib_ctrl = regs->dll_calib_ctrl_shdw_volt_ramp;
	else
		calib_ctrl = regs->dll_calib_ctrl_shdw_normal;

	writel(calib_ctrl, base + EMIF_DLL_CALIB_CTRL_SHDW);
}

/*
 * setup_temperature_sensitive_regs() - set the timings for temperature
 * sensitive registers. This happens once at initialisation time based
 * on the temperature at boot time and subsequently based on the temperature
 * alert interrupt. Temperature alert can happen when the temperature
 * increases or drops. So this function can have the effect of either
 * derating the timings or going back to nominal values.
 */
static void setup_temperature_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs)
{
	u32		tim1, tim3, ref_ctrl, type;
	void __iomem	*base = emif->base;
	u32		temperature;

	type = emif->plat_data->device_info->type;

	/* Start from the nominal (non-derated) shadow values */
	tim1 = regs->sdram_tim1_shdw;
	tim3 = regs->sdram_tim3_shdw;
	ref_ctrl = regs->ref_ctrl_shdw;

	/* No de-rating for non-lpddr2 devices */
	if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
		goto out;

	temperature = emif->temperature_level;
	if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
		/* Only the refresh rate needs de-rating */
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	} else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
		/* Both refresh rate and AC timings need de-rating */
		tim1 = regs->sdram_tim1_shdw_derated;
		tim3 = regs->sdram_tim3_shdw_derated;
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	}

out:
	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
}

/*
 * Hard-IRQ part of the temperature alert handling: re-read the temperature
 * and either apply de-rated timings immediately (temperature rising) or
 * defer to the threaded handler (temperature falling, or shutdown needed).
 */
static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
{
	u32		old_temp_level;
	irqreturn_t	ret = IRQ_HANDLED;
	struct emif_custom_configs *custom_configs;

	spin_lock_irqsave(&emif_lock, irq_state);
	old_temp_level = emif->temperature_level;
	get_temperature_level(emif);

	if
	(unlikely(emif->temperature_level == old_temp_level)) {
		/* No change in temperature level - nothing to do */
		goto out;
	} else if (!emif->curr_regs) {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
		goto out;
	}

	custom_configs = emif->plat_data->custom_configs;

	/*
	 * IF we detect higher than "nominal rating" from DDR sensor
	 * on an unsupported DDR part, shutdown system
	 */
	if (custom_configs && !(custom_configs->mask &
				EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
		if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
			dev_err(emif->dev,
				"%s:NOT Extended temperature capable memory."
				"Converting MR4=0x%02x as shutdown event\n",
				__func__, emif->temperature_level);
			/*
			 * Temperature far too high - do kernel_power_off()
			 * from thread context
			 */
			emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
			ret = IRQ_WAKE_THREAD;
			goto out;
		}
	}

	if (emif->temperature_level < old_temp_level ||
		emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		/*
		 * Temperature coming down - defer handling to thread OR
		 * Temperature far too high - do kernel_power_off() from
		 * thread context
		 */
		ret = IRQ_WAKE_THREAD;
	} else {
		/* Temperature is going up - handle immediately */
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	}

out:
	spin_unlock_irqrestore(&emif_lock, irq_state);
	return ret;
}

/*
 * Hard IRQ handler: ack SYS (and LL, if present) OCP interrupt status and
 * dispatch temperature alerts; may request the threaded handler via
 * IRQ_WAKE_THREAD.
 */
static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
{
	u32			interrupts;
	struct emif_data	*emif = dev_id;
	void __iomem		*base = emif->base;
	struct device		*dev = emif->dev;
	irqreturn_t		ret = IRQ_HANDLED;

	/* Save the status and clear it */
	interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);

	/*
	 * Handle temperature alert
	 * Temperature alert should be same for all ports
	 * So, it's enough to process it only for one of the ports
	 */
	if (interrupts & TA_SYS_MASK)
		ret = handle_temp_alert(base, emif);

	if (interrupts & ERR_SYS_MASK)
		dev_err(dev, "Access error from SYS port - %x\n", interrupts);

	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* Save the status and clear it */
		interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);

		if (interrupts & ERR_LL_MASK)
			dev_err(dev, "Access error from LL port - %x\n",
				interrupts);
	}

	return ret;
}

/*
 * Threaded IRQ handler: performs the actions that may sleep or must not
 * run in hard-IRQ context - emergency power-off/restart on over-temperature,
 * or applying de-rated/nominal timings when the temperature came down.
 */
static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
{
	struct emif_data	*emif = dev_id;

	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

		/* If we have Power OFF ability, use it, else try restarting */
		if (pm_power_off) {
			kernel_power_off();
		} else {
			WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
			kernel_restart("SDRAM Over-temp Emergency restart");
		}
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&emif_lock, irq_state);

	if (emif->curr_regs) {
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	} else {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
	}

	spin_unlock_irqrestore(&emif_lock, irq_state);

	return IRQ_HANDLED;
}

/* Ack any pending SYS (and LL) OCP interrupts by writing status back */
static void clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
		base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
			base + EMIF_LL_OCP_INTERRUPT_STATUS);
}

/* Mask all EMIF interrupt sources, then ack anything still pending */
static void disable_and_clear_all_interrupts(struct emif_data *emif)
{
	void __iomem		*base = emif->base;

	/* Disable all interrupts */
	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
		base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
			base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);

	/* Clear all interrupts */
	clear_all_interrupts(emif);
}

/*
 * Enable error (and, for LPDDR2, temperature-alert) interrupts and install
 * the hard + threaded IRQ handlers.
 */
static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
{
	u32		interrupts, type;
	void __iomem	*base = emif->base;

	type = emif->plat_data->device_info->type;

	clear_all_interrupts(emif);

	/* Enable interrupts for SYS interface */
	interrupts = EN_ERR_SYS_MASK;
	/* Temperature alert is only meaningful for LPDDR2 parts */
	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
		interrupts |= EN_TA_SYS_MASK;
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);

	/* Enable interrupts for LL interface */
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* TA need not be enabled for LL */
		interrupts = EN_ERR_LL_MASK;
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
	}

	/* setup IRQ handlers */
	return devm_request_threaded_irq(emif->dev, irq,
				    emif_interrupt_handler,
				    emif_threaded_isr,
				    0, dev_name(emif->dev),
				    emif);
}

/*
 * One-time (frequency-independent) hardware programming done at probe:
 * power-management policy, ZQ calibration, initial temperature reading,
 * temperature-alert polling and - for the INTELLI PHY - the static
 * external PHY control registers.
 */
static void __init_or_module emif_onetime_settings(struct emif_data *emif)
{
	u32				pwr_mgmt_ctrl, zq, temp_alert_cfg;
	void __iomem			*base = emif->base;
	const struct lpddr2_addressing	*addressing;
	const struct ddr_device_info	*device_info;

	device_info = emif->plat_data->device_info;
	addressing = get_addressing_table(device_info);

	/*
	 * Init power management settings
	 * We don't know the frequency yet. Use a high frequency
	 * value for a conservative timeout setting
	 */
	pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
			emif->plat_data->ip_rev);
	emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
	writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);

	/* Init ZQ calibration settings */
	zq = get_zq_config_reg(addressing, device_info->cs1_used,
		device_info->cal_resistors_per_cs);
	writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);

	/* Read the boot-time temperature level from MR4 */
	get_temperature_level(emif);
	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

	/* Init temperature polling */
	temp_alert_cfg = get_temp_alert_config(addressing,
		emif->plat_data->custom_configs, device_info->cs1_used,
		device_info->io_width, get_emif_bus_width(emif));
	writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);

	/*
	 * Program external PHY control registers that are not frequency
	 * dependent
	 */
	if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
		return;
	writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
	writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
	writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
	writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
	writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
	writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
	writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
	writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
	writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
	writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
	writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
	writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
	writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
	writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
	writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
	writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
	writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
	writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
	writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
	writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
	writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
}

/* Fall back to the JEDEC default timings table */
static void get_default_timings(struct emif_data *emif)
{
	struct emif_platform_data *pd = emif->plat_data;

	pd->timings = lpddr2_jedec_timings;
	pd->timings_arr_size = ARRAY_SIZE(lpddr2_jedec_timings);

	dev_warn(emif->dev, "%s: using default timings\n", __func__);
}

/*
 * Sanity-check the DDR device description against the ranges this driver
 * supports, and the EMIF/PHY revision combinations it knows about.
 * Returns non-zero when valid.
 */
static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
		u32 ip_rev, struct device *dev)
{
	int valid;

	valid = (type == DDR_TYPE_LPDDR2_S4 ||
			type == DDR_TYPE_LPDDR2_S2)
		&& (density >= DDR_DENSITY_64Mb
			&& density <= DDR_DENSITY_8Gb)
		&& (io_width >= DDR_IO_WIDTH_8
			&& io_width <= DDR_IO_WIDTH_32);

	/* Combinations of EMIF and PHY revisions that we support today */
	switch (ip_rev) {
	case EMIF_4D:
		valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
		break;
	case EMIF_4D5:
		valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
		break;
	default:
		valid = 0;
	}

	if (!valid)
		dev_err(dev, "%s: invalid DDR details\n", __func__);
	return valid;
}

/*
 * Validate optional custom configs: a non-disabled low-power mode needs
 * all its timeout/threshold values; temperature polling needs an interval.
 */
static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
		struct device *dev)
{
	int valid = 1;

	if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
		(cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
		valid = cust_cfgs->lpmode_freq_threshold &&
			cust_cfgs->lpmode_timeout_performance &&
			cust_cfgs->lpmode_timeout_power;

	if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
		valid = valid && cust_cfgs->temp_alert_poll_interval_ms;

	if (!valid)
		dev_warn(dev, "%s: invalid custom configs\n", __func__);

	return valid;
}

#if defined(CONFIG_OF)
/*
 * Parse the optional custom-config properties from the EMIF device-tree
 * node; on success attaches a validated emif_custom_configs to plat_data.
 */
static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
		struct emif_data *emif)
{
	struct emif_custom_configs	*cust_cfgs = NULL;
	int				len;
	const __be32			*lpmode, *poll_intvl;

	lpmode = of_get_property(np_emif, "low-power-mode", &len);
	poll_intvl = of_get_property(np_emif,
			"temp-alert-poll-interval", &len);

	if (lpmode || poll_intvl)
		cust_cfgs = devm_kzalloc(emif->dev, sizeof(*cust_cfgs),
			GFP_KERNEL);

	if (!cust_cfgs)
		return;

	if (lpmode) {
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_LPMODE;
		cust_cfgs->lpmode = be32_to_cpup(lpmode);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-performance",
				&cust_cfgs->lpmode_timeout_performance);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-power",
				&cust_cfgs->lpmode_timeout_power);
		of_property_read_u32(np_emif,
				"low-power-mode-freq-threshold",
				&cust_cfgs->lpmode_freq_threshold);
	}

	if (poll_intvl) {
		cust_cfgs->mask |=
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL;
		cust_cfgs->temp_alert_poll_interval_ms =
						be32_to_cpup(poll_intvl);
	}

	if (of_find_property(np_emif, "extended-temp-part", &len))
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;

	if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
		devm_kfree(emif->dev, cust_cfgs);
		return;
	}

	emif->plat_data->custom_configs = cust_cfgs;
}

/*
 * Fill a ddr_device_info from the EMIF node (CS1/calibration flags) and
 * the attached LPDDR2 device node (type, density, io-width).
 */
static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
		struct device_node *np_ddr,
		struct ddr_device_info *dev_info)
{
	u32 density = 0, io_width = 0;
	int len;

	if (of_find_property(np_emif, "cs1-used", &len))
		dev_info->cs1_used = true;

	if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
		dev_info->cal_resistors_per_cs = true;

	if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
		dev_info->type = DDR_TYPE_LPDDR2_S4;
	else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
		dev_info->type = DDR_TYPE_LPDDR2_S2;

	of_property_read_u32(np_ddr, "density", &density);
	of_property_read_u32(np_ddr, "io-width", &io_width);

	/* Convert from density in Mb to the density encoding in jedec_ddr.h */
	if (density & (density - 1))
		dev_info->density = 0;	/* not a power of 2: invalid */
	else
		dev_info->density = __fls(density) - 5;

	/* Convert from io_width in bits to io_width encoding in jedec_ddr.h */
	if (io_width & (io_width - 1))
		dev_info->io_width = 0;	/* not a power of 2: invalid */
	else
		dev_info->io_width = __fls(io_width) - 1;
}

/*
 * Build an emif_data (plus platform data and device info) entirely from
 * device-tree. Returns NULL on any parse/validation/allocation failure.
 */
static struct emif_data * __init_or_module of_get_memory_device_details(
		struct device_node *np_emif, struct device *dev)
{
	struct emif_data		*emif = NULL;
	struct ddr_device_info		*dev_info = NULL;
	struct emif_platform_data	*pd = NULL;
	struct device_node		*np_ddr;
	int				len;

	np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
	if (!np_ddr)
		goto error;
	emif = devm_kzalloc(dev, sizeof(struct
emif_data), GFP_KERNEL); pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL); if (!emif || !pd || !dev_info) { dev_err(dev, "%s: Out of memory!!\n", __func__); goto error; } emif->plat_data = pd; pd->device_info = dev_info; emif->dev = dev; emif->np_ddr = np_ddr; emif->temperature_level = SDRAM_TEMP_NOMINAL; if (of_device_is_compatible(np_emif, "ti,emif-4d")) emif->plat_data->ip_rev = EMIF_4D; else if (of_device_is_compatible(np_emif, "ti,emif-4d5")) emif->plat_data->ip_rev = EMIF_4D5; of_property_read_u32(np_emif, "phy-type", &pd->phy_type); if (of_find_property(np_emif, "hw-caps-ll-interface", &len)) pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE; of_get_ddr_info(np_emif, np_ddr, dev_info); if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density, pd->device_info->io_width, pd->phy_type, pd->ip_rev, emif->dev)) { dev_err(dev, "%s: invalid device data!!\n", __func__); goto error; } /* * For EMIF instances other than EMIF1 see if the devices connected * are exactly same as on EMIF1(which is typically the case). If so, * mark it as a duplicate of EMIF1. This will save some memory and * computation. 
*/ if (emif1 && emif1->np_ddr == np_ddr) { emif->duplicate = true; goto out; } else if (emif1) { dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n", __func__); } of_get_custom_configs(np_emif, emif); emif->plat_data->timings = of_get_ddr_timings(np_ddr, emif->dev, emif->plat_data->device_info->type, &emif->plat_data->timings_arr_size); emif->plat_data->min_tck = of_get_min_tck(np_ddr, emif->dev); goto out; error: return NULL; out: return emif; } #else static struct emif_data * __init_or_module of_get_memory_device_details( struct device_node *np_emif, struct device *dev) { return NULL; } #endif static struct emif_data *__init_or_module get_device_details( struct platform_device *pdev) { u32 size; struct emif_data *emif = NULL; struct ddr_device_info *dev_info; struct emif_custom_configs *cust_cfgs; struct emif_platform_data *pd; struct device *dev; void *temp; pd = pdev->dev.platform_data; dev = &pdev->dev; if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type, pd->device_info->density, pd->device_info->io_width, pd->phy_type, pd->ip_rev, dev))) { dev_err(dev, "%s: invalid device data\n", __func__); goto error; } emif = devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL); temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL); if (!emif || !pd || !dev_info) { dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__); goto error; } memcpy(temp, pd, sizeof(*pd)); pd = temp; memcpy(dev_info, pd->device_info, sizeof(*dev_info)); pd->device_info = dev_info; emif->plat_data = pd; emif->dev = dev; emif->temperature_level = SDRAM_TEMP_NOMINAL; /* * For EMIF instances other than EMIF1 see if the devices connected * are exactly same as on EMIF1(which is typically the case). If so, * mark it as a duplicate of EMIF1 and skip copying timings data. * This will save some memory and some computation later. 
	 */
	emif->duplicate = emif1 && (memcmp(dev_info,
		emif1->plat_data->device_info,
		sizeof(struct ddr_device_info)) == 0);

	if (emif->duplicate) {
		/* Share EMIF1's timings/min-tck; no private copies needed */
		pd->timings = NULL;
		pd->min_tck = NULL;
		goto out;
	} else if (emif1) {
		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
			__func__);
	}

	/*
	 * Copy custom configs - ignore allocation error, if any, as
	 * custom_configs is not very critical
	 */
	cust_cfgs = pd->custom_configs;

	if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
		temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
		if (temp)
			memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
		else
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
		pd->custom_configs = temp;
	}

	/*
	 * Copy timings and min-tck values from platform data. If it is not
	 * available or if memory allocation fails, use JEDEC defaults
	 */
	size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
	if (pd->timings) {
		temp = devm_kzalloc(dev, size, GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->timings, size);
			pd->timings = temp;
		} else {
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
			get_default_timings(emif);
		}
	} else {
		get_default_timings(emif);
	}

	if (pd->min_tck) {
		temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
			pd->min_tck = temp;
		} else {
			dev_warn(dev, "%s:%d: allocation error\n", __func__,
				__LINE__);
			pd->min_tck = &lpddr2_jedec_min_tck;
		}
	} else {
		pd->min_tck = &lpddr2_jedec_min_tck;
	}

out:
	return emif;

error:
	return NULL;
}

/*
 * Probe: gather device data (DT or platform data), map registers, apply
 * the one-time settings, and wire up the interrupt handlers. The first
 * probed instance becomes emif1, which later instances may share data with.
 */
static int __init_or_module emif_probe(struct platform_device *pdev)
{
	struct emif_data	*emif;
	struct resource		*res;
	int			irq;

	if (pdev->dev.of_node)
		emif = of_get_memory_device_details(pdev->dev.of_node,
			&pdev->dev);
	else
		emif = get_device_details(pdev);

	if (!emif) {
		pr_err("%s: error getting device data\n", __func__);
		goto error;
	}

	list_add(&emif->node, &device_list);
	emif->addressing = get_addressing_table(emif->plat_data->device_info);

	/* Save pointers to each other in emif and device structures */
	emif->dev = &pdev->dev;
	platform_set_drvdata(pdev, emif);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	emif->base = devm_ioremap_resource(emif->dev, res);
	if (IS_ERR(emif->base))
		goto error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(emif->dev, "%s: error getting IRQ resource - %d\n",
			__func__, irq);
		goto error;
	}

	emif_onetime_settings(emif);
	emif_debugfs_init(emif);
	disable_and_clear_all_interrupts(emif);
	setup_interrupts(emif, irq);

	/* One-time actions taken on probing the first device */
	if (!emif1) {
		emif1 = emif;
		spin_lock_init(&emif_lock);

		/*
		 * TODO: register notifiers for frequency and voltage
		 * change here once the respective frameworks are
		 * available
		 */
	}

	dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
		__func__, emif->base, irq);

	return 0;
error:
	return -ENODEV;
}

static int __exit emif_remove(struct platform_device *pdev)
{
	struct emif_data *emif = platform_get_drvdata(pdev);

	emif_debugfs_exit(emif);

	return 0;
}

/* Quiesce interrupt sources on shutdown/reboot */
static void emif_shutdown(struct platform_device *pdev)
{
	struct emif_data	*emif = platform_get_drvdata(pdev);

	disable_and_clear_all_interrupts(emif);
}

/*
 * Compute the full set of shadow-register values for the given frequency.
 * Uses EMIF1's device data when this instance is a duplicate.
 * Returns 0 on success, -1 when timing/addressing data is missing or the
 * EMIF/PHY combination is unsupported.
 */
static int get_emif_reg_values(struct emif_data *emif, u32 freq,
		struct emif_regs *regs)
{
	u32				cs1_used, ip_rev, phy_type;
	u32				cl, type;
	const struct lpddr2_timings	*timings;
	const struct lpddr2_min_tck	*min_tck;
	const struct ddr_device_info	*device_info;
	const struct lpddr2_addressing	*addressing;
	struct emif_data		*emif_for_calc;
	struct device			*dev;
	const struct emif_custom_configs *custom_configs;

	dev = emif->dev;
	/*
	 * If the devices on this EMIF instance is duplicate of EMIF1,
	 * use EMIF1 details for the calculation
	 */
	emif_for_calc	= emif->duplicate ?
				emif1 : emif;
	timings		= get_timings_table(emif_for_calc, freq);
	addressing	= emif_for_calc->addressing;

	if (!timings || !addressing) {
		dev_err(dev, "%s: not enough data available for %dHz",
			__func__, freq);
		return -1;
	}

	device_info	= emif_for_calc->plat_data->device_info;
	type		= device_info->type;
	cs1_used	= device_info->cs1_used;
	ip_rev		= emif_for_calc->plat_data->ip_rev;
	phy_type	= emif_for_calc->plat_data->phy_type;

	min_tck		= emif_for_calc->plat_data->min_tck;
	custom_configs	= emif_for_calc->plat_data->custom_configs;

	set_ddr_clk_period(freq);

	regs->ref_ctrl_shdw	= get_sdram_ref_ctrl_shdw(freq, addressing);
	regs->sdram_tim1_shdw	= get_sdram_tim_1_shdw(timings, min_tck,
						addressing);
	regs->sdram_tim2_shdw	= get_sdram_tim_2_shdw(timings, min_tck,
						addressing, type);
	regs->sdram_tim3_shdw	= get_sdram_tim_3_shdw(timings, min_tck,
		addressing, type, ip_rev, EMIF_NORMAL_TIMINGS);

	cl = get_cl(emif);

	/* PHY control depends on the EMIF/PHY revision pair */
	if (phy_type == EMIF_PHY_TYPE_ATTILAPHY && ip_rev == EMIF_4D) {
		regs->phy_ctrl_1_shdw = get_ddr_phy_ctrl_1_attilaphy_4d(
			timings, freq, cl);
	} else if (phy_type == EMIF_PHY_TYPE_INTELLIPHY && ip_rev == EMIF_4D5) {
		regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl);
		regs->ext_phy_ctrl_2_shdw = get_ext_phy_ctrl_2_intelliphy_4d5();
		regs->ext_phy_ctrl_3_shdw = get_ext_phy_ctrl_3_intelliphy_4d5();
		regs->ext_phy_ctrl_4_shdw = get_ext_phy_ctrl_4_intelliphy_4d5();
	} else {
		return -1;
	}

	/* Only timeout values in pwr_mgmt_ctrl_shdw register */
	regs->pwr_mgmt_ctrl_shdw =
		get_pwr_mgmt_ctrl(freq, emif_for_calc, ip_rev) &
		(CS_TIM_MASK | SR_TIM_MASK | PD_TIM_MASK);

	if (ip_rev & EMIF_4D) {
		regs->read_idle_ctrl_shdw_normal =
			get_read_idle_ctrl_shdw(DDR_VOLTAGE_STABLE);

		regs->read_idle_ctrl_shdw_volt_ramp =
			get_read_idle_ctrl_shdw(DDR_VOLTAGE_RAMPING);
	} else if (ip_rev & EMIF_4D5) {
		regs->dll_calib_ctrl_shdw_normal =
			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_STABLE);

		regs->dll_calib_ctrl_shdw_volt_ramp =
			get_dll_calib_ctrl_shdw(DDR_VOLTAGE_RAMPING);
	}

	/* De-rated refresh/timings only apply to LPDDR2 parts */
	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
		regs->ref_ctrl_shdw_derated = get_sdram_ref_ctrl_shdw(freq / 4,
			addressing);

		regs->sdram_tim1_shdw_derated =
			get_sdram_tim_1_shdw_derated(timings, min_tck,
				addressing);

		regs->sdram_tim3_shdw_derated = get_sdram_tim_3_shdw(timings,
			min_tck, addressing, type, ip_rev,
			EMIF_DERATED_TIMINGS);
	}

	regs->freq = freq;

	return 0;
}

/*
 * get_regs() - gets the cached emif_regs structure for a given EMIF instance
 * given frequency(freq):
 *
 * As an optimisation, every EMIF instance other than EMIF1 shares the
 * register cache with EMIF1 if the devices connected on this instance
 * are same as that on EMIF1(indicated by the duplicate flag)
 *
 * If we do not have an entry corresponding to the frequency given, we
 * allocate a new entry and calculate the values
 *
 * Upon finding the right reg dump, save it in curr_regs. It can be
 * directly used for thermal de-rating and voltage ramping changes.
 */
static struct emif_regs *get_regs(struct emif_data *emif, u32 freq)
{
	int			i;
	struct emif_regs	**regs_cache;
	struct emif_regs	*regs = NULL;
	struct device		*dev;

	dev = emif->dev;
	if (emif->curr_regs && emif->curr_regs->freq == freq) {
		dev_dbg(dev, "%s: using curr_regs - %u Hz", __func__, freq);
		return emif->curr_regs;
	}

	if (emif->duplicate)
		regs_cache = emif1->regs_cache;
	else
		regs_cache = emif->regs_cache;

	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
		if (regs_cache[i]->freq == freq) {
			regs = regs_cache[i];
			dev_dbg(dev,
				"%s: reg dump found in reg cache for %u Hz\n",
				__func__, freq);
			break;
		}
	}

	/*
	 * If we don't have an entry for this frequency in the cache create one
	 * and calculate the values
	 */
	if (!regs) {
		/* GFP_ATOMIC: may be called with emif_lock held */
		regs = devm_kzalloc(emif->dev, sizeof(*regs), GFP_ATOMIC);
		if (!regs)
			return NULL;

		if (get_emif_reg_values(emif, freq, regs)) {
			devm_kfree(emif->dev, regs);
			return NULL;
		}

		/*
		 * Now look for an un-used entry in the cache and save the
		 * newly created struct. If there are no free entries
		 * over-write the last entry
		 */
		for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++)
			;

		if (i >= EMIF_MAX_NUM_FREQUENCIES) {
			dev_warn(dev, "%s: regs_cache full - reusing a slot!!\n",
				__func__);
			i = EMIF_MAX_NUM_FREQUENCIES - 1;
			devm_kfree(emif->dev, regs_cache[i]);
		}
		regs_cache[i] = regs;
	}

	return regs;
}

/* Apply voltage-ramp sensitive settings for one EMIF instance */
static void do_volt_notify_handling(struct emif_data *emif, u32 volt_state)
{
	dev_dbg(emif->dev, "%s: voltage notification : %d", __func__,
		volt_state);

	if (!emif->curr_regs) {
		dev_err(emif->dev,
			"%s: volt-notify before registers are ready: %d\n",
			__func__, volt_state);
		return;
	}

	setup_volt_sensitive_regs(emif, emif->curr_regs, volt_state);
}

/*
 * TODO: voltage notify handling should be hooked up to
 * regulator framework as soon as the necessary support
 * is available in mainline kernel. This function is un-used
 * right now.
 */
static void __attribute__((unused)) volt_notify_handling(u32 volt_state)
{
	struct emif_data *emif;

	spin_lock_irqsave(&emif_lock, irq_state);

	list_for_each_entry(emif, &device_list, node)
		do_volt_notify_handling(emif, volt_state);
	do_freq_update();

	spin_unlock_irqrestore(&emif_lock, irq_state);
}

/* Pre-notification for a frequency change: program shadow regs for new_freq */
static void do_freq_pre_notify_handling(struct emif_data *emif, u32 new_freq)
{
	struct emif_regs *regs;

	regs = get_regs(emif, new_freq);
	if (!regs)
		return;

	emif->curr_regs = regs;

	/*
	 * Update the shadow registers:
	 * Temperature and voltage-ramp sensitive settings are also configured
	 * in terms of DDR cycles. So, we need to update them too when there
	 * is a freq change
	 */
	dev_dbg(emif->dev, "%s: setting up shadow registers for %uHz",
		__func__, new_freq);
	setup_registers(emif, regs);
	setup_temperature_sensitive_regs(emif, regs);
	setup_volt_sensitive_regs(emif, regs, DDR_VOLTAGE_STABLE);

	/*
	 * Part of workaround for errata i728.
See do_freq_update() * for more details */ if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH) set_lpmode(emif, EMIF_LP_MODE_DISABLE); } /* * TODO: frequency notify handling should be hooked up to * clock framework as soon as the necessary support is * available in mainline kernel. This function is un-used * right now. */ static void __attribute__((unused)) freq_pre_notify_handling(u32 new_freq) { struct emif_data *emif; /* * NOTE: we are taking the spin-lock here and releases it * only in post-notifier. This doesn't look good and * Sparse complains about it, but this seems to be * un-avoidable. We need to lock a sequence of events * that is split between EMIF and clock framework. * * 1. EMIF driver updates EMIF timings in shadow registers in the * frequency pre-notify callback from clock framework * 2. clock framework sets up the registers for the new frequency * 3. clock framework initiates a hw-sequence that updates * the frequency EMIF timings synchronously. * * All these 3 steps should be performed as an atomic operation * vis-a-vis similar sequence in the EMIF interrupt handler * for temperature events. Otherwise, there could be race * conditions that could result in incorrect EMIF timings for * a given frequency */ spin_lock_irqsave(&emif_lock, irq_state); list_for_each_entry(emif, &device_list, node) do_freq_pre_notify_handling(emif, new_freq); } static void do_freq_post_notify_handling(struct emif_data *emif) { /* * Part of workaround for errata i728. See do_freq_update() * for more details */ if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH) set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH); } /* * TODO: frequency notify handling should be hooked up to * clock framework as soon as the necessary support is * available in mainline kernel. This function is un-used * right now. 
*/ static void __attribute__((unused)) freq_post_notify_handling(void) { struct emif_data *emif; list_for_each_entry(emif, &device_list, node) do_freq_post_notify_handling(emif); /* * Lock is done in pre-notify handler. See freq_pre_notify_handling() * for more details */ spin_unlock_irqrestore(&emif_lock, irq_state); } #if defined(CONFIG_OF) static const struct of_device_id emif_of_match[] = { { .compatible = "ti,emif-4d" }, { .compatible = "ti,emif-4d5" }, {}, }; MODULE_DEVICE_TABLE(of, emif_of_match); #endif static struct platform_driver emif_driver = { .remove = __exit_p(emif_remove), .shutdown = emif_shutdown, .driver = { .name = "emif", .of_match_table = of_match_ptr(emif_of_match), }, }; module_platform_driver_probe(emif_driver, emif_probe); MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:emif"); MODULE_AUTHOR("Texas Instruments Inc");
gpl-2.0
ndufresne/linux-meson
drivers/isdn/mISDN/layer1.c
4762
9951
/* * * Author Karsten Keil <kkeil@novell.com> * * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/slab.h> #include <linux/module.h> #include <linux/mISDNhw.h> #include "core.h" #include "layer1.h" #include "fsm.h" static u_int *debug; struct layer1 { u_long Flags; struct FsmInst l1m; struct FsmTimer timer3; struct FsmTimer timerX; int delay; int t3_value; struct dchannel *dch; dchannel_l1callback *dcb; }; #define TIMER3_DEFAULT_VALUE 7000 static struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL}; enum { ST_L1_F2, ST_L1_F3, ST_L1_F4, ST_L1_F5, ST_L1_F6, ST_L1_F7, ST_L1_F8, }; #define L1S_STATE_COUNT (ST_L1_F8 + 1) static char *strL1SState[] = { "ST_L1_F2", "ST_L1_F3", "ST_L1_F4", "ST_L1_F5", "ST_L1_F6", "ST_L1_F7", "ST_L1_F8", }; enum { EV_PH_ACTIVATE, EV_PH_DEACTIVATE, EV_RESET_IND, EV_DEACT_CNF, EV_DEACT_IND, EV_POWER_UP, EV_ANYSIG_IND, EV_INFO2_IND, EV_INFO4_IND, EV_TIMER_DEACT, EV_TIMER_ACT, EV_TIMER3, }; #define L1_EVENT_COUNT (EV_TIMER3 + 1) static char *strL1Event[] = { "EV_PH_ACTIVATE", "EV_PH_DEACTIVATE", "EV_RESET_IND", "EV_DEACT_CNF", "EV_DEACT_IND", "EV_POWER_UP", "EV_ANYSIG_IND", "EV_INFO2_IND", "EV_INFO4_IND", "EV_TIMER_DEACT", "EV_TIMER_ACT", "EV_TIMER3", }; static void l1m_debug(struct FsmInst *fi, char *fmt, ...) 
{ struct layer1 *l1 = fi->userdata; struct va_format vaf; va_list va; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; printk(KERN_DEBUG "%s: %pV\n", dev_name(&l1->dch->dev.dev), &vaf); va_end(va); } static void l1_reset(struct FsmInst *fi, int event, void *arg) { mISDN_FsmChangeState(fi, ST_L1_F3); } static void l1_deact_cnf(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; mISDN_FsmChangeState(fi, ST_L1_F3); if (test_bit(FLG_L1_ACTIVATING, &l1->Flags)) l1->dcb(l1->dch, HW_POWERUP_REQ); } static void l1_deact_req_s(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; mISDN_FsmChangeState(fi, ST_L1_F3); mISDN_FsmRestartTimer(&l1->timerX, 550, EV_TIMER_DEACT, NULL, 2); test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags); } static void l1_power_up_s(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; if (test_bit(FLG_L1_ACTIVATING, &l1->Flags)) { mISDN_FsmChangeState(fi, ST_L1_F4); l1->dcb(l1->dch, INFO3_P8); } else mISDN_FsmChangeState(fi, ST_L1_F3); } static void l1_go_F5(struct FsmInst *fi, int event, void *arg) { mISDN_FsmChangeState(fi, ST_L1_F5); } static void l1_go_F8(struct FsmInst *fi, int event, void *arg) { mISDN_FsmChangeState(fi, ST_L1_F8); } static void l1_info2_ind(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; mISDN_FsmChangeState(fi, ST_L1_F6); l1->dcb(l1->dch, INFO3_P8); } static void l1_info4_ind(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; mISDN_FsmChangeState(fi, ST_L1_F7); l1->dcb(l1->dch, INFO3_P8); if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags)) mISDN_FsmDelTimer(&l1->timerX, 4); if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) { if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags)) mISDN_FsmDelTimer(&l1->timer3, 3); mISDN_FsmRestartTimer(&l1->timerX, 110, EV_TIMER_ACT, NULL, 2); test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags); } } static void l1_timer3(struct FsmInst *fi, int event, void *arg) { struct layer1 
*l1 = fi->userdata; test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags); if (test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags)) { if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags)) l1->dcb(l1->dch, HW_D_NOBLOCKED); l1->dcb(l1->dch, PH_DEACTIVATE_IND); } if (l1->l1m.state != ST_L1_F6) { mISDN_FsmChangeState(fi, ST_L1_F3); /* do not force anything here, we need send INFO 0 */ } } static void l1_timer_act(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; test_and_clear_bit(FLG_L1_ACTTIMER, &l1->Flags); test_and_set_bit(FLG_L1_ACTIVATED, &l1->Flags); l1->dcb(l1->dch, PH_ACTIVATE_IND); } static void l1_timer_deact(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags); test_and_clear_bit(FLG_L1_ACTIVATED, &l1->Flags); if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags)) l1->dcb(l1->dch, HW_D_NOBLOCKED); l1->dcb(l1->dch, PH_DEACTIVATE_IND); l1->dcb(l1->dch, HW_DEACT_REQ); } static void l1_activate_s(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; mISDN_FsmRestartTimer(&l1->timer3, l1->t3_value, EV_TIMER3, NULL, 2); test_and_set_bit(FLG_L1_T3RUN, &l1->Flags); /* Tell HW to send INFO 1 */ l1->dcb(l1->dch, HW_RESET_REQ); } static void l1_activate_no(struct FsmInst *fi, int event, void *arg) { struct layer1 *l1 = fi->userdata; if ((!test_bit(FLG_L1_DEACTTIMER, &l1->Flags)) && (!test_bit(FLG_L1_T3RUN, &l1->Flags))) { test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags); if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags)) l1->dcb(l1->dch, HW_D_NOBLOCKED); l1->dcb(l1->dch, PH_DEACTIVATE_IND); } } static struct FsmNode L1SFnList[] = { {ST_L1_F3, EV_PH_ACTIVATE, l1_activate_s}, {ST_L1_F6, EV_PH_ACTIVATE, l1_activate_no}, {ST_L1_F8, EV_PH_ACTIVATE, l1_activate_no}, {ST_L1_F3, EV_RESET_IND, l1_reset}, {ST_L1_F4, EV_RESET_IND, l1_reset}, {ST_L1_F5, EV_RESET_IND, l1_reset}, {ST_L1_F6, EV_RESET_IND, l1_reset}, {ST_L1_F7, EV_RESET_IND, l1_reset}, {ST_L1_F8, 
EV_RESET_IND, l1_reset}, {ST_L1_F3, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F4, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F5, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F6, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F7, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F8, EV_DEACT_CNF, l1_deact_cnf}, {ST_L1_F6, EV_DEACT_IND, l1_deact_req_s}, {ST_L1_F7, EV_DEACT_IND, l1_deact_req_s}, {ST_L1_F8, EV_DEACT_IND, l1_deact_req_s}, {ST_L1_F3, EV_POWER_UP, l1_power_up_s}, {ST_L1_F4, EV_ANYSIG_IND, l1_go_F5}, {ST_L1_F6, EV_ANYSIG_IND, l1_go_F8}, {ST_L1_F7, EV_ANYSIG_IND, l1_go_F8}, {ST_L1_F3, EV_INFO2_IND, l1_info2_ind}, {ST_L1_F4, EV_INFO2_IND, l1_info2_ind}, {ST_L1_F5, EV_INFO2_IND, l1_info2_ind}, {ST_L1_F7, EV_INFO2_IND, l1_info2_ind}, {ST_L1_F8, EV_INFO2_IND, l1_info2_ind}, {ST_L1_F3, EV_INFO4_IND, l1_info4_ind}, {ST_L1_F4, EV_INFO4_IND, l1_info4_ind}, {ST_L1_F5, EV_INFO4_IND, l1_info4_ind}, {ST_L1_F6, EV_INFO4_IND, l1_info4_ind}, {ST_L1_F8, EV_INFO4_IND, l1_info4_ind}, {ST_L1_F3, EV_TIMER3, l1_timer3}, {ST_L1_F4, EV_TIMER3, l1_timer3}, {ST_L1_F5, EV_TIMER3, l1_timer3}, {ST_L1_F6, EV_TIMER3, l1_timer3}, {ST_L1_F8, EV_TIMER3, l1_timer3}, {ST_L1_F7, EV_TIMER_ACT, l1_timer_act}, {ST_L1_F3, EV_TIMER_DEACT, l1_timer_deact}, {ST_L1_F4, EV_TIMER_DEACT, l1_timer_deact}, {ST_L1_F5, EV_TIMER_DEACT, l1_timer_deact}, {ST_L1_F6, EV_TIMER_DEACT, l1_timer_deact}, {ST_L1_F7, EV_TIMER_DEACT, l1_timer_deact}, {ST_L1_F8, EV_TIMER_DEACT, l1_timer_deact}, }; static void release_l1(struct layer1 *l1) { mISDN_FsmDelTimer(&l1->timerX, 0); mISDN_FsmDelTimer(&l1->timer3, 0); if (l1->dch) l1->dch->l1 = NULL; module_put(THIS_MODULE); kfree(l1); } int l1_event(struct layer1 *l1, u_int event) { int err = 0; if (!l1) return -EINVAL; switch (event) { case HW_RESET_IND: mISDN_FsmEvent(&l1->l1m, EV_RESET_IND, NULL); break; case HW_DEACT_IND: mISDN_FsmEvent(&l1->l1m, EV_DEACT_IND, NULL); break; case HW_POWERUP_IND: mISDN_FsmEvent(&l1->l1m, EV_POWER_UP, NULL); break; case HW_DEACT_CNF: mISDN_FsmEvent(&l1->l1m, EV_DEACT_CNF, NULL); break; case 
ANYSIGNAL: mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL); break; case LOSTFRAMING: mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL); break; case INFO2: mISDN_FsmEvent(&l1->l1m, EV_INFO2_IND, NULL); break; case INFO4_P8: mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL); break; case INFO4_P10: mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL); break; case PH_ACTIVATE_REQ: if (test_bit(FLG_L1_ACTIVATED, &l1->Flags)) l1->dcb(l1->dch, PH_ACTIVATE_IND); else { test_and_set_bit(FLG_L1_ACTIVATING, &l1->Flags); mISDN_FsmEvent(&l1->l1m, EV_PH_ACTIVATE, NULL); } break; case CLOSE_CHANNEL: release_l1(l1); break; default: if ((event & ~HW_TIMER3_VMASK) == HW_TIMER3_VALUE) { int val = event & HW_TIMER3_VMASK; if (val < 5) val = 5; if (val > 30) val = 30; l1->t3_value = val; break; } if (*debug & DEBUG_L1) printk(KERN_DEBUG "%s %x unhandled\n", __func__, event); err = -EINVAL; } return err; } EXPORT_SYMBOL(l1_event); int create_l1(struct dchannel *dch, dchannel_l1callback *dcb) { struct layer1 *nl1; nl1 = kzalloc(sizeof(struct layer1), GFP_ATOMIC); if (!nl1) { printk(KERN_ERR "kmalloc struct layer1 failed\n"); return -ENOMEM; } nl1->l1m.fsm = &l1fsm_s; nl1->l1m.state = ST_L1_F3; nl1->Flags = 0; nl1->t3_value = TIMER3_DEFAULT_VALUE; nl1->l1m.debug = *debug & DEBUG_L1_FSM; nl1->l1m.userdata = nl1; nl1->l1m.userint = 0; nl1->l1m.printdebug = l1m_debug; nl1->dch = dch; nl1->dcb = dcb; mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer3); mISDN_FsmInitTimer(&nl1->l1m, &nl1->timerX); __module_get(THIS_MODULE); dch->l1 = nl1; return 0; } EXPORT_SYMBOL(create_l1); int l1_init(u_int *deb) { debug = deb; l1fsm_s.state_count = L1S_STATE_COUNT; l1fsm_s.event_count = L1_EVENT_COUNT; l1fsm_s.strEvent = strL1Event; l1fsm_s.strState = strL1SState; mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList)); return 0; } void l1_cleanup(void) { mISDN_FsmFree(&l1fsm_s); }
gpl-2.0
SaberMod/Linux-stable
arch/arm/mach-imx/devices/platform-imx21-hcd.c
4762
1127
/*
 * Copyright (C) 2010 Pengutronix
 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */
#include "../hardware.h"
#include "devices-common.h"

/* Per-SoC MMIO base / IRQ pair for the USB OTG host block. */
#define imx_imx21_hcd_data_entry_single(soc)				\
	{								\
		.iobase = soc ## _USBOTG_BASE_ADDR,			\
		.irq = soc ## _INT_USBHOST,				\
	}

#ifdef CONFIG_SOC_IMX21
const struct imx_imx21_hcd_data imx21_imx21_hcd_data __initconst =
	imx_imx21_hcd_data_entry_single(MX21);
#endif /* ifdef CONFIG_SOC_IMX21 */

/*
 * imx_add_imx21_hcd - register the i.MX21 USB host controller device
 * @data:  SoC description (register window base and host IRQ number)
 * @pdata: board-level platform data, copied into the new device
 *
 * Builds the resource table (one 8 KiB MMIO window, one IRQ line) and
 * hands it to the common platform-device helper with a 32-bit DMA mask.
 * Returns whatever the helper returns (the new platform_device).
 */
struct platform_device *__init imx_add_imx21_hcd(
		const struct imx_imx21_hcd_data *data,
		const struct mx21_usbh_platform_data *pdata)
{
	struct resource resources[] = {
		{
			.start = data->iobase,
			.end = data->iobase + SZ_8K - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irq,
			.end = data->irq,
			.flags = IORESOURCE_IRQ,
		},
	};

	return imx_add_platform_device_dmamask("imx21-hcd", 0,
			resources, ARRAY_SIZE(resources),
			pdata, sizeof(*pdata), DMA_BIT_MASK(32));
}
gpl-2.0
iwinoto/v4l-media_build
media/drivers/net/wireless/ti/wl18xx/io.c
5018
1703
/*
 * This file is part of wl18xx
 *
 * Copyright (C) 2011 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include "../wlcore/wlcore.h"
#include "../wlcore/io.h"
#include "io.h"

/*
 * wl18xx_top_reg_write - write a 16-bit TOP register
 * @wl:   chip instance
 * @addr: register address, must be 2-byte aligned
 * @val:  16-bit value to store
 *
 * The bus only supports 32-bit accesses, so the containing aligned
 * 32-bit word is read, the relevant half-word replaced, and the word
 * written back (read-modify-write; not atomic vs. other accessors).
 * Returns 0 on success or a negative wlcore error code.
 */
int wl18xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
{
	u32 tmp;
	int ret;

	if (WARN_ON(addr % 2))
		return -EINVAL;

	if ((addr % 4) == 0) {
		/* address is 4-bytes aligned: val occupies the low half */
		ret = wlcore_read32(wl, addr, &tmp);
		if (ret < 0)
			goto out;

		tmp = (tmp & 0xffff0000) | val;
		ret = wlcore_write32(wl, addr, tmp);
	} else {
		/* val is the high half of the word starting at addr - 2 */
		ret = wlcore_read32(wl, addr - 2, &tmp);
		if (ret < 0)
			goto out;

		/*
		 * Fix: cast to u32 before shifting.  A u16 promotes to
		 * (signed) int, so "val << 16" with bit 15 set would
		 * overflow int — undefined behaviour (CERT INT34-C).
		 * The stored value is unchanged.
		 */
		tmp = (tmp & 0xffff) | ((u32)val << 16);
		ret = wlcore_write32(wl, addr - 2, tmp);
	}

out:
	return ret;
}

/*
 * wl18xx_top_reg_read - read a 16-bit TOP register
 * @wl:   chip instance
 * @addr: register address, must be 2-byte aligned
 * @out:  result buffer; may be NULL to discard the value
 *
 * Reads the containing aligned 32-bit word and extracts the requested
 * half.  Returns 0 on success or a negative wlcore error code; *out is
 * only written when the read succeeds.
 */
int wl18xx_top_reg_read(struct wl1271 *wl, int addr, u16 *out)
{
	u32 val = 0;
	int ret;

	if (WARN_ON(addr % 2))
		return -EINVAL;

	if ((addr % 4) == 0) {
		/* address is 4-bytes aligned: value is the low half */
		ret = wlcore_read32(wl, addr, &val);
		if (ret >= 0 && out)
			*out = val & 0xffff;
	} else {
		/* value is the high half of the word at addr - 2 */
		ret = wlcore_read32(wl, addr - 2, &val);
		if (ret >= 0 && out)
			*out = (val & 0xffff0000) >> 16;
	}

	return ret;
}
gpl-2.0
franciscofranco/mako
drivers/mca/mca-proc.c
9882
7280
/* -*- mode: c; c-basic-offset: 8 -*- */ /* * MCA bus support functions for the proc fs. * * NOTE: this code *requires* the legacy MCA api. * * Legacy API means the API that operates in terms of MCA slot number * * (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com> * **----------------------------------------------------------------------------- ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ** ** You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ** **----------------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/mca.h> static int get_mca_info_helper(struct mca_device *mca_dev, char *page, int len) { int j; for(j=0; j<8; j++) len += sprintf(page+len, "%02x ", mca_dev ? mca_dev->pos[j] : 0xff); len += sprintf(page+len, " %s\n", mca_dev ? 
mca_dev->name : ""); return len; } static int get_mca_info(char *page, char **start, off_t off, int count, int *eof, void *data) { int i, len = 0; if(MCA_bus) { struct mca_device *mca_dev; /* Format POS registers of eight MCA slots */ for(i=0; i<MCA_MAX_SLOT_NR; i++) { mca_dev = mca_find_device_by_slot(i); len += sprintf(page+len, "Slot %d: ", i+1); len = get_mca_info_helper(mca_dev, page, len); } /* Format POS registers of integrated video subsystem */ mca_dev = mca_find_device_by_slot(MCA_INTEGVIDEO); len += sprintf(page+len, "Video : "); len = get_mca_info_helper(mca_dev, page, len); /* Format POS registers of integrated SCSI subsystem */ mca_dev = mca_find_device_by_slot(MCA_INTEGSCSI); len += sprintf(page+len, "SCSI : "); len = get_mca_info_helper(mca_dev, page, len); /* Format POS registers of motherboard */ mca_dev = mca_find_device_by_slot(MCA_MOTHERBOARD); len += sprintf(page+len, "Planar: "); len = get_mca_info_helper(mca_dev, page, len); } else { /* Leave it empty if MCA not detected - this should *never* * happen! */ } if (len <= off+count) *eof = 1; *start = page + off; len -= off; if (len>count) len = count; if (len<0) len = 0; return len; } /*--------------------------------------------------------------------*/ static int mca_default_procfn(char* buf, struct mca_device *mca_dev) { int len = 0, i; int slot = mca_dev->slot; /* Print out the basic information */ if(slot < MCA_MAX_SLOT_NR) { len += sprintf(buf+len, "Slot: %d\n", slot+1); } else if(slot == MCA_INTEGSCSI) { len += sprintf(buf+len, "Integrated SCSI Adapter\n"); } else if(slot == MCA_INTEGVIDEO) { len += sprintf(buf+len, "Integrated Video Adapter\n"); } else if(slot == MCA_MOTHERBOARD) { len += sprintf(buf+len, "Motherboard\n"); } if (mca_dev->name[0]) { /* Drivers might register a name without /proc handler... 
*/ len += sprintf(buf+len, "Adapter Name: %s\n", mca_dev->name); } else { len += sprintf(buf+len, "Adapter Name: Unknown\n"); } len += sprintf(buf+len, "Id: %02x%02x\n", mca_dev->pos[1], mca_dev->pos[0]); len += sprintf(buf+len, "Enabled: %s\nPOS: ", mca_device_status(mca_dev) == MCA_ADAPTER_NORMAL ? "Yes" : "No"); for(i=0; i<8; i++) { len += sprintf(buf+len, "%02x ", mca_dev->pos[i]); } len += sprintf(buf+len, "\nDriver Installed: %s", mca_device_claimed(mca_dev) ? "Yes" : "No"); buf[len++] = '\n'; buf[len] = 0; return len; } /* mca_default_procfn() */ static int get_mca_machine_info(char* page, char **start, off_t off, int count, int *eof, void *data) { int len = 0; len += sprintf(page+len, "Model Id: 0x%x\n", machine_id); len += sprintf(page+len, "Submodel Id: 0x%x\n", machine_submodel_id); len += sprintf(page+len, "BIOS Revision: 0x%x\n", BIOS_revision); if (len <= off+count) *eof = 1; *start = page + off; len -= off; if (len>count) len = count; if (len<0) len = 0; return len; } static int mca_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { struct mca_device *mca_dev = (struct mca_device *)data; int len = 0; /* Get the standard info */ len = mca_default_procfn(page, mca_dev); /* Do any device-specific processing, if there is any */ if(mca_dev->procfn) { len += mca_dev->procfn(page+len, mca_dev->slot, mca_dev->proc_dev); } if (len <= off+count) *eof = 1; *start = page + off; len -= off; if (len>count) len = count; if (len<0) len = 0; return len; } /* mca_read_proc() */ /*--------------------------------------------------------------------*/ void __init mca_do_proc_init(void) { int i; struct proc_dir_entry *proc_mca; struct proc_dir_entry* node = NULL; struct mca_device *mca_dev; proc_mca = proc_mkdir("mca", NULL); create_proc_read_entry("pos",0,proc_mca,get_mca_info,NULL); create_proc_read_entry("machine",0,proc_mca,get_mca_machine_info,NULL); /* Initialize /proc/mca entries for existing adapters */ for(i = 0; i < 
MCA_NUMADAPTERS; i++) { enum MCA_AdapterStatus status; mca_dev = mca_find_device_by_slot(i); if(!mca_dev) continue; mca_dev->procfn = NULL; if(i < MCA_MAX_SLOT_NR) sprintf(mca_dev->procname,"slot%d", i+1); else if(i == MCA_INTEGVIDEO) sprintf(mca_dev->procname,"video"); else if(i == MCA_INTEGSCSI) sprintf(mca_dev->procname,"scsi"); else if(i == MCA_MOTHERBOARD) sprintf(mca_dev->procname,"planar"); status = mca_device_status(mca_dev); if (status != MCA_ADAPTER_NORMAL && status != MCA_ADAPTER_DISABLED) continue; node = create_proc_read_entry(mca_dev->procname, 0, proc_mca, mca_read_proc, (void *)mca_dev); if(node == NULL) { printk("Failed to allocate memory for MCA proc-entries!"); return; } } } /* mca_do_proc_init() */ /** * mca_set_adapter_procfn - Set the /proc callback * @slot: slot to configure * @procfn: callback function to call for /proc * @dev: device information passed to the callback * * This sets up an information callback for /proc/mca/slot?. The * function is called with the buffer, slot, and device pointer (or * some equally informative context information, or nothing, if you * prefer), and is expected to put useful information into the * buffer. The adapter name, ID, and POS registers get printed * before this is called though, so don't do it again. * * This should be called with a %NULL @procfn when a module * unregisters, thus preventing kernel crashes and other such * nastiness. */ void mca_set_adapter_procfn(int slot, MCA_ProcFn procfn, void* proc_dev) { struct mca_device *mca_dev = mca_find_device_by_slot(slot); if(!mca_dev) return; mca_dev->procfn = procfn; mca_dev->proc_dev = proc_dev; } EXPORT_SYMBOL(mca_set_adapter_procfn);
gpl-2.0
infoburp/binutils
opcodes/m32r-desc.c
155
48869
/* CPU data for m32r. THIS FILE IS MACHINE GENERATED WITH CGEN. Copyright 1996-2010 Free Software Foundation, Inc. This file is part of the GNU Binutils and/or GDB, the GNU debugger. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. It is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ #include "sysdep.h" #include <stdio.h> #include <stdarg.h> #include "ansidecl.h" #include "bfd.h" #include "symcat.h" #include "m32r-desc.h" #include "m32r-opc.h" #include "opintl.h" #include "libiberty.h" #include "xregex.h" /* Attributes. 
*/ static const CGEN_ATTR_ENTRY bool_attr[] = { { "#f", 0 }, { "#t", 1 }, { 0, 0 } }; static const CGEN_ATTR_ENTRY MACH_attr[] ATTRIBUTE_UNUSED = { { "base", MACH_BASE }, { "m32r", MACH_M32R }, { "m32rx", MACH_M32RX }, { "m32r2", MACH_M32R2 }, { "max", MACH_MAX }, { 0, 0 } }; static const CGEN_ATTR_ENTRY ISA_attr[] ATTRIBUTE_UNUSED = { { "m32r", ISA_M32R }, { "max", ISA_MAX }, { 0, 0 } }; static const CGEN_ATTR_ENTRY PIPE_attr[] ATTRIBUTE_UNUSED = { { "NONE", PIPE_NONE }, { "O", PIPE_O }, { "S", PIPE_S }, { "OS", PIPE_OS }, { "O_OS", PIPE_O_OS }, { 0, 0 } }; const CGEN_ATTR_TABLE m32r_cgen_ifield_attr_table[] = { { "MACH", & MACH_attr[0], & MACH_attr[0] }, { "VIRTUAL", &bool_attr[0], &bool_attr[0] }, { "PCREL-ADDR", &bool_attr[0], &bool_attr[0] }, { "ABS-ADDR", &bool_attr[0], &bool_attr[0] }, { "RESERVED", &bool_attr[0], &bool_attr[0] }, { "SIGN-OPT", &bool_attr[0], &bool_attr[0] }, { "SIGNED", &bool_attr[0], &bool_attr[0] }, { "RELOC", &bool_attr[0], &bool_attr[0] }, { 0, 0, 0 } }; const CGEN_ATTR_TABLE m32r_cgen_hardware_attr_table[] = { { "MACH", & MACH_attr[0], & MACH_attr[0] }, { "VIRTUAL", &bool_attr[0], &bool_attr[0] }, { "CACHE-ADDR", &bool_attr[0], &bool_attr[0] }, { "PC", &bool_attr[0], &bool_attr[0] }, { "PROFILE", &bool_attr[0], &bool_attr[0] }, { 0, 0, 0 } }; const CGEN_ATTR_TABLE m32r_cgen_operand_attr_table[] = { { "MACH", & MACH_attr[0], & MACH_attr[0] }, { "VIRTUAL", &bool_attr[0], &bool_attr[0] }, { "PCREL-ADDR", &bool_attr[0], &bool_attr[0] }, { "ABS-ADDR", &bool_attr[0], &bool_attr[0] }, { "SIGN-OPT", &bool_attr[0], &bool_attr[0] }, { "SIGNED", &bool_attr[0], &bool_attr[0] }, { "NEGATIVE", &bool_attr[0], &bool_attr[0] }, { "RELAX", &bool_attr[0], &bool_attr[0] }, { "SEM-ONLY", &bool_attr[0], &bool_attr[0] }, { "RELOC", &bool_attr[0], &bool_attr[0] }, { 0, 0, 0 } }; const CGEN_ATTR_TABLE m32r_cgen_insn_attr_table[] = { { "MACH", & MACH_attr[0], & MACH_attr[0] }, { "PIPE", & PIPE_attr[0], & PIPE_attr[0] }, { "ALIAS", &bool_attr[0], &bool_attr[0] 
}, { "VIRTUAL", &bool_attr[0], &bool_attr[0] }, { "UNCOND-CTI", &bool_attr[0], &bool_attr[0] }, { "COND-CTI", &bool_attr[0], &bool_attr[0] }, { "SKIP-CTI", &bool_attr[0], &bool_attr[0] }, { "DELAY-SLOT", &bool_attr[0], &bool_attr[0] }, { "RELAXABLE", &bool_attr[0], &bool_attr[0] }, { "RELAXED", &bool_attr[0], &bool_attr[0] }, { "NO-DIS", &bool_attr[0], &bool_attr[0] }, { "PBB", &bool_attr[0], &bool_attr[0] }, { "FILL-SLOT", &bool_attr[0], &bool_attr[0] }, { "SPECIAL", &bool_attr[0], &bool_attr[0] }, { "SPECIAL_M32R", &bool_attr[0], &bool_attr[0] }, { "SPECIAL_FLOAT", &bool_attr[0], &bool_attr[0] }, { 0, 0, 0 } }; /* Instruction set variants. */ static const CGEN_ISA m32r_cgen_isa_table[] = { { "m32r", 32, 32, 16, 32 }, { 0, 0, 0, 0, 0 } }; /* Machine variants. */ static const CGEN_MACH m32r_cgen_mach_table[] = { { "m32r", "m32r", MACH_M32R, 0 }, { "m32rx", "m32rx", MACH_M32RX, 0 }, { "m32r2", "m32r2", MACH_M32R2, 0 }, { 0, 0, 0, 0 } }; static CGEN_KEYWORD_ENTRY m32r_cgen_opval_gr_names_entries[] = { { "fp", 13, {0, {{{0, 0}}}}, 0, 0 }, { "lr", 14, {0, {{{0, 0}}}}, 0, 0 }, { "sp", 15, {0, {{{0, 0}}}}, 0, 0 }, { "r0", 0, {0, {{{0, 0}}}}, 0, 0 }, { "r1", 1, {0, {{{0, 0}}}}, 0, 0 }, { "r2", 2, {0, {{{0, 0}}}}, 0, 0 }, { "r3", 3, {0, {{{0, 0}}}}, 0, 0 }, { "r4", 4, {0, {{{0, 0}}}}, 0, 0 }, { "r5", 5, {0, {{{0, 0}}}}, 0, 0 }, { "r6", 6, {0, {{{0, 0}}}}, 0, 0 }, { "r7", 7, {0, {{{0, 0}}}}, 0, 0 }, { "r8", 8, {0, {{{0, 0}}}}, 0, 0 }, { "r9", 9, {0, {{{0, 0}}}}, 0, 0 }, { "r10", 10, {0, {{{0, 0}}}}, 0, 0 }, { "r11", 11, {0, {{{0, 0}}}}, 0, 0 }, { "r12", 12, {0, {{{0, 0}}}}, 0, 0 }, { "r13", 13, {0, {{{0, 0}}}}, 0, 0 }, { "r14", 14, {0, {{{0, 0}}}}, 0, 0 }, { "r15", 15, {0, {{{0, 0}}}}, 0, 0 } }; CGEN_KEYWORD m32r_cgen_opval_gr_names = { & m32r_cgen_opval_gr_names_entries[0], 19, 0, 0, 0, 0, "" }; static CGEN_KEYWORD_ENTRY m32r_cgen_opval_cr_names_entries[] = { { "psw", 0, {0, {{{0, 0}}}}, 0, 0 }, { "cbr", 1, {0, {{{0, 0}}}}, 0, 0 }, { "spi", 2, {0, {{{0, 0}}}}, 0, 0 }, { 
"spu", 3, {0, {{{0, 0}}}}, 0, 0 }, { "bpc", 6, {0, {{{0, 0}}}}, 0, 0 }, { "bbpsw", 8, {0, {{{0, 0}}}}, 0, 0 }, { "bbpc", 14, {0, {{{0, 0}}}}, 0, 0 }, { "evb", 5, {0, {{{0, 0}}}}, 0, 0 }, { "cr0", 0, {0, {{{0, 0}}}}, 0, 0 }, { "cr1", 1, {0, {{{0, 0}}}}, 0, 0 }, { "cr2", 2, {0, {{{0, 0}}}}, 0, 0 }, { "cr3", 3, {0, {{{0, 0}}}}, 0, 0 }, { "cr4", 4, {0, {{{0, 0}}}}, 0, 0 }, { "cr5", 5, {0, {{{0, 0}}}}, 0, 0 }, { "cr6", 6, {0, {{{0, 0}}}}, 0, 0 }, { "cr7", 7, {0, {{{0, 0}}}}, 0, 0 }, { "cr8", 8, {0, {{{0, 0}}}}, 0, 0 }, { "cr9", 9, {0, {{{0, 0}}}}, 0, 0 }, { "cr10", 10, {0, {{{0, 0}}}}, 0, 0 }, { "cr11", 11, {0, {{{0, 0}}}}, 0, 0 }, { "cr12", 12, {0, {{{0, 0}}}}, 0, 0 }, { "cr13", 13, {0, {{{0, 0}}}}, 0, 0 }, { "cr14", 14, {0, {{{0, 0}}}}, 0, 0 }, { "cr15", 15, {0, {{{0, 0}}}}, 0, 0 } }; CGEN_KEYWORD m32r_cgen_opval_cr_names = { & m32r_cgen_opval_cr_names_entries[0], 24, 0, 0, 0, 0, "" }; static CGEN_KEYWORD_ENTRY m32r_cgen_opval_h_accums_entries[] = { { "a0", 0, {0, {{{0, 0}}}}, 0, 0 }, { "a1", 1, {0, {{{0, 0}}}}, 0, 0 } }; CGEN_KEYWORD m32r_cgen_opval_h_accums = { & m32r_cgen_opval_h_accums_entries[0], 2, 0, 0, 0, 0, "" }; /* The hardware table. 
*/

/* A(x) builds the attribute bit for hardware attribute x.  Scoped to this
   table only (#undef'd below); redefined with a different base for the
   ifield/operand/insn tables that follow.  */
#define A(a) (1 << CGEN_HW_##a)

const CGEN_HW_ENTRY m32r_cgen_hw_table[] =
{
  /* Each entry: name, type enum, assembler-lookup method, keyword table
     pointer (for CGEN_ASM_KEYWORD entries), then attributes including the
     machine mask that selects which machs the element exists on.  */
  { "h-memory", HW_H_MEMORY, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-sint", HW_H_SINT, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-uint", HW_H_UINT, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-addr", HW_H_ADDR, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-iaddr", HW_H_IADDR, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-pc", HW_H_PC, CGEN_ASM_NONE, 0, { 0|A(PROFILE)|A(PC), { { { (1<<MACH_BASE), 0 } } } } },
  { "h-hi16", HW_H_HI16, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-slo16", HW_H_SLO16, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-ulo16", HW_H_ULO16, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  /* General registers: parsed/printed via the gr keyword table above.  */
  { "h-gr", HW_H_GR, CGEN_ASM_KEYWORD, (PTR) & m32r_cgen_opval_gr_names, { 0|A(CACHE_ADDR)|A(PROFILE), { { { (1<<MACH_BASE), 0 } } } } },
  /* Control registers: parsed/printed via the cr keyword table above.  */
  { "h-cr", HW_H_CR, CGEN_ASM_KEYWORD, (PTR) & m32r_cgen_opval_cr_names, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-accum", HW_H_ACCUM, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  /* Named accumulators exist only on m32rx/m32r2.  */
  { "h-accums", HW_H_ACCUMS, CGEN_ASM_KEYWORD, (PTR) & m32r_cgen_opval_h_accums, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } } } } },
  { "h-cond", HW_H_COND, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-psw", HW_H_PSW, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-bpsw", HW_H_BPSW, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-bbpsw", HW_H_BBPSW, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { "h-lock", HW_H_LOCK, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  /* Sentinel (null name) terminates the table for build_hw_table's scan.  */
  { 0, 0, CGEN_ASM_NONE, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } }
};

#undef A


/* The instruction field table.
*/

/* A(x) builds the attribute bit for instruction-field attribute x.  */
#define A(a) (1 << CGEN_IFLD_##a)

const CGEN_IFLD m32r_cgen_ifld_table[] =
{
  /* NOTE(review): field layout appears to be (enum, name, ?, word-size,
     start-bit, bit-length, attributes) — confirm against CGEN_IFLD in
     include/opcode/cgen.h.  */
  { M32R_F_NIL, "f-nil", 0, 0, 0, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_ANYOF, "f-anyof", 0, 0, 0, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_OP1, "f-op1", 0, 32, 0, 4, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_OP2, "f-op2", 0, 32, 8, 4, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_COND, "f-cond", 0, 32, 4, 4, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_R1, "f-r1", 0, 32, 4, 4, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_R2, "f-r2", 0, 32, 12, 4, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_SIMM8, "f-simm8", 0, 32, 8, 8, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_SIMM16, "f-simm16", 0, 32, 16, 16, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_SHIFT_OP2, "f-shift-op2", 0, 32, 8, 3, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_UIMM3, "f-uimm3", 0, 32, 5, 3, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_UIMM4, "f-uimm4", 0, 32, 12, 4, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_UIMM5, "f-uimm5", 0, 32, 11, 5, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_UIMM8, "f-uimm8", 0, 32, 8, 8, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_UIMM16, "f-uimm16", 0, 32, 16, 16, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  /* Address-bearing fields carry RELOC plus ABS/PCREL attributes so the
     assembler/linker know how to fix them up.  */
  { M32R_F_UIMM24, "f-uimm24", 0, 32, 8, 24, { 0|A(RELOC)|A(ABS_ADDR), { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_HI16, "f-hi16", 0, 32, 16, 16, { 0|A(SIGN_OPT), { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_DISP8, "f-disp8", 0, 32, 8, 8, { 0|A(RELOC)|A(PCREL_ADDR), { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_DISP16, "f-disp16", 0, 32, 16, 16, { 0|A(RELOC)|A(PCREL_ADDR), { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_DISP24, "f-disp24", 0, 32, 8, 24, { 0|A(RELOC)|A(PCREL_ADDR), { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_OP23, "f-op23", 0, 32, 9, 3, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_OP3, "f-op3", 0, 32, 14, 2, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_ACC, "f-acc", 0, 32, 8, 1, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_ACCS, "f-accs", 0, 32, 12, 2, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_ACCD, "f-accd", 0, 32, 4, 2, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_BITS67, "f-bits67", 0, 32, 6, 2, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_BIT4, "f-bit4", 0, 32, 4, 1, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_BIT14, "f-bit14", 0, 32, 14, 1, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { M32R_F_IMM1, "f-imm1", 0, 32, 15, 1, { 0, { { { (1<<MACH_BASE), 0 } } } } },
  { 0, 0, 0, 0, 0, 0, { 0, { { { (1<<MACH_BASE), 0 } } } } }  /* sentinel */
};

#undef A



/* multi ifield declarations */



/* multi ifield definitions */


/* The operand table.  */

/* A(x) builds the attribute bit for operand attribute x.  */
#define A(a) (1 << CGEN_OPERAND_##a)
#define OPERAND(op) M32R_OPERAND_##op

const CGEN_OPERAND m32r_cgen_operand_table[] =
{
/* Each entry: name, enum, hardware element, start-bit, bit-length, the
   instruction field it lives in, then attributes (SEM_ONLY operands have
   no encoding and exist only for the semantic description).  */
/* pc: program counter */
  { "pc", M32R_OPERAND_PC, HW_H_PC, 0, 0,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_NIL] } },
    { 0|A(SEM_ONLY), { { { (1<<MACH_BASE), 0 } } } }  },
/* sr: source register */
  { "sr", M32R_OPERAND_SR, HW_H_GR, 12, 4,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_R2] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* dr: destination register */
  { "dr", M32R_OPERAND_DR, HW_H_GR, 4, 4,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_R1] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* src1: source register 1 */
  { "src1", M32R_OPERAND_SRC1, HW_H_GR, 4, 4,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_R1] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* src2: source register 2 */
  { "src2", M32R_OPERAND_SRC2, HW_H_GR, 12, 4,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_R2] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* scr: source control register */
  { "scr", M32R_OPERAND_SCR, HW_H_CR, 12, 4,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_R2] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* dcr: destination control register */
  { "dcr", M32R_OPERAND_DCR, HW_H_CR, 4, 4,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_R1] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* simm8: 8 bit signed immediate */
  { "simm8", M32R_OPERAND_SIMM8, HW_H_SINT, 8, 8,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_SIMM8] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* simm16: 16 bit signed immediate */
  { "simm16", M32R_OPERAND_SIMM16, HW_H_SINT, 16, 16,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_SIMM16] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* uimm3: 3 bit unsigned number */
  { "uimm3", M32R_OPERAND_UIMM3, HW_H_UINT, 5, 3,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_UIMM3] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* uimm4: 4 bit trap number */
  { "uimm4", M32R_OPERAND_UIMM4, HW_H_UINT, 12, 4,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_UIMM4] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* uimm5: 5 bit shift count */
  { "uimm5", M32R_OPERAND_UIMM5, HW_H_UINT, 11, 5,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_UIMM5] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* uimm8: 8 bit unsigned immediate */
  { "uimm8", M32R_OPERAND_UIMM8, HW_H_UINT, 8, 8,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_UIMM8] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* uimm16: 16 bit unsigned immediate */
  { "uimm16", M32R_OPERAND_UIMM16, HW_H_UINT, 16, 16,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_UIMM16] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* imm1: 1 bit immediate */
  { "imm1", M32R_OPERAND_IMM1, HW_H_UINT, 15, 1,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_IMM1] } },
    { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } } } }  },
/* accd: accumulator destination register */
  { "accd", M32R_OPERAND_ACCD, HW_H_ACCUMS, 4, 2,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_ACCD] } },
    { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } } } }  },
/* accs: accumulator source register */
  { "accs", M32R_OPERAND_ACCS, HW_H_ACCUMS, 12, 2,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_ACCS] } },
    { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } } } }  },
/* acc: accumulator reg (d) */
  { "acc", M32R_OPERAND_ACC, HW_H_ACCUMS, 8, 1,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_ACC] } },
    { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } } } }  },
/* hash: # prefix */
  { "hash", M32R_OPERAND_HASH, HW_H_SINT, 0, 0,
    { 0, { (const PTR) 0 } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* hi16: high 16 bit immediate, sign optional */
  { "hi16", M32R_OPERAND_HI16, HW_H_HI16, 16, 16,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_HI16] } },
    { 0|A(SIGN_OPT), { { { (1<<MACH_BASE), 0 } } } }  },
/* slo16: 16 bit signed immediate, for low() */
  { "slo16", M32R_OPERAND_SLO16, HW_H_SLO16, 16, 16,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_SIMM16] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* ulo16: 16 bit unsigned immediate, for low() */
  { "ulo16", M32R_OPERAND_ULO16, HW_H_ULO16, 16, 16,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_UIMM16] } },
    { 0, { { { (1<<MACH_BASE), 0 } } } }  },
/* uimm24: 24 bit address */
  { "uimm24", M32R_OPERAND_UIMM24, HW_H_ADDR, 8, 24,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_UIMM24] } },
    { 0|A(RELOC)|A(ABS_ADDR), { { { (1<<MACH_BASE), 0 } } } }  },
/* disp8: 8 bit displacement */
  { "disp8", M32R_OPERAND_DISP8, HW_H_IADDR, 8, 8,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_DISP8] } },
    { 0|A(RELAX)|A(RELOC)|A(PCREL_ADDR), { { { (1<<MACH_BASE), 0 } } } }  },
/* disp16: 16 bit displacement */
  { "disp16", M32R_OPERAND_DISP16, HW_H_IADDR, 16, 16,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_DISP16] } },
    { 0|A(RELOC)|A(PCREL_ADDR), { { { (1<<MACH_BASE), 0 } } } }  },
/* disp24: 24 bit displacement */
  { "disp24", M32R_OPERAND_DISP24, HW_H_IADDR, 8, 24,
    { 0, { (const PTR) &m32r_cgen_ifld_table[M32R_F_DISP24] } },
    { 0|A(RELAX)|A(RELOC)|A(PCREL_ADDR), { { { (1<<MACH_BASE), 0 } } } }  },
/* condbit: condition bit */
  { "condbit", M32R_OPERAND_CONDBIT, HW_H_COND, 0, 0,
    { 0, { (const PTR) 0 } },
    { 0|A(SEM_ONLY), { { { (1<<MACH_BASE), 0 } } } }  },
/* accum: accumulator */
  { "accum", M32R_OPERAND_ACCUM, HW_H_ACCUM, 0, 0,
    { 0, { (const PTR) 0 } },
    { 0|A(SEM_ONLY), { { { (1<<MACH_BASE), 0 } } } }  },
/* sentinel */
  { 0, 0, 0, 0, 0,
    { 0, { (const PTR) 0 } },
    { 0, { { { (1<<MACH_BASE), 0 } } } } }
};

#undef A


/* The instruction table.  */

#define OP(field) CGEN_SYNTAX_MAKE_FIELD (OPERAND (field))
/* A(x) builds the attribute bit for instruction attribute x.  */
#define A(a) (1 << CGEN_INSN_##a)

static const CGEN_IBASE m32r_cgen_insn_table[MAX_INSNS] =
{
  /* Each entry: enum, internal name, mnemonic, bit length, then attributes
     (CTI/fill-slot/special flags), the machine mask, and the pipeline(s)
     the insn may issue in (PIPE_O/S/OS/O_OS/NONE).  */
  /* Special null first entry.
     A `num' value of zero is thus invalid.
     Also, the special `invalid' insn resides here.  */
  { 0, 0, 0, 0, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* add $dr,$sr */
  { M32R_INSN_ADD, "add", "add", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* add3 $dr,$sr,$hash$slo16 */
  { M32R_INSN_ADD3, "add3", "add3", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* and $dr,$sr */
  { M32R_INSN_AND, "and", "and", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* and3 $dr,$sr,$uimm16 */
  { M32R_INSN_AND3, "and3", "and3", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* or $dr,$sr */
  { M32R_INSN_OR, "or", "or", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* or3 $dr,$sr,$hash$ulo16 */
  { M32R_INSN_OR3, "or3", "or3", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* xor $dr,$sr */
  { M32R_INSN_XOR, "xor", "xor", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* xor3 $dr,$sr,$uimm16 */
  { M32R_INSN_XOR3, "xor3", "xor3", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* addi $dr,$simm8 */
  { M32R_INSN_ADDI, "addi", "addi", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* addv $dr,$sr */
  { M32R_INSN_ADDV, "addv", "addv", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* addv3 $dr,$sr,$simm16 */
  { M32R_INSN_ADDV3, "addv3", "addv3", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* addx $dr,$sr */
  { M32R_INSN_ADDX, "addx", "addx", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* bc.s $disp8 */
  { M32R_INSN_BC8, "bc8", "bc.s", 16, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* bc.l $disp24 */
  { M32R_INSN_BC24, "bc24", "bc.l", 32, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* beq $src1,$src2,$disp16 */
  { M32R_INSN_BEQ, "beq", "beq", 32, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* beqz $src2,$disp16 */
  { M32R_INSN_BEQZ, "beqz", "beqz", 32, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bgez $src2,$disp16 */
  { M32R_INSN_BGEZ, "bgez", "bgez", 32, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bgtz $src2,$disp16 */
  { M32R_INSN_BGTZ, "bgtz", "bgtz", 32, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* blez $src2,$disp16 */
  { M32R_INSN_BLEZ, "blez", "blez", 32, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bltz $src2,$disp16 */
  { M32R_INSN_BLTZ, "bltz", "bltz", 32, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bnez $src2,$disp16 */
  { M32R_INSN_BNEZ, "bnez", "bnez", 32, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bl.s $disp8 */
  { M32R_INSN_BL8, "bl8", "bl.s", 16, { 0|A(FILL_SLOT)|A(UNCOND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* bl.l $disp24 */
  { M32R_INSN_BL24, "bl24", "bl.l", 32, { 0|A(UNCOND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bcl.s $disp8 */
  { M32R_INSN_BCL8, "bcl8", "bcl.s", 16, { 0|A(FILL_SLOT)|A(COND_CTI), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_O, 0 } } } } },
/* bcl.l $disp24 */
  { M32R_INSN_BCL24, "bcl24", "bcl.l", 32, { 0|A(COND_CTI), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bnc.s $disp8 */
  { M32R_INSN_BNC8, "bnc8", "bnc.s", 16, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* bnc.l $disp24 */
  { M32R_INSN_BNC24, "bnc24", "bnc.l", 32, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bne $src1,$src2,$disp16 */
  { M32R_INSN_BNE, "bne", "bne", 32, { 0|A(COND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bra.s $disp8 */
  { M32R_INSN_BRA8, "bra8", "bra.s", 16, { 0|A(FILL_SLOT)|A(UNCOND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* bra.l $disp24 */
  { M32R_INSN_BRA24, "bra24", "bra.l", 32, { 0|A(UNCOND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bncl.s $disp8 */
  { M32R_INSN_BNCL8, "bncl8", "bncl.s", 16, { 0|A(FILL_SLOT)|A(COND_CTI), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_O, 0 } } } } },
/* bncl.l $disp24 */
  { M32R_INSN_BNCL24, "bncl24", "bncl.l", 32, { 0|A(COND_CTI), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* cmp $src1,$src2 */
  { M32R_INSN_CMP, "cmp", "cmp", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* cmpi $src2,$simm16 */
  { M32R_INSN_CMPI, "cmpi", "cmpi", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* cmpu $src1,$src2 */
  { M32R_INSN_CMPU, "cmpu", "cmpu", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* cmpui $src2,$simm16 */
  { M32R_INSN_CMPUI, "cmpui", "cmpui", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* cmpeq $src1,$src2 */
  { M32R_INSN_CMPEQ, "cmpeq", "cmpeq", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_OS, 0 } } } } },
/* cmpz $src2 */
  { M32R_INSN_CMPZ, "cmpz", "cmpz", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_OS, 0 } } } } },
/* div $dr,$sr */
  { M32R_INSN_DIV, "div", "div", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* divu $dr,$sr */
  { M32R_INSN_DIVU, "divu", "divu", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* rem $dr,$sr */
  { M32R_INSN_REM, "rem", "rem", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* remu $dr,$sr */
  { M32R_INSN_REMU, "remu", "remu", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* remh $dr,$sr */
  { M32R_INSN_REMH, "remh", "remh", 32, { 0, { { { (1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* remuh $dr,$sr */
  { M32R_INSN_REMUH, "remuh", "remuh", 32, { 0, { { { (1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* remb $dr,$sr */
  { M32R_INSN_REMB, "remb", "remb", 32, { 0, { { { (1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* remub $dr,$sr */
  { M32R_INSN_REMUB, "remub", "remub", 32, { 0, { { { (1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* divuh $dr,$sr */
  { M32R_INSN_DIVUH, "divuh", "divuh", 32, { 0, { { { (1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* divb $dr,$sr */
  { M32R_INSN_DIVB, "divb", "divb", 32, { 0, { { { (1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* divub $dr,$sr */
  { M32R_INSN_DIVUB, "divub", "divub", 32, { 0, { { { (1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* divh $dr,$sr */
  { M32R_INSN_DIVH, "divh", "divh", 32, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* jc $sr */
  { M32R_INSN_JC, "jc", "jc", 16, { 0|A(SPECIAL)|A(COND_CTI), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_O, 0 } } } } },
/* jnc $sr */
  { M32R_INSN_JNC, "jnc", "jnc", 16, { 0|A(SPECIAL)|A(COND_CTI), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_O, 0 } } } } },
/* jl $sr */
  { M32R_INSN_JL, "jl", "jl", 16, { 0|A(FILL_SLOT)|A(UNCOND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* jmp $sr */
  { M32R_INSN_JMP, "jmp", "jmp", 16, { 0|A(UNCOND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* ld $dr,@$sr */
  { M32R_INSN_LD, "ld", "ld", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* ld $dr,@($slo16,$sr) */
  { M32R_INSN_LD_D, "ld-d", "ld", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* ldb $dr,@$sr */
  { M32R_INSN_LDB, "ldb", "ldb", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* ldb $dr,@($slo16,$sr) */
  { M32R_INSN_LDB_D, "ldb-d", "ldb", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* ldh $dr,@$sr */
  { M32R_INSN_LDH, "ldh", "ldh", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* ldh $dr,@($slo16,$sr) */
  { M32R_INSN_LDH_D, "ldh-d", "ldh", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* ldub $dr,@$sr */
  { M32R_INSN_LDUB, "ldub", "ldub", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* ldub $dr,@($slo16,$sr) */
  { M32R_INSN_LDUB_D, "ldub-d", "ldub", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* lduh $dr,@$sr */
  { M32R_INSN_LDUH, "lduh", "lduh", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* lduh $dr,@($slo16,$sr) */
  { M32R_INSN_LDUH_D, "lduh-d", "lduh", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* ld $dr,@$sr+ */
  { M32R_INSN_LD_PLUS, "ld-plus", "ld", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* ld24 $dr,$uimm24 */
  { M32R_INSN_LD24, "ld24", "ld24", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* ldi8 $dr,$simm8 */
  { M32R_INSN_LDI8, "ldi8", "ldi8", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* ldi16 $dr,$hash$slo16 */
  { M32R_INSN_LDI16, "ldi16", "ldi16", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* lock $dr,@$sr */
  { M32R_INSN_LOCK, "lock", "lock", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* machi $src1,$src2 */
  { M32R_INSN_MACHI, "machi", "machi", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* machi $src1,$src2,$acc */
  { M32R_INSN_MACHI_A, "machi-a", "machi", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* maclo $src1,$src2 */
  { M32R_INSN_MACLO, "maclo", "maclo", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* maclo $src1,$src2,$acc */
  { M32R_INSN_MACLO_A, "maclo-a", "maclo", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* macwhi $src1,$src2 */
  { M32R_INSN_MACWHI, "macwhi", "macwhi", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* macwhi $src1,$src2,$acc */
  { M32R_INSN_MACWHI_A, "macwhi-a", "macwhi", 16, { 0|A(SPECIAL), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* macwlo $src1,$src2 */
  { M32R_INSN_MACWLO, "macwlo", "macwlo", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* macwlo $src1,$src2,$acc */
  { M32R_INSN_MACWLO_A, "macwlo-a", "macwlo", 16, { 0|A(SPECIAL), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mul $dr,$sr */
  { M32R_INSN_MUL, "mul", "mul", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_S, 0 } } } } },
/* mulhi $src1,$src2 */
  { M32R_INSN_MULHI, "mulhi", "mulhi", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* mulhi $src1,$src2,$acc */
  { M32R_INSN_MULHI_A, "mulhi-a", "mulhi", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mullo $src1,$src2 */
  { M32R_INSN_MULLO, "mullo", "mullo", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* mullo $src1,$src2,$acc */
  { M32R_INSN_MULLO_A, "mullo-a", "mullo", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mulwhi $src1,$src2 */
  { M32R_INSN_MULWHI, "mulwhi", "mulwhi", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* mulwhi $src1,$src2,$acc */
  { M32R_INSN_MULWHI_A, "mulwhi-a", "mulwhi", 16, { 0|A(SPECIAL), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mulwlo $src1,$src2 */
  { M32R_INSN_MULWLO, "mulwlo", "mulwlo", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* mulwlo $src1,$src2,$acc */
  { M32R_INSN_MULWLO_A, "mulwlo-a", "mulwlo", 16, { 0|A(SPECIAL), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mv $dr,$sr */
  { M32R_INSN_MV, "mv", "mv", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* mvfachi $dr */
  { M32R_INSN_MVFACHI, "mvfachi", "mvfachi", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* mvfachi $dr,$accs */
  { M32R_INSN_MVFACHI_A, "mvfachi-a", "mvfachi", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mvfaclo $dr */
  { M32R_INSN_MVFACLO, "mvfaclo", "mvfaclo", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* mvfaclo $dr,$accs */
  { M32R_INSN_MVFACLO_A, "mvfaclo-a", "mvfaclo", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mvfacmi $dr */
  { M32R_INSN_MVFACMI, "mvfacmi", "mvfacmi", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* mvfacmi $dr,$accs */
  { M32R_INSN_MVFACMI_A, "mvfacmi-a", "mvfacmi", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mvfc $dr,$scr */
  { M32R_INSN_MVFC, "mvfc", "mvfc", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* mvtachi $src1 */
  { M32R_INSN_MVTACHI, "mvtachi", "mvtachi", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* mvtachi $src1,$accs */
  { M32R_INSN_MVTACHI_A, "mvtachi-a", "mvtachi", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mvtaclo $src1 */
  { M32R_INSN_MVTACLO, "mvtaclo", "mvtaclo", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* mvtaclo $src1,$accs */
  { M32R_INSN_MVTACLO_A, "mvtaclo-a", "mvtaclo", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mvtc $sr,$dcr */
  { M32R_INSN_MVTC, "mvtc", "mvtc", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* neg $dr,$sr */
  { M32R_INSN_NEG, "neg", "neg", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* nop */
  { M32R_INSN_NOP, "nop", "nop", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* not $dr,$sr */
  { M32R_INSN_NOT, "not", "not", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* rac */
  { M32R_INSN_RAC, "rac", "rac", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* rac $accd,$accs,$imm1 */
  { M32R_INSN_RAC_DSI, "rac-dsi", "rac", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* rach */
  { M32R_INSN_RACH, "rach", "rach", 16, { 0, { { { (1<<MACH_M32R), 0 } }, { { PIPE_S, 0 } } } } },
/* rach $accd,$accs,$imm1 */
  { M32R_INSN_RACH_DSI, "rach-dsi", "rach", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* rte */
  { M32R_INSN_RTE, "rte", "rte", 16, { 0|A(UNCOND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* seth $dr,$hash$hi16 */
  { M32R_INSN_SETH, "seth", "seth", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* sll $dr,$sr */
  { M32R_INSN_SLL, "sll", "sll", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O_OS, 0 } } } } },
/* sll3 $dr,$sr,$simm16 */
  { M32R_INSN_SLL3, "sll3", "sll3", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* slli $dr,$uimm5 */
  { M32R_INSN_SLLI, "slli", "slli", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O_OS, 0 } } } } },
/* sra $dr,$sr */
  { M32R_INSN_SRA, "sra", "sra", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O_OS, 0 } } } } },
/* sra3 $dr,$sr,$simm16 */
  { M32R_INSN_SRA3, "sra3", "sra3", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* srai $dr,$uimm5 */
  { M32R_INSN_SRAI, "srai", "srai", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O_OS, 0 } } } } },
/* srl $dr,$sr */
  { M32R_INSN_SRL, "srl", "srl", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O_OS, 0 } } } } },
/* srl3 $dr,$sr,$simm16 */
  { M32R_INSN_SRL3, "srl3", "srl3", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* srli $dr,$uimm5 */
  { M32R_INSN_SRLI, "srli", "srli", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O_OS, 0 } } } } },
/* st $src1,@$src2 */
  { M32R_INSN_ST, "st", "st", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* st $src1,@($slo16,$src2) */
  { M32R_INSN_ST_D, "st-d", "st", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* stb $src1,@$src2 */
  { M32R_INSN_STB, "stb", "stb", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* stb $src1,@($slo16,$src2) */
  { M32R_INSN_STB_D, "stb-d", "stb", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* sth $src1,@$src2 */
  { M32R_INSN_STH, "sth", "sth", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* sth $src1,@($slo16,$src2) */
  { M32R_INSN_STH_D, "sth-d", "sth", 32, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* st $src1,@+$src2 */
  { M32R_INSN_ST_PLUS, "st-plus", "st", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* sth $src1,@$src2+ */
  { M32R_INSN_STH_PLUS, "sth-plus", "sth", 16, { 0|A(SPECIAL), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_O, 0 } } } } },
/* stb $src1,@$src2+ */
  { M32R_INSN_STB_PLUS, "stb-plus", "stb", 16, { 0|A(SPECIAL), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_O, 0 } } } } },
/* st $src1,@-$src2 */
  { M32R_INSN_ST_MINUS, "st-minus", "st", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* sub $dr,$sr */
  { M32R_INSN_SUB, "sub", "sub", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* subv $dr,$sr */
  { M32R_INSN_SUBV, "subv", "subv", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* subx $dr,$sr */
  { M32R_INSN_SUBX, "subx", "subx", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_OS, 0 } } } } },
/* trap $uimm4 */
  { M32R_INSN_TRAP, "trap", "trap", 16, { 0|A(FILL_SLOT)|A(UNCOND_CTI), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* unlock $src1,@$src2 */
  { M32R_INSN_UNLOCK, "unlock", "unlock", 16, { 0, { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* satb $dr,$sr */
  { M32R_INSN_SATB, "satb", "satb", 32, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* sath $dr,$sr */
  { M32R_INSN_SATH, "sath", "sath", 32, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* sat $dr,$sr */
  { M32R_INSN_SAT, "sat", "sat", 32, { 0|A(SPECIAL), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_NONE, 0 } } } } },
/* pcmpbz $src2 */
  { M32R_INSN_PCMPBZ, "pcmpbz", "pcmpbz", 16, { 0|A(SPECIAL), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_OS, 0 } } } } },
/* sadd */
  { M32R_INSN_SADD, "sadd", "sadd", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* macwu1 $src1,$src2 */
  { M32R_INSN_MACWU1, "macwu1", "macwu1", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* msblo $src1,$src2 */
  { M32R_INSN_MSBLO, "msblo", "msblo", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* mulwu1 $src1,$src2 */
  { M32R_INSN_MULWU1, "mulwu1", "mulwu1", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* maclh1 $src1,$src2 */
  { M32R_INSN_MACLH1, "maclh1", "maclh1", 16, { 0, { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_S, 0 } } } } },
/* sc */
  { M32R_INSN_SC, "sc", "sc", 16, { 0|A(SPECIAL)|A(SKIP_CTI), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_O, 0 } } } } },
/* snc */
  { M32R_INSN_SNC, "snc", "snc", 16, { 0|A(SPECIAL)|A(SKIP_CTI), { { { (1<<MACH_M32RX)|(1<<MACH_M32R2), 0 } }, { { PIPE_O, 0 } } } } },
/* clrpsw $uimm8 */
  { M32R_INSN_CLRPSW, "clrpsw", "clrpsw", 16, { 0|A(SPECIAL_M32R), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* setpsw $uimm8 */
  { M32R_INSN_SETPSW, "setpsw", "setpsw", 16, { 0|A(SPECIAL_M32R), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
/* bset $uimm3,@($slo16,$sr) */
  { M32R_INSN_BSET, "bset", "bset", 32, { 0|A(SPECIAL_M32R), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* bclr $uimm3,@($slo16,$sr) */
  { M32R_INSN_BCLR, "bclr", "bclr", 32, { 0|A(SPECIAL_M32R), { { { (1<<MACH_BASE), 0 } }, { { PIPE_NONE, 0 } } } } },
/* btst $uimm3,$sr */
  { M32R_INSN_BTST, "btst", "btst", 16, { 0|A(SPECIAL_M32R), { { { (1<<MACH_BASE), 0 } }, { { PIPE_O, 0 } } } } },
};

#undef OP
#undef A

/* Initialize anything needed to be done once, before any cpu_open call.
*/

/* One-time global initialization hook; nothing is needed for m32r.  */
static void
init_tables (void)
{
}

static const CGEN_MACH * lookup_mach_via_bfd_name (const CGEN_MACH *, const char *);
static void build_hw_table  (CGEN_CPU_TABLE *);
static void build_ifield_table  (CGEN_CPU_TABLE *);
static void build_operand_table (CGEN_CPU_TABLE *);
static void build_insn_table    (CGEN_CPU_TABLE *);
static void m32r_cgen_rebuild_tables (CGEN_CPU_TABLE *);

/* Subroutine of m32r_cgen_cpu_open to look up a mach via its bfd name.
   TABLE is a sentinel-terminated array of CGEN_MACH; NAME must match one
   of the bfd names or the process is aborted.  */

static const CGEN_MACH *
lookup_mach_via_bfd_name (const CGEN_MACH *table, const char *name)
{
  while (table->name)
    {
      if (strcmp (name, table->bfd_name) == 0)
	return table;
      ++table;
    }
  /* Unknown bfd machine name: treated as a caller bug.  */
  abort ();
}

/* Subroutine of m32r_cgen_cpu_open to build the hardware table.  */

static void
build_hw_table (CGEN_CPU_TABLE *cd)
{
  int i;
  int machs = cd->machs;
  const CGEN_HW_ENTRY *init = & m32r_cgen_hw_table[0];
  /* MAX_HW is only an upper bound on the number of selected entries.
     However each entry is indexed by its enum so there can be holes in
     the table.  */
  const CGEN_HW_ENTRY **selected =
    (const CGEN_HW_ENTRY **) xmalloc (MAX_HW * sizeof (CGEN_HW_ENTRY *));

  cd->hw_table.init_entries = init;
  cd->hw_table.entry_size = sizeof (CGEN_HW_ENTRY);
  memset (selected, 0, MAX_HW * sizeof (CGEN_HW_ENTRY *));
  /* ??? For now we just use machs to determine which ones we want.  */
  for (i = 0; init[i].name != NULL; ++i)
    if (CGEN_HW_ATTR_VALUE (&init[i], CGEN_HW_MACH) & machs)
      selected[init[i].type] = &init[i];
  cd->hw_table.entries = selected;
  cd->hw_table.num_entries = MAX_HW;
}

/* Subroutine of m32r_cgen_cpu_open to build the instruction field table.
   (All entries are shared; no per-mach selection is done here.)  */

static void
build_ifield_table (CGEN_CPU_TABLE *cd)
{
  cd->ifld_table = & m32r_cgen_ifld_table[0];
}

/* Subroutine of m32r_cgen_cpu_open to build the operand table.  */

static void
build_operand_table (CGEN_CPU_TABLE *cd)
{
  int i;
  int machs = cd->machs;
  const CGEN_OPERAND *init = & m32r_cgen_operand_table[0];
  /* MAX_OPERANDS is only an upper bound on the number of selected entries.
     However each entry is indexed by its enum so there can be holes in
     the table.  */
  const CGEN_OPERAND **selected = xmalloc (MAX_OPERANDS * sizeof (* selected));

  cd->operand_table.init_entries = init;
  cd->operand_table.entry_size = sizeof (CGEN_OPERAND);
  memset (selected, 0, MAX_OPERANDS * sizeof (CGEN_OPERAND *));
  /* ??? For now we just use mach to determine which ones we want.  */
  for (i = 0; init[i].name != NULL; ++i)
    if (CGEN_OPERAND_ATTR_VALUE (&init[i], CGEN_OPERAND_MACH) & machs)
      selected[init[i].type] = &init[i];
  cd->operand_table.entries = selected;
  cd->operand_table.num_entries = MAX_OPERANDS;
}

/* Subroutine of m32r_cgen_cpu_open to build the instruction table.

   ??? This could leave out insns not supported by the specified mach/isa,
   but that would cause errors like "foo only supported by bar" to become
   "unknown insn", so for now we include all insns and require the app to
   do the checking later.
   ??? On the other hand, parsing of such insns may require their hardware or
   operand elements to be in the table [which they mightn't be].  */

static void
build_insn_table (CGEN_CPU_TABLE *cd)
{
  int i;
  const CGEN_IBASE *ib = & m32r_cgen_insn_table[0];
  CGEN_INSN *insns = xmalloc (MAX_INSNS * sizeof (CGEN_INSN));

  memset (insns, 0, MAX_INSNS * sizeof (CGEN_INSN));
  for (i = 0; i < MAX_INSNS; ++i)
    insns[i].base = &ib[i];
  cd->insn_table.init_entries = insns;
  cd->insn_table.entry_size = sizeof (CGEN_IBASE);
  cd->insn_table.num_init_entries = MAX_INSNS;
}

/* Subroutine of m32r_cgen_cpu_open to rebuild the tables.
   Recomputes the size data derived from the selected isas/machs, then
   rebuilds the hardware, ifield, operand and insn tables.  */

static void
m32r_cgen_rebuild_tables (CGEN_CPU_TABLE *cd)
{
  int i;
  CGEN_BITSET *isas = cd->isas;
  unsigned int machs = cd->machs;

  cd->int_insn_p = CGEN_INT_INSN_P;

  /* Data derived from the isa spec.  */
#define UNSET (CGEN_SIZE_UNKNOWN + 1)
  cd->default_insn_bitsize = UNSET;
  cd->base_insn_bitsize = UNSET;
  cd->min_insn_bitsize = 65535; /* Some ridiculously big number.  */
  cd->max_insn_bitsize = 0;
  for (i = 0; i < MAX_ISAS; ++i)
    if (cgen_bitset_contains (isas, i))
      {
	const CGEN_ISA *isa = & m32r_cgen_isa_table[i];

	/* Default insn sizes of all selected isas must be
	   equal or we set the result to 0, meaning "unknown".  */
	if (cd->default_insn_bitsize == UNSET)
	  cd->default_insn_bitsize = isa->default_insn_bitsize;
	else if (isa->default_insn_bitsize == cd->default_insn_bitsize)
	  ; /* This is ok.  */
	else
	  cd->default_insn_bitsize = CGEN_SIZE_UNKNOWN;

	/* Base insn sizes of all selected isas must be equal
	   or we set the result to 0, meaning "unknown".  */
	if (cd->base_insn_bitsize == UNSET)
	  cd->base_insn_bitsize = isa->base_insn_bitsize;
	else if (isa->base_insn_bitsize == cd->base_insn_bitsize)
	  ; /* This is ok.  */
	else
	  cd->base_insn_bitsize = CGEN_SIZE_UNKNOWN;

	/* Set min,max insn sizes.  */
	if (isa->min_insn_bitsize < cd->min_insn_bitsize)
	  cd->min_insn_bitsize = isa->min_insn_bitsize;
	if (isa->max_insn_bitsize > cd->max_insn_bitsize)
	  cd->max_insn_bitsize = isa->max_insn_bitsize;
      }

  /* Data derived from the mach spec.  */
  for (i = 0; i < MAX_MACHS; ++i)
    if (((1 << i) & machs) != 0)
      {
	const CGEN_MACH *mach = & m32r_cgen_mach_table[i];

	if (mach->insn_chunk_bitsize != 0)
	{
	  if (cd->insn_chunk_bitsize != 0 && cd->insn_chunk_bitsize != mach->insn_chunk_bitsize)
	    {
	      fprintf (stderr, "m32r_cgen_rebuild_tables: conflicting insn-chunk-bitsize values: `%d' vs. `%d'\n",
		       cd->insn_chunk_bitsize, mach->insn_chunk_bitsize);
	      abort ();
	    }

 	  cd->insn_chunk_bitsize = mach->insn_chunk_bitsize;
	}
      }

  /* Determine which hw elements are used by MACH.  */
  build_hw_table (cd);

  /* Build the ifield table.  */
  build_ifield_table (cd);

  /* Determine which operands are used by MACH/ISA.  */
  build_operand_table (cd);

  /* Build the instruction table.  */
  build_insn_table (cd);
}

/* Initialize a cpu table and return a descriptor.
   It's much like opening a file, and must be the first function called.
   The arguments are a set of (type/value) pairs, terminated with
   CGEN_CPU_OPEN_END.
Currently supported values: CGEN_CPU_OPEN_ISAS: bitmap of values in enum isa_attr CGEN_CPU_OPEN_MACHS: bitmap of values in enum mach_attr CGEN_CPU_OPEN_BFDMACH: specify 1 mach using bfd name CGEN_CPU_OPEN_ENDIAN: specify endian choice CGEN_CPU_OPEN_END: terminates arguments ??? Simultaneous multiple isas might not make sense, but it's not (yet) precluded. */ CGEN_CPU_DESC m32r_cgen_cpu_open (enum cgen_cpu_open_arg arg_type, ...) { CGEN_CPU_TABLE *cd = (CGEN_CPU_TABLE *) xmalloc (sizeof (CGEN_CPU_TABLE)); static int init_p; CGEN_BITSET *isas = 0; /* 0 = "unspecified" */ unsigned int machs = 0; /* 0 = "unspecified" */ enum cgen_endian endian = CGEN_ENDIAN_UNKNOWN; va_list ap; if (! init_p) { init_tables (); init_p = 1; } memset (cd, 0, sizeof (*cd)); va_start (ap, arg_type); while (arg_type != CGEN_CPU_OPEN_END) { switch (arg_type) { case CGEN_CPU_OPEN_ISAS : isas = va_arg (ap, CGEN_BITSET *); break; case CGEN_CPU_OPEN_MACHS : machs = va_arg (ap, unsigned int); break; case CGEN_CPU_OPEN_BFDMACH : { const char *name = va_arg (ap, const char *); const CGEN_MACH *mach = lookup_mach_via_bfd_name (m32r_cgen_mach_table, name); machs |= 1 << mach->num; break; } case CGEN_CPU_OPEN_ENDIAN : endian = va_arg (ap, enum cgen_endian); break; default : fprintf (stderr, "m32r_cgen_cpu_open: unsupported argument `%d'\n", arg_type); abort (); /* ??? return NULL? */ } arg_type = va_arg (ap, enum cgen_cpu_open_arg); } va_end (ap); /* Mach unspecified means "all". */ if (machs == 0) machs = (1 << MAX_MACHS) - 1; /* Base mach is always selected. */ machs |= 1; if (endian == CGEN_ENDIAN_UNKNOWN) { /* ??? If target has only one, could have a default. */ fprintf (stderr, "m32r_cgen_cpu_open: no endianness specified\n"); abort (); } cd->isas = cgen_bitset_copy (isas); cd->machs = machs; cd->endian = endian; /* FIXME: for the sparc case we can determine insn-endianness statically. 
The worry here is where both data and insn endian can be independently chosen, in which case this function will need another argument. Actually, will want to allow for more arguments in the future anyway. */ cd->insn_endian = endian; /* Table (re)builder. */ cd->rebuild_tables = m32r_cgen_rebuild_tables; m32r_cgen_rebuild_tables (cd); /* Default to not allowing signed overflow. */ cd->signed_overflow_ok_p = 0; return (CGEN_CPU_DESC) cd; } /* Cover fn to m32r_cgen_cpu_open to handle the simple case of 1 isa, 1 mach. MACH_NAME is the bfd name of the mach. */ CGEN_CPU_DESC m32r_cgen_cpu_open_1 (const char *mach_name, enum cgen_endian endian) { return m32r_cgen_cpu_open (CGEN_CPU_OPEN_BFDMACH, mach_name, CGEN_CPU_OPEN_ENDIAN, endian, CGEN_CPU_OPEN_END); } /* Close a cpu table. ??? This can live in a machine independent file, but there's currently no place to put this file (there's no libcgen). libopcodes is the wrong place as some simulator ports use this but they don't use libopcodes. */ void m32r_cgen_cpu_close (CGEN_CPU_DESC cd) { unsigned int i; const CGEN_INSN *insns; if (cd->macro_insn_table.init_entries) { insns = cd->macro_insn_table.init_entries; for (i = 0; i < cd->macro_insn_table.num_init_entries; ++i, ++insns) if (CGEN_INSN_RX ((insns))) regfree (CGEN_INSN_RX (insns)); } if (cd->insn_table.init_entries) { insns = cd->insn_table.init_entries; for (i = 0; i < cd->insn_table.num_init_entries; ++i, ++insns) if (CGEN_INSN_RX (insns)) regfree (CGEN_INSN_RX (insns)); } if (cd->macro_insn_table.init_entries) free ((CGEN_INSN *) cd->macro_insn_table.init_entries); if (cd->insn_table.init_entries) free ((CGEN_INSN *) cd->insn_table.init_entries); if (cd->hw_table.entries) free ((CGEN_HW_ENTRY *) cd->hw_table.entries); if (cd->operand_table.entries) free ((CGEN_HW_ENTRY *) cd->operand_table.entries); free (cd); }
gpl-2.0
goodwinos/linux-dm
drivers/net/wireless/ath/ath5k/ahb.c
155
6280
/* * Copyright (c) 2008-2009 Atheros Communications Inc. * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/nl80211.h> #include <linux/platform_device.h> #include <linux/etherdevice.h> #include <linux/export.h> #include <ar231x_platform.h> #include "ath5k.h" #include "debug.h" #include "base.h" #include "reg.h" /* return bus cachesize in 4B word units */ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz) { *csz = L1_CACHE_BYTES >> 2; } static bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) { struct ath5k_hw *ah = common->priv; struct platform_device *pdev = to_platform_device(ah->dev); struct ar231x_board_config *bcfg = pdev->dev.platform_data; u16 *eeprom, *eeprom_end; bcfg = pdev->dev.platform_data; eeprom = (u16 *) bcfg->radio; eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ; eeprom += off; if (eeprom > eeprom_end) return false; *data = *eeprom; return true; } int ath5k_hw_read_srev(struct ath5k_hw *ah) { struct platform_device *pdev = to_platform_device(ah->dev); struct ar231x_board_config *bcfg = pdev->dev.platform_data; ah->ah_mac_srev = bcfg->devid; return 0; } static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 
*mac) { struct platform_device *pdev = to_platform_device(ah->dev); struct ar231x_board_config *bcfg = pdev->dev.platform_data; u8 *cfg_mac; if (to_platform_device(ah->dev)->id == 0) cfg_mac = bcfg->config->wlan0_mac; else cfg_mac = bcfg->config->wlan1_mac; memcpy(mac, cfg_mac, ETH_ALEN); return 0; } static const struct ath_bus_ops ath_ahb_bus_ops = { .ath_bus_type = ATH_AHB, .read_cachesize = ath5k_ahb_read_cachesize, .eeprom_read = ath5k_ahb_eeprom_read, .eeprom_read_mac = ath5k_ahb_eeprom_read_mac, }; /*Initialization*/ static int ath_ahb_probe(struct platform_device *pdev) { struct ar231x_board_config *bcfg = pdev->dev.platform_data; struct ath5k_hw *ah; struct ieee80211_hw *hw; struct resource *res; void __iomem *mem; int irq; int ret = 0; u32 reg; if (!pdev->dev.platform_data) { dev_err(&pdev->dev, "no platform data specified\n"); ret = -EINVAL; goto err_out; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no memory resource found\n"); ret = -ENXIO; goto err_out; } mem = ioremap_nocache(res->start, resource_size(res)); if (mem == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_out; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { dev_err(&pdev->dev, "no IRQ resource found\n"); ret = -ENXIO; goto err_iounmap; } irq = res->start; hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops); if (hw == NULL) { dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); ret = -ENOMEM; goto err_iounmap; } ah = hw->priv; ah->hw = hw; ah->dev = &pdev->dev; ah->iobase = mem; ah->irq = irq; ah->devid = bcfg->devid; if (bcfg->devid >= AR5K_SREV_AR2315_R6) { /* Enable WMAC AHB arbitration */ reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL); reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN; iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL); /* Enable global WMAC swapping */ reg = ioread32((void __iomem *) AR5K_AR2315_BYTESWAP); reg |= AR5K_AR2315_BYTESWAP_WMAC; iowrite32(reg, 
(void __iomem *) AR5K_AR2315_BYTESWAP); } else { /* Enable WMAC DMA access (assuming 5312 or 231x*/ /* TODO: check other platforms */ reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE); if (to_platform_device(ah->dev)->id == 0) reg |= AR5K_AR5312_ENABLE_WLAN0; else reg |= AR5K_AR5312_ENABLE_WLAN1; iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE); /* * On a dual-band AR5312, the multiband radio is only * used as pass-through. Disable 2 GHz support in the * driver for it */ if (to_platform_device(ah->dev)->id == 0 && (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) == (BD_WLAN1 | BD_WLAN0)) ah->ah_capabilities.cap_needs_2GHz_ovr = true; else ah->ah_capabilities.cap_needs_2GHz_ovr = false; } ret = ath5k_init_ah(ah, &ath_ahb_bus_ops); if (ret != 0) { dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret); ret = -ENODEV; goto err_free_hw; } platform_set_drvdata(pdev, hw); return 0; err_free_hw: ieee80211_free_hw(hw); err_iounmap: iounmap(mem); err_out: return ret; } static int ath_ahb_remove(struct platform_device *pdev) { struct ar231x_board_config *bcfg = pdev->dev.platform_data; struct ieee80211_hw *hw = platform_get_drvdata(pdev); struct ath5k_hw *ah; u32 reg; if (!hw) return 0; ah = hw->priv; if (bcfg->devid >= AR5K_SREV_AR2315_R6) { /* Disable WMAC AHB arbitration */ reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL); reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN; iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL); } else { /*Stop DMA access */ reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE); if (to_platform_device(ah->dev)->id == 0) reg &= ~AR5K_AR5312_ENABLE_WLAN0; else reg &= ~AR5K_AR5312_ENABLE_WLAN1; iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE); } ath5k_deinit_ah(ah); iounmap(ah->iobase); ieee80211_free_hw(hw); return 0; } static struct platform_driver ath_ahb_driver = { .probe = ath_ahb_probe, .remove = ath_ahb_remove, .driver = { .name = "ar231x-wmac", .owner = THIS_MODULE, }, }; module_platform_driver(ath_ahb_driver);
gpl-2.0
CyanogenMod/android_kernel_xiaomi_aries
drivers/staging/prima/CORE/MAC/src/pe/lim/limApi.c
155
71400
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Airgo Networks, Inc proprietary. All rights reserved. * This file limApi.cc contains the functions that are * exported by LIM to other modules. 
* * Author: Chandra Modumudi * Date: 02/11/02 * History:- * Date Modified by Modification Information * -------------------------------------------------------------------- * */ #include "palTypes.h" #include "wniCfgSta.h" #include "wniApi.h" #include "sirCommon.h" #include "sirDebug.h" #include "cfgApi.h" #include "schApi.h" #include "utilsApi.h" #include "limApi.h" #include "limGlobal.h" #include "limTypes.h" #include "limUtils.h" #include "limAssocUtils.h" #include "limPropExtsUtils.h" #include "limSerDesUtils.h" #include "limIbssPeerMgmt.h" #include "limAdmitControl.h" #include "pmmApi.h" #include "logDump.h" #include "limSendSmeRspMessages.h" #include "wmmApsd.h" #include "limTrace.h" #include "limSession.h" #include "wlan_qct_wda.h" #if defined WLAN_FEATURE_VOWIFI #include "rrmApi.h" #endif #include <limFT.h> #include "vos_types.h" #include "vos_packet.h" #include "wlan_qct_tl.h" #include "sysStartup.h" static void __limInitScanVars(tpAniSirGlobal pMac) { pMac->lim.gLimUseScanModeForLearnMode = 1; pMac->lim.gLimSystemInScanLearnMode = 0; // Scan related globals on STA pMac->lim.gLimReturnAfterFirstMatch = 0; pMac->lim.gLim24Band11dScanDone = 0; pMac->lim.gLim50Band11dScanDone = 0; pMac->lim.gLimReturnUniqueResults = 0; // Background Scan related globals on STA pMac->lim.gLimNumOfBackgroundScanSuccess = 0; pMac->lim.gLimNumOfConsecutiveBkgndScanFailure = 0; pMac->lim.gLimNumOfForcedBkgndScan = 0; pMac->lim.gLimBackgroundScanDisable = false; //based on BG timer pMac->lim.gLimForceBackgroundScanDisable = false; //debug control flag pMac->lim.gLimBackgroundScanTerminate = TRUE; //controlled by SME pMac->lim.gLimReportBackgroundScanResults = FALSE; //controlled by SME pMac->lim.gLimCurrentScanChannelId = 0; pMac->lim.gpLimMlmScanReq = NULL; pMac->lim.gDeferMsgTypeForNOA = 0; pMac->lim.gpDefdSmeMsgForNOA = NULL; pMac->lim.gLimMlmScanResultLength = 0; pMac->lim.gLimSmeScanResultLength = 0; palZeroMemory(pMac->hHdd, pMac->lim.gLimCachedScanHashTable, 
sizeof(pMac->lim.gLimCachedScanHashTable)); #ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD pMac->lim.gLimMlmLfrScanResultLength = 0; pMac->lim.gLimSmeLfrScanResultLength = 0; palZeroMemory(pMac->hHdd, pMac->lim.gLimCachedLfrScanHashTable, sizeof(pMac->lim.gLimCachedLfrScanHashTable)); #endif pMac->lim.gLimBackgroundScanChannelId = 0; pMac->lim.gLimBackgroundScanStarted = 0; pMac->lim.gLimRestoreCBNumScanInterval = LIM_RESTORE_CB_NUM_SCAN_INTERVAL_DEFAULT; pMac->lim.gLimRestoreCBCount = 0; palZeroMemory(pMac->hHdd, pMac->lim.gLimLegacyBssidList, sizeof(pMac->lim.gLimLegacyBssidList)); /* Fill in default values */ pMac->lim.gLimTriggerBackgroundScanDuringQuietBss = 0; // abort scan is used to abort an on-going scan pMac->lim.abortScan = 0; palZeroMemory(pMac->hHdd, &pMac->lim.scanChnInfo, sizeof(tLimScanChnInfo)); palZeroMemory(pMac->hHdd, &pMac->lim.dfschannelList, sizeof(tSirDFSChannelList)); //WLAN_SUSPEND_LINK Related pMac->lim.gpLimSuspendCallback = NULL; pMac->lim.gpLimResumeCallback = NULL; //end WLAN_SUSPEND_LINK Related } static void __limInitBssVars(tpAniSirGlobal pMac) { palZeroMemory(pMac->hHdd, (void*)pMac->lim.gpSession, sizeof(*pMac->lim.gpSession)*pMac->lim.maxBssId); //pMac->lim.gpLimStartBssReq = NULL; /* These global variables are moved to session table and intialization is done during session creation Oct 9th Review */ #if 0 // Place holder for BSS description that we're // currently joined with palZeroMemory(pMac->hHdd, &pMac->lim.gLimCurrentBssId, sizeof(tSirMacAddr)); pMac->lim.gLimCurrentChannelId = HAL_INVALID_CHANNEL_ID; palZeroMemory(pMac->hHdd, &pMac->lim.gLimCurrentSSID, sizeof(tSirMacSSid)); pMac->lim.gLimCurrentBssCaps = 0; QosCaps is a bit map of various qos capabilities - see defn above pMac->lim.gLimCurrentBssQosCaps = 0; pMac->lim.gLimCurrentBssPropCap = 0; pMac->lim.gLimSentCapsChangeNtf = 0; // Place holder for BSS description that // we're currently Reassociating palZeroMemory(pMac->hHdd, &pMac->lim.gLimReassocBssId, sizeof(tSirMacAddr)); 
pMac->lim.gLimReassocChannelId = 0; palZeroMemory(pMac->hHdd, &pMac->lim.gLimReassocSSID, sizeof(tSirMacSSid)); pMac->lim.gLimReassocBssCaps = 0; pMac->lim.gLimReassocBssQosCaps = 0; pMac->lim.gLimReassocBssPropCap = 0; #endif /* This is for testing purposes only, be default should always be off */ pMac->lim.gLimForceNoPropIE = 0; // pMac->lim.gLimBssIdx = 0; pMac->lim.gpLimMlmSetKeysReq = NULL; pMac->lim.gpLimMlmRemoveKeyReq = NULL; // pMac->lim.gLimStaid = 0; //TO SUPPORT BT-AMP } static void __limInitStatsVars(tpAniSirGlobal pMac) { pMac->lim.gLimNumBeaconsRcvd = 0; pMac->lim.gLimNumBeaconsIgnored = 0; pMac->lim.gLimNumDeferredMsgs = 0; /// Variable to keep track of number of currently associated STAs //pMac->lim.gLimNumOfCurrentSTAs = 0; pMac->lim.gLimNumOfAniSTAs = 0; // count of ANI peers /// This indicates number of RXed Beacons during HB period //pMac->lim.gLimRxedBeaconCntDuringHB = 0; // Heart-Beat interval value pMac->lim.gLimHeartBeatCount = 0; // Statistics to keep track of no. 
beacons rcvd in heart beat interval palZeroMemory(pMac->hHdd, pMac->lim.gLimHeartBeatBeaconStats, sizeof(pMac->lim.gLimHeartBeatBeaconStats)); #ifdef WLAN_DEBUG // Debug counters pMac->lim.numTot = 0; pMac->lim.numBbt = 0; pMac->lim.numProtErr = 0; pMac->lim.numLearn = 0; pMac->lim.numLearnIgnore = 0; pMac->lim.numSme = 0; palZeroMemory(pMac->hHdd, pMac->lim.numMAC, sizeof(pMac->lim.numMAC)); pMac->lim.gLimNumAssocReqDropInvldState = 0; pMac->lim.gLimNumAssocReqDropACRejectTS = 0; pMac->lim.gLimNumAssocReqDropACRejectSta = 0; pMac->lim.gLimNumReassocReqDropInvldState = 0; pMac->lim.gLimNumHashMissIgnored = 0; pMac->lim.gLimUnexpBcnCnt = 0; pMac->lim.gLimBcnSSIDMismatchCnt = 0; pMac->lim.gLimNumLinkEsts = 0; pMac->lim.gLimNumRxCleanup = 0; pMac->lim.gLim11bStaAssocRejectCount = 0; #endif } static void __limInitStates(tpAniSirGlobal pMac) { // Counts Heartbeat failures pMac->lim.gLimHBfailureCntInLinkEstState = 0; pMac->lim.gLimProbeFailureAfterHBfailedCnt = 0; pMac->lim.gLimHBfailureCntInOtherStates = 0; pMac->lim.gLimRspReqd = 0; pMac->lim.gLimPrevSmeState = eLIM_SME_OFFLINE_STATE; /// MLM State visible across all Sirius modules MTRACE(macTrace(pMac, TRACE_CODE_MLM_STATE, NO_SESSION, eLIM_MLM_IDLE_STATE)); pMac->lim.gLimMlmState = eLIM_MLM_IDLE_STATE; /// Previous MLM State pMac->lim.gLimPrevMlmState = eLIM_MLM_OFFLINE_STATE; #ifdef GEN4_SCAN // LIM to HAL SCAN Management Message Interface states pMac->lim.gLimHalScanState = eLIM_HAL_IDLE_SCAN_STATE; #endif // GEN4_SCAN /** * Initialize state to eLIM_SME_OFFLINE_STATE */ pMac->lim.gLimSmeState = eLIM_SME_OFFLINE_STATE; /** * By default assume 'unknown' role. This will be updated * when SME_START_BSS_REQ is received. 
*/ palZeroMemory(pMac->hHdd, &pMac->lim.gLimOverlap11gParams, sizeof(tLimProtStaParams)); palZeroMemory(pMac->hHdd, &pMac->lim.gLimOverlap11aParams, sizeof(tLimProtStaParams)); palZeroMemory(pMac->hHdd, &pMac->lim.gLimOverlapHt20Params, sizeof(tLimProtStaParams)); palZeroMemory(pMac->hHdd, &pMac->lim.gLimOverlapNonGfParams, sizeof(tLimProtStaParams)); palZeroMemory(pMac->hHdd, &pMac->lim.gLimNoShortParams, sizeof(tLimNoShortParams)); palZeroMemory(pMac->hHdd, &pMac->lim.gLimNoShortSlotParams, sizeof(tLimNoShortSlotParams)); pMac->lim.gLimPhyMode = 0; pMac->lim.scanStartTime = 0; // used to measure scan time palZeroMemory(pMac->hHdd, pMac->lim.gLimMyMacAddr, sizeof(pMac->lim.gLimMyMacAddr)); pMac->lim.ackPolicy = 0; #if 0 /* Moving all these to session specific elements */ pMac->lim.gLimQosEnabled = 0; //11E pMac->lim.gLimWmeEnabled = 0; //WME pMac->lim.gLimWsmEnabled = 0; //WSM pMac->lim.gLimHcfEnabled = 0; pMac->lim.gLim11dEnabled = 0; #endif pMac->lim.gLimProbeRespDisableFlag = 0; // control over probe response } static void __limInitVars(tpAniSirGlobal pMac) { // Place holder for Measurement Req/Rsp/Ind related info // WDS info pMac->lim.gLimNumWdsInfoInd = 0; pMac->lim.gLimNumWdsInfoSet = 0; palZeroMemory(pMac->hHdd, &pMac->lim.gLimWdsInfo, sizeof(tSirWdsInfo)); /* initialize some parameters */ limInitWdsInfoParams(pMac); // Deferred Queue Paramters palZeroMemory(pMac->hHdd, &pMac->lim.gLimDeferredMsgQ, sizeof(tSirAddtsReq)); // addts request if any - only one can be outstanding at any time palZeroMemory(pMac->hHdd, &pMac->lim.gLimAddtsReq, sizeof(tSirAddtsReq)); pMac->lim.gLimAddtsSent = 0; pMac->lim.gLimAddtsRspTimerCount = 0; //protection related config cache palZeroMemory(pMac->hHdd, &pMac->lim.cfgProtection, sizeof(tCfgProtection)); pMac->lim.gLimProtectionControl = 0; palZeroMemory(pMac->hHdd, &pMac->lim.gLimAlternateRadio, sizeof(tSirAlternateRadioInfo)); SET_LIM_PROCESS_DEFD_MESGS(pMac, true); #if 0 // 11h Spectrum Management Related Flag 
LIM_SET_RADAR_DETECTED(pMac, eANI_BOOLEAN_FALSE); pMac->sys.gSysEnableLearnMode = eANI_BOOLEAN_TRUE; #endif // WMM Related Flag pMac->lim.gUapsdEnable = 0; pMac->lim.gUapsdPerAcBitmask = 0; pMac->lim.gUapsdPerAcTriggerEnableMask = 0; pMac->lim.gUapsdPerAcDeliveryEnableMask = 0; // QoS-AC Downgrade: Initially, no AC is admitted pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_UPLINK] = 0; pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_DNLINK] = 0; //dialogue token List head/tail for Action frames request sent. pMac->lim.pDialogueTokenHead = NULL; pMac->lim.pDialogueTokenTail = NULL; palZeroMemory(pMac->hHdd, &pMac->lim.tspecInfo, sizeof(tLimTspecInfo) * LIM_NUM_TSPEC_MAX); // admission control policy information palZeroMemory(pMac->hHdd, &pMac->lim.admitPolicyInfo, sizeof(tLimAdmitPolicyInfo)); pMac->lim.gLastBeaconDtimCount = 0; pMac->lim.gLastBeaconDtimPeriod = 0; //Scan in Power Save Flag pMac->lim.gScanInPowersave = 0; } static void __limInitAssocVars(tpAniSirGlobal pMac) { tANI_U32 val; #if 0 palZeroMemory(pMac->hHdd, pMac->lim.gpLimAIDpool, sizeof(*pMac->lim.gpLimAIDpool) * (WNI_CFG_ASSOC_STA_LIMIT_STAMAX+1)); pMac->lim.freeAidHead = 0; pMac->lim.freeAidTail = 0; #endif if(wlan_cfgGetInt(pMac, WNI_CFG_ASSOC_STA_LIMIT, &val) != eSIR_SUCCESS) { limLog( pMac, LOGP, FL( "cfg get assoc sta limit failed" )); } pMac->lim.gLimAssocStaLimit = val; // Place holder for current authentication request // being handled pMac->lim.gpLimMlmAuthReq = NULL; //pMac->lim.gpLimMlmJoinReq = NULL; /// MAC level Pre-authentication related globals pMac->lim.gLimPreAuthChannelNumber = 0; pMac->lim.gLimPreAuthType = eSIR_OPEN_SYSTEM; palZeroMemory(pMac->hHdd, &pMac->lim.gLimPreAuthPeerAddr, sizeof(tSirMacAddr)); pMac->lim.gLimNumPreAuthContexts = 0; palZeroMemory(pMac->hHdd, &pMac->lim.gLimPreAuthTimerTable, sizeof(tLimPreAuthTable)); // Placed holder to deauth reason pMac->lim.gLimDeauthReasonCode = 0; // Place holder for Pre-authentication node list pMac->lim.pLimPreAuthList = NULL; // Send 
Disassociate frame threshold parameters pMac->lim.gLimDisassocFrameThreshold = LIM_SEND_DISASSOC_FRAME_THRESHOLD; pMac->lim.gLimDisassocFrameCredit = 0; //One cache for each overlap and associated case. palZeroMemory(pMac->hHdd, pMac->lim.protStaOverlapCache, sizeof(tCacheParams) * LIM_PROT_STA_OVERLAP_CACHE_SIZE); palZeroMemory(pMac->hHdd, pMac->lim.protStaCache, sizeof(tCacheParams) * LIM_PROT_STA_CACHE_SIZE); #if defined (WLAN_FEATURE_VOWIFI_11R) || defined (FEATURE_WLAN_CCX) || defined(FEATURE_WLAN_LFR) pMac->lim.pSessionEntry = NULL; pMac->lim.reAssocRetryAttempt = 0; #endif } static void __limInitTitanVars(tpAniSirGlobal pMac) { #if 0 palZeroMemory(pMac->hHdd, &pMac->lim.gLimChannelSwitch, sizeof(tLimChannelSwitchInfo)); pMac->lim.gLimChannelSwitch.state = eLIM_CHANNEL_SWITCH_IDLE; pMac->lim.gLimChannelSwitch.secondarySubBand = PHY_SINGLE_CHANNEL_CENTERED; #endif // Debug workaround for BEACON's // State change triggered by "dump 222" pMac->lim.gLimScanOverride = 1; pMac->lim.gLimScanOverrideSaved = eSIR_ACTIVE_SCAN; pMac->lim.gLimTitanStaCount = 0; pMac->lim.gLimBlockNonTitanSta = 0; } static void __limInitHTVars(tpAniSirGlobal pMac) { pMac->lim.htCapabilityPresentInBeacon = 0; pMac->lim.gHTGreenfield = 0; pMac->lim.gHTShortGI40Mhz = 0; pMac->lim.gHTShortGI20Mhz = 0; pMac->lim.gHTMaxAmsduLength = 0; pMac->lim.gHTDsssCckRate40MHzSupport = 0; pMac->lim.gHTPSMPSupport = 0; pMac->lim.gHTLsigTXOPProtection = 0; pMac->lim.gHTMIMOPSState = eSIR_HT_MIMO_PS_STATIC; pMac->lim.gHTAMpduDensity = 0; pMac->lim.gMaxAmsduSizeEnabled = false; pMac->lim.gHTMaxRxAMpduFactor = 0; pMac->lim.gHTServiceIntervalGranularity = 0; pMac->lim.gHTControlledAccessOnly = 0; pMac->lim.gHTOperMode = eSIR_HT_OP_MODE_PURE; pMac->lim.gHTPCOActive = 0; pMac->lim.gHTPCOPhase = 0; pMac->lim.gHTSecondaryBeacon = 0; pMac->lim.gHTDualCTSProtection = 0; pMac->lim.gHTSTBCBasicMCS = 0; pMac->lim.gAddBA_Declined = 0; // Flag to Decline the BAR if the particular bit (0-7) is being set } static 
tSirRetStatus __limInitConfig( tpAniSirGlobal pMac ) { tANI_U32 val1, val2, val3; tANI_U16 val16; tANI_U8 val8; tSirMacHTCapabilityInfo *pHTCapabilityInfo; tSirMacHTInfoField1 *pHTInfoField1; tpSirPowerSaveCfg pPowerSaveConfig; tSirMacHTParametersInfo *pAmpduParamInfo; /* Read all the CFGs here that were updated before peStart is called */ /* All these CFG READS/WRITES are only allowed in init, at start when there is no session * and they will be used throughout when there is no session */ if(wlan_cfgGetInt(pMac, WNI_CFG_HT_CAP_INFO, &val1) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not retrieve HT Cap CFG"));) return eSIR_FAILURE; } if(wlan_cfgGetInt(pMac, WNI_CFG_CHANNEL_BONDING_MODE, &val2) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not retrieve Channel Bonding CFG"));) return eSIR_FAILURE; } val16 = ( tANI_U16 ) val1; pHTCapabilityInfo = ( tSirMacHTCapabilityInfo* ) &val16; //channel bonding mode could be set to anything from 0 to 4(Titan had these // modes But for Taurus we have only two modes: enable(>0) or disable(=0) pHTCapabilityInfo->supportedChannelWidthSet = val2 ? 
WNI_CFG_CHANNEL_BONDING_MODE_ENABLE : WNI_CFG_CHANNEL_BONDING_MODE_DISABLE; if(cfgSetInt(pMac, WNI_CFG_HT_CAP_INFO, *(tANI_U16*)pHTCapabilityInfo) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not update HT Cap Info CFG"));) return eSIR_FAILURE; } if(wlan_cfgGetInt(pMac, WNI_CFG_HT_INFO_FIELD1, &val1) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not retrieve HT INFO Field1 CFG"));) return eSIR_FAILURE; } val8 = ( tANI_U8 ) val1; pHTInfoField1 = ( tSirMacHTInfoField1* ) &val8; pHTInfoField1->recommendedTxWidthSet = (tANI_U8)pHTCapabilityInfo->supportedChannelWidthSet; if(cfgSetInt(pMac, WNI_CFG_HT_INFO_FIELD1, *(tANI_U8*)pHTInfoField1) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not update HT Info Field"));) return eSIR_FAILURE; } /* WNI_CFG_HEART_BEAT_THRESHOLD */ if( wlan_cfgGetInt(pMac, WNI_CFG_HEART_BEAT_THRESHOLD, &val1) != eSIR_SUCCESS ) { PELOGE(limLog(pMac, LOGE, FL("could not retrieve WNI_CFG_HEART_BEAT_THRESHOLD CFG"));) return eSIR_FAILURE; } if(!val1) { limDeactivateAndChangeTimer(pMac, eLIM_HEART_BEAT_TIMER); pMac->sys.gSysEnableLinkMonitorMode = 0; } else { //No need to activate the timer during init time. 
pMac->sys.gSysEnableLinkMonitorMode = 1; } /* WNI_CFG_SHORT_GI_20MHZ */ if (wlan_cfgGetInt(pMac, WNI_CFG_HT_CAP_INFO, &val1) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not retrieve HT Cap CFG"));) return eSIR_FAILURE; } if (wlan_cfgGetInt(pMac, WNI_CFG_SHORT_GI_20MHZ, &val2) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not retrieve shortGI 20Mhz CFG"));) return eSIR_FAILURE; } if (wlan_cfgGetInt(pMac, WNI_CFG_SHORT_GI_40MHZ, &val3) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not retrieve shortGI 40Mhz CFG"));) return eSIR_FAILURE; } val16 = ( tANI_U16 ) val1; pHTCapabilityInfo = ( tSirMacHTCapabilityInfo* ) &val16; pHTCapabilityInfo->shortGI20MHz = (tANI_U16)val2; pHTCapabilityInfo->shortGI40MHz = (tANI_U16)val3; if(cfgSetInt(pMac, WNI_CFG_HT_CAP_INFO, *(tANI_U16*)pHTCapabilityInfo) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not update HT Cap Info CFG"));) return eSIR_FAILURE; } /* WNI_CFG_MAX_RX_AMPDU_FACTOR */ if (wlan_cfgGetInt(pMac, WNI_CFG_HT_AMPDU_PARAMS, &val1) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not retrieve HT AMPDU Param CFG"));) return eSIR_FAILURE; } if (wlan_cfgGetInt(pMac, WNI_CFG_MAX_RX_AMPDU_FACTOR, &val2) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not retrieve AMPDU Factor CFG"));) return eSIR_FAILURE; } val16 = ( tANI_U16 ) val1; pAmpduParamInfo = ( tSirMacHTParametersInfo* ) &val16; pAmpduParamInfo->maxRxAMPDUFactor = (tANI_U8)val2; if(cfgSetInt(pMac, WNI_CFG_HT_AMPDU_PARAMS, *(tANI_U8*)pAmpduParamInfo) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("could not update HT AMPDU Param CFG"));) return eSIR_FAILURE; } /* WNI_CFG_SHORT_PREAMBLE - this one is not updated in limHandleCFGparamUpdate do we want to update this? */ if(wlan_cfgGetInt(pMac, WNI_CFG_SHORT_PREAMBLE, &val1) != eSIR_SUCCESS) { limLog(pMac, LOGP, FL("cfg get short preamble failed")); return eSIR_FAILURE; } /* WNI_CFG_MAX_PS_POLL */ /* Allocate and fill in power save configuration. 
*/ if (palAllocateMemory(pMac->hHdd, (void **)&pPowerSaveConfig, sizeof(tSirPowerSaveCfg)) != eHAL_STATUS_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("LIM: Cannot allocate memory for power save " "configuration"));) return eSIR_FAILURE; } /* This context should be valid if power-save configuration message has been * already dispatched during initialization process. Re-using the present * configuration mask */ palCopyMemory(pMac->hHdd, pPowerSaveConfig, (tANI_U8 *)&pMac->pmm.gPmmCfg, sizeof(tSirPowerSaveCfg)); /* Note: it is okay to do this since DAL/HAL is alrady started */ if ( (pmmSendPowerSaveCfg(pMac, pPowerSaveConfig)) != eSIR_SUCCESS) { PELOGE(limLog(pMac, LOGE, FL("LIM: pmmSendPowerSaveCfg() failed "));) return eSIR_FAILURE; } /* WNI_CFG_BG_SCAN_CHANNEL_LIST_CHANNEL_LIST */ PELOG1(limLog(pMac, LOG1, FL("VALID_CHANNEL_LIST has changed, reset next bg scan channel"));) pMac->lim.gLimBackgroundScanChannelId = 0; /* WNI_CFG_PROBE_RSP_BCN_ADDNIE_DATA - not needed */ /* This was initially done after resume notification from HAL. Now, DAL is started before PE so this can be done here */ handleHTCapabilityandHTInfo(pMac, NULL); if(wlan_cfgGetInt(pMac, WNI_CFG_DISABLE_LDPC_WITH_TXBF_AP,(tANI_U32 *) &pMac->lim.disableLDPCWithTxbfAP) != eSIR_SUCCESS) { limLog(pMac, LOGP, FL("cfg get disableLDPCWithTxbfAP failed")); return eSIR_FAILURE; } return eSIR_SUCCESS; } /* limStart This function is to replace the __limProcessSmeStartReq since there is no eWNI_SME_START_REQ post to PE. 
*/ tSirRetStatus limStart(tpAniSirGlobal pMac) { tSirResultCodes retCode = eSIR_SUCCESS; PELOG1(limLog(pMac, LOG1, FL(" enter"));) if (pMac->lim.gLimSmeState == eLIM_SME_OFFLINE_STATE) { pMac->lim.gLimSmeState = eLIM_SME_IDLE_STATE; MTRACE(macTrace(pMac, TRACE_CODE_SME_STATE, NO_SESSION, pMac->lim.gLimSmeState)); // By default do not return after first scan match pMac->lim.gLimReturnAfterFirstMatch = 0; // Initialize MLM state machine limInitMlm(pMac); // By default return unique scan results pMac->lim.gLimReturnUniqueResults = true; pMac->lim.gLimSmeScanResultLength = 0; #ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD pMac->lim.gLimSmeLfrScanResultLength = 0; #endif } else { /** * Should not have received eWNI_SME_START_REQ in states * other than OFFLINE. Return response to host and * log error */ limLog(pMac, LOGE, FL("Invalid SME state %X"),pMac->lim.gLimSmeState ); retCode = eSIR_FAILURE; } return retCode; } /** * limInitialize() * *FUNCTION: * This function is called from LIM thread entry function. * LIM related global data structures are initialized in this function. * *LOGIC: * NA * *ASSUMPTIONS: * NA * *NOTE: * NA * * @param pMac - Pointer to global MAC structure * @return None */ tSirRetStatus limInitialize(tpAniSirGlobal pMac) { tSirRetStatus status = eSIR_SUCCESS; __limInitAssocVars(pMac); __limInitVars(pMac); __limInitStates(pMac); __limInitStatsVars(pMac); __limInitBssVars(pMac); __limInitScanVars(pMac); __limInitHTVars(pMac); __limInitTitanVars(pMac); status = limStart(pMac); if(eSIR_SUCCESS != status) { return status; } /* * MLM will be intitalized when 'START' request comes from SME. * limInitMlm calls limCreateTimers, which actually relies on * CFG to be downloaded. So it should not be called as part of * peStart, as CFG download is happening after peStart. 
     */
    //limInitMlm(pMac);

    // Initializations for maintaining peers in IBSS
    limIbssInit(pMac);

    // Power-management module init
    pmmInitialize(pMac);

#if defined WLAN_FEATURE_VOWIFI
    rrmInitialize(pMac);
#endif
#if defined WLAN_FEATURE_VOWIFI_11R
    limFTOpen(pMac);
#endif

    // Queue of mgmt-frame registrations from HDD (drained in limCleanup)
    vos_list_init(&pMac->lim.gLimMgmtFrameRegistratinQueue);

#if 0
    vos_trace_setLevel(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_ERROR);
    vos_trace_setLevel(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_WARN);
    vos_trace_setLevel(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_FATAL);

    vos_trace_setLevel(VOS_MODULE_ID_HAL, VOS_TRACE_LEVEL_WARN);
    vos_trace_setLevel(VOS_MODULE_ID_HAL, VOS_TRACE_LEVEL_ERROR);

    vos_trace_setLevel(VOS_MODULE_ID_SYS, VOS_TRACE_LEVEL_WARN);
    vos_trace_setLevel(VOS_MODULE_ID_SYS, VOS_TRACE_LEVEL_ERROR);

    vos_trace_setLevel(VOS_MODULE_ID_TL, VOS_TRACE_LEVEL_ERROR);
    vos_trace_setLevel(VOS_MODULE_ID_SAL, VOS_TRACE_LEVEL_ERROR);
    vos_trace_setLevel(VOS_MODULE_ID_SSC, VOS_TRACE_LEVEL_ERROR);
    vos_trace_setLevel(VOS_MODULE_ID_SAL, VOS_TRACE_LEVEL_ERROR);

    vos_trace_setLevel(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR);
    vos_trace_setLevel(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR);
    vos_trace_setLevel(VOS_MODULE_ID_BAL, VOS_TRACE_LEVEL_ERROR);

    vos_trace_setLevel(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR);
#endif

    //Initialize the configurations needed by PE
    if( eSIR_FAILURE == __limInitConfig(pMac))
    {
        //We need to undo everything in limStart
        limCleanupMlm(pMac);
        return eSIR_FAILURE;
    }

    //initialize the TSPEC admission control table.
    //Note that this was initially done after resume notification from HAL.
    //Now, DAL is started before PE so this can be done here
    limAdmitControlInit(pMac);
    limRegisterHalIndCallBack(pMac);

    return status;
} /*** end limInitialize() ***/

/**
 * limCleanup()
 *
 *FUNCTION:
 * This function is called upon reset or persona change
 * to cleanup LIM state
 *
 *LOGIC:
 * NA
 *
 *ASSUMPTIONS:
 * NA
 *
 *NOTE:
 * NA
 *
 * @param  pMac - Pointer to Global MAC structure
 * @return None
 */
void limCleanup(tpAniSirGlobal pMac)
{
    v_PVOID_t pvosGCTx;
    VOS_STATUS retStatus;

    //Before destroying the list making sure all the nodes have been deleted.
    //Which should be the normal case, but a memory leak has been reported.

    tpLimMgmtFrameRegistration pLimMgmtRegistration = NULL;

    // Drain any registrations still queued (leak reported in the past)
    while(vos_list_remove_front(&pMac->lim.gLimMgmtFrameRegistratinQueue,
            (vos_list_node_t**)&pLimMgmtRegistration) == VOS_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_PE, VOS_TRACE_LEVEL_INFO,
                FL("Fixing leak! Deallocating pLimMgmtRegistration node"));
        palFreeMemory(pMac, pLimMgmtRegistration);
    }

    vos_list_destroy(&pMac->lim.gLimMgmtFrameRegistratinQueue);

    limCleanupMlm(pMac);
    limCleanupLmm(pMac);

    // free up preAuth table
    if (pMac->lim.gLimPreAuthTimerTable.pTable != NULL)
    {
        palFreeMemory(pMac->hHdd, pMac->lim.gLimPreAuthTimerTable.pTable);
        pMac->lim.gLimPreAuthTimerTable.pTable = NULL;
        pMac->lim.gLimPreAuthTimerTable.numEntry = 0;
    }

    if(NULL != pMac->lim.pDialogueTokenHead)
    {
        limDeleteDialogueTokenList(pMac);
    }

    if(NULL != pMac->lim.pDialogueTokenTail)
    {
        palFreeMemory(pMac->hHdd, (void *) pMac->lim.pDialogueTokenTail);
        pMac->lim.pDialogueTokenTail = NULL;
    }

# if 0
    if (pMac->lim.gpLimStartBssReq != NULL)
    {
        palFreeMemory(pMac->hHdd, pMac->lim.gpLimStartBssReq);
        pMac->lim.gpLimStartBssReq = NULL;
    }
#endif

    if (pMac->lim.gpLimMlmSetKeysReq != NULL)
    {
        palFreeMemory(pMac->hHdd, pMac->lim.gpLimMlmSetKeysReq);
        pMac->lim.gpLimMlmSetKeysReq = NULL;
    }

#if 0
    if (pMac->lim.gpLimJoinReq != NULL)
    {
        palFreeMemory(pMac->hHdd, pMac->lim.gpLimJoinReq);
        pMac->lim.gpLimJoinReq = NULL;
    }
#endif

    if (pMac->lim.gpLimMlmAuthReq != NULL)
    {
        palFreeMemory(pMac->hHdd, pMac->lim.gpLimMlmAuthReq);
        pMac->lim.gpLimMlmAuthReq = NULL;
    }

#if 0
    if (pMac->lim.gpLimMlmJoinReq != NULL)
    {
        palFreeMemory(pMac->hHdd, pMac->lim.gpLimMlmJoinReq);
        pMac->lim.gpLimMlmJoinReq = NULL;
    }
#endif

#if 0
    if (pMac->lim.gpLimReassocReq != NULL)
    {
        palFreeMemory(pMac->hHdd, pMac->lim.gpLimReassocReq);
        pMac->lim.gpLimReassocReq = NULL;
    }
#endif

    if (pMac->lim.gpLimMlmRemoveKeyReq != NULL)
    {
        palFreeMemory(pMac->hHdd, pMac->lim.gpLimMlmRemoveKeyReq);
        pMac->lim.gpLimMlmRemoveKeyReq = NULL;
    }

    if (pMac->lim.gpDefdSmeMsgForNOA != NULL)
    {
        palFreeMemory(pMac->hHdd, pMac->lim.gpDefdSmeMsgForNOA);
        pMac->lim.gpDefdSmeMsgForNOA = NULL;
    }

    if (pMac->lim.gpLimMlmScanReq != NULL)
    {
        palFreeMemory(pMac->hHdd, pMac->lim.gpLimMlmScanReq);
        pMac->lim.gpLimMlmScanReq = NULL;
    }

#if 0
    if(NULL != pMac->lim.beacon)
    {
        palFreeMemory(pMac->hHdd, (void*) pMac->lim.beacon);
        pMac->lim.beacon = NULL;
    }
#endif

#if 0
    if(NULL != pMac->lim.assocReq)
    {
        palFreeMemory(pMac->hHdd, (void*) pMac->lim.assocReq);
        pMac->lim.assocReq= NULL;
    }
#endif

#if 0
    if(NULL != pMac->lim.assocRsp)
    {
        palFreeMemory(pMac->hHdd, (void*) pMac->lim.assocRsp);
        pMac->lim.assocRsp= NULL;
    }
#endif

    // Now, finally reset the deferred message queue pointers
    limResetDeferredMsgQ(pMac);

    // Undo the TL registration done in peRegisterTLHandle()
    pvosGCTx = vos_get_global_context(VOS_MODULE_ID_PE, (v_VOID_t *) pMac);
    retStatus = WLANTL_DeRegisterMgmtFrmClient(pvosGCTx);

    if ( retStatus != VOS_STATUS_SUCCESS )
        PELOGE(limLog(pMac, LOGE, FL("DeRegistering the PE Handle with TL has failed bailing out..."));)

#if defined WLAN_FEATURE_VOWIFI
    rrmCleanup(pMac);
#endif
#if defined WLAN_FEATURE_VOWIFI_11R
    limFTCleanup(pMac);
#endif

} /*** end limCleanup() ***/

/** -------------------------------------------------------------
\fn peOpen
\brief will be called in Open sequence from macOpen
\param   tpAniSirGlobal pMac
\param   tHalOpenParameters *pHalOpenParam
\return  tSirRetStatus
  -------------------------------------------------------------*/
tSirRetStatus peOpen(tpAniSirGlobal pMac, tMacOpenParameters *pMacOpenParam)
{
    pMac->lim.maxBssId = pMacOpenParam->maxBssId;
    pMac->lim.maxStation = pMacOpenParam->maxStation;

    // Both limits must be non-zero; they size the allocations below
    if ((pMac->lim.maxBssId == 0) || (pMac->lim.maxStation == 0))
    {
         PELOGE(limLog(pMac, LOGE, FL("max number of Bssid or Stations cannot be zero!"));)
         return eSIR_FAILURE;
    }

    // One confirmation-wait timer per station
    if (eHAL_STATUS_SUCCESS != palAllocateMemory(pMac->hHdd,
              (void **) &pMac->lim.limTimers.gpLimCnfWaitTimer,
              sizeof(TX_TIMER)*pMac->lim.maxStation))
    {
        PELOGE(limLog(pMac, LOGE, FL("memory allocate failed!"));)
        return eSIR_FAILURE;
    }

#if 0
    if (eHAL_STATUS_SUCCESS != palAllocateMemory(pMac->hHdd,
              (void **) &pMac->lim.gpLimAIDpool,
              sizeof(*pMac->lim.gpLimAIDpool) * (WNI_CFG_ASSOC_STA_LIMIT_STAMAX+1)))
    {
        PELOGE(limLog(pMac, LOGE, FL("memory allocate failed!"));)
        return eSIR_FAILURE;
    }
#endif

    // Session table, one tPESession per BSS
    if (eHAL_STATUS_SUCCESS != palAllocateMemory(pMac->hHdd,
              (void **) &pMac->lim.gpSession,
              sizeof(tPESession)* pMac->lim.maxBssId))
    {
        limLog(pMac, LOGE, FL("memory allocate failed!"));
        return eSIR_FAILURE;
    }

    palZeroMemory(pMac->hHdd, pMac->lim.gpSession, sizeof(tPESession)*pMac->lim.maxBssId);

 /*
    if (eHAL_STATUS_SUCCESS != palAllocateMemory(pMac->hHdd,
          (void **) &pMac->dph.dphHashTable.pHashTable, sizeof(tpDphHashNode)*pMac->lim.maxStation))
    {
        PELOGE(limLog(pMac, LOGE, FL("memory allocate failed!"));)
        return eSIR_FAILURE;
    }

    if (eHAL_STATUS_SUCCESS != palAllocateMemory(pMac->hHdd,
          (void **) &pMac->dph.dphHashTable.pDphNodeArray, sizeof(tDphHashNode)*pMac->lim.maxStation))
    {
        PELOGE(limLog(pMac, LOGE, FL("memory allocate failed!"));)
        return eSIR_FAILURE;
    }
 */

    // TIM bitmap storage (one byte per station)
    if (eHAL_STATUS_SUCCESS != palAllocateMemory(pMac->hHdd,
          (void **) &pMac->pmm.gPmmTim.pTim, sizeof(tANI_U8)*pMac->lim.maxStation))
    {
        PELOGE(limLog(pMac, LOGE, FL("memory allocate failed for pTim!"));)
        return eSIR_FAILURE;
    }
    palZeroMemory(pMac->hHdd, pMac->pmm.gPmmTim.pTim, sizeof(tANI_U8)*pMac->lim.maxStation);

    pMac->lim.mgmtFrameSessionId = 0xff;   // 0xff == no mgmt-frame session in progress
    pMac->lim.deferredMsgCnt = 0;

    if( !VOS_IS_STATUS_SUCCESS( vos_lock_init( &pMac->lim.lkPeGlobalLock ) ) )
    {
        PELOGE(limLog(pMac, LOGE, FL("pe lock init failed!"));)
        return eSIR_FAILURE;
    }

    /*
     * peOpen is successful by now, so it is right time to initialize
     * MTRACE for PE module. if LIM_TRACE_RECORD is not defined in build file
     * then nothing will be logged for PE module.
     */
#ifdef LIM_TRACE_RECORD
    MTRACE(limTraceInit(pMac));
#endif
    return eSIR_SUCCESS;
}

/** -------------------------------------------------------------
\fn peClose
\brief will be called in close sequence from macClose
\param   tpAniSirGlobal pMac
\return  tSirRetStatus
  -------------------------------------------------------------*/
tSirRetStatus peClose(tpAniSirGlobal pMac)
{
    tANI_U8 i;

    if (ANI_DRIVER_TYPE(pMac) == eDRIVER_TYPE_MFG)
        return eSIR_SUCCESS;

    // Tear down every still-valid session before freeing the table
    for(i =0; i < pMac->lim.maxBssId; i++)
    {
        if(pMac->lim.gpSession[i].valid == TRUE)
        {
            peDeleteSession(pMac,&pMac->lim.gpSession[i]);
        }
    }
    palFreeMemory(pMac->hHdd, pMac->lim.limTimers.gpLimCnfWaitTimer);
    pMac->lim.limTimers.gpLimCnfWaitTimer = NULL;
#if 0
    palFreeMemory(pMac->hHdd, pMac->lim.gpLimAIDpool);
    pMac->lim.gpLimAIDpool = NULL;
#endif
    palFreeMemory(pMac->hHdd, pMac->lim.gpSession);
    pMac->lim.gpSession = NULL;
 /*
    palFreeMemory(pMac->hHdd, pMac->dph.dphHashTable.pHashTable);
    pMac->dph.dphHashTable.pHashTable = NULL;
    palFreeMemory(pMac->hHdd, pMac->dph.dphHashTable.pDphNodeArray);
    pMac->dph.dphHashTable.pDphNodeArray = NULL;
 */
    palFreeMemory(pMac->hHdd, pMac->pmm.gPmmTim.pTim);
    pMac->pmm.gPmmTim.pTim = NULL;

    if( !VOS_IS_STATUS_SUCCESS( vos_lock_destroy( &pMac->lim.lkPeGlobalLock ) ) )
    {
        return eSIR_FAILURE;
    }
    return eSIR_SUCCESS;
}

/** -------------------------------------------------------------
\fn peStart
\brief will be called in start sequence from macStart
\param   tpAniSirGlobal pMac
\return none
  -------------------------------------------------------------*/
tSirRetStatus peStart(tpAniSirGlobal pMac)
{
    tSirRetStatus status = eSIR_SUCCESS;

    status = limInitialize(pMac);
#if defined(ANI_LOGDUMP)
    limDumpInit(pMac);
#endif
//#if defined(ANI_LOGDUMP)

    return status;
}

/** -------------------------------------------------------------
\fn peStop
\brief will be called in stop sequence from macStop
\param   tpAniSirGlobal pMac
\return none
  -------------------------------------------------------------*/
void peStop(tpAniSirGlobal pMac)
{
    limCleanup(pMac);
    SET_LIM_MLM_STATE(pMac, eLIM_MLM_OFFLINE_STATE);
    return;
}

/** -------------------------------------------------------------
\fn peFreeMsg
\brief Called by VOS scheduler (function vos_sched_flush_mc_mqs)
\      to free a given PE message on the TX and MC thread.
\      This happens when there are messages pending in the PE
\      queue when system is being stopped and reset.
\param   tpAniSirGlobal pMac
\param   tSirMsgQ       pMsg
\return none
  -----------------------------------------------------------------*/
v_VOID_t peFreeMsg( tpAniSirGlobal pMac, tSirMsgQ* pMsg)
{
    if (pMsg != NULL)
    {
        if (NULL != pMsg->bodyptr)
        {
            // Mgmt frames ride in vos packets; everything else is vos_mem
            if (SIR_BB_XPORT_MGMT_MSG == pMsg->type)
            {
                vos_pkt_return_packet((vos_pkt_t *)pMsg->bodyptr);
            }
            else
            {
                vos_mem_free((v_VOID_t*)pMsg->bodyptr);
            }
        }
        pMsg->bodyptr = 0;
        pMsg->bodyval = 0;
        pMsg->type = 0;
    }
    return;
}

/**
 * The function checks if a particular timer should be allowed
 * into LIM while device is sleeping
 */
tANI_U8 limIsTimerAllowedInPowerSaveState(tpAniSirGlobal pMac, tSirMsgQ *pMsg)
{
    tANI_U8 retStatus = TRUE;

    if(!limIsSystemInActiveState(pMac))
    {
        switch(pMsg->type)
        {
            /* Don't allow following timer messages if in sleep */
            case SIR_LIM_MIN_CHANNEL_TIMEOUT:
            case SIR_LIM_MAX_CHANNEL_TIMEOUT:
            case SIR_LIM_PERIODIC_PROBE_REQ_TIMEOUT:
                retStatus = FALSE;
                break;

            /* May allow following timer messages in sleep mode */
            case SIR_LIM_HASH_MISS_THRES_TIMEOUT:

            /* Safe to allow as of today, this triggers background scan
             * which will not be started if the device is in power-save mode
             * might need to block in the future if we decide to implement
             * spectrum management */
            case SIR_LIM_QUIET_TIMEOUT:

            /* Safe to allow as of today, this triggers background scan
             * which will not be started if the device is in power-save mode
             * might need to block in the future if we decide to implement
             * spectrum management */
            case SIR_LIM_QUIET_BSS_TIMEOUT:

            /* Safe to allow this timermessage, triggers background scan
             * which is blocked in sleep mode */
            case SIR_LIM_CHANNEL_SCAN_TIMEOUT:

            /* Safe to allow this timer, since, while in IMPS this timer will not
             * be started. In case of BMPS sleep, SoftMAC handles the heart-beat
             * when heart-beat control is handled back to PE, device would have
             * already woken-up due to EXIT_BMPS_IND mesage from SoftMAC */
            case SIR_LIM_HEART_BEAT_TIMEOUT:
            case SIR_LIM_PROBE_HB_FAILURE_TIMEOUT:

            /* Safe to allow, PE is not handling this message as of now. May need
             * to block it, basically, free the buffer and restart the timer */
            case SIR_LIM_REASSOC_FAIL_TIMEOUT:
            case SIR_LIM_JOIN_FAIL_TIMEOUT:
            case SIR_LIM_PERIODIC_JOIN_PROBE_REQ_TIMEOUT:
            case SIR_LIM_ASSOC_FAIL_TIMEOUT:
            case SIR_LIM_AUTH_FAIL_TIMEOUT:
            case SIR_LIM_ADDTS_RSP_TIMEOUT:
                retStatus = TRUE;
                break;

            /* by default allow rest of messages */
            default:
                retStatus = TRUE;
                break;
        }
    }

    return retStatus;
}

/**
 * limPostMsgApi()
 *
 *FUNCTION:
 * This function is called from other thread while posting a
 * message to LIM message Queue gSirLimMsgQ.
 *
 *LOGIC:
 * NA
 *
 *ASSUMPTIONS:
 * NA
 *
 *NOTE:
 * NA
 *
 * @param  pMac - Pointer to Global MAC structure
 * @param  pMsg - Pointer to the message structure
 * @return None
 */
tANI_U32
limPostMsgApi(tpAniSirGlobal pMac, tSirMsgQ *pMsg)
{
    return  vos_mq_post_message(VOS_MQ_ID_PE, (vos_msg_t *) pMsg);
} /*** end limPostMsgApi() ***/

/*--------------------------------------------------------------------------
  \brief pePostMsgApi() - A wrapper function to post message to Voss msg queues

  This function can be called by legacy code to post message to voss queues OR
  legacy code may keep on invoking 'limPostMsgApi' to post the message to voss
  queue for dispatching it later.
  \param pMac - Pointer to Global MAC structure
  \param pMsg - Pointer to the message structure

  \return  tANI_U32 - TX_SUCCESS for success.

  --------------------------------------------------------------------------*/
tSirRetStatus pePostMsgApi(tpAniSirGlobal pMac, tSirMsgQ *pMsg)
{
   return (tSirRetStatus)limPostMsgApi(pMac, pMsg);
}

/*--------------------------------------------------------------------------
  \brief peProcessMessages() - Message Processor for PE

  Voss calls this function to dispatch the message to PE

  \param pMac - Pointer to Global MAC structure
  \param pMsg - Pointer to the message structure

  \return  tANI_U32 - TX_SUCCESS for success.

  --------------------------------------------------------------------------*/
tSirRetStatus peProcessMessages(tpAniSirGlobal pMac, tSirMsgQ* pMsg)
{
   // Manufacturing driver: PE is bypassed entirely
   if(pMac->gDriverType == eDRIVER_TYPE_MFG)
   {
      return eSIR_SUCCESS;
   }
   /**
    * If the Message to be handled is for CFG Module call the CFG Msg Handler and
    * for all the other cases post it to LIM
    */
   if ( SIR_CFG_PARAM_UPDATE_IND != pMsg->type && IS_CFG_MSG(pMsg->type))
      cfgProcessMbMsg(pMac, (tSirMbMsg*)pMsg->bodyptr);
   else
      limMessageProcessor(pMac, pMsg);
   return eSIR_SUCCESS;
}

// ---------------------------------------------------------------------
/**
 * peHandleMgmtFrame
 *
 * FUNCTION:
 *    Process the Management frames from TL
 *
 * LOGIC:
 *
 * ASSUMPTIONS: TL sends the packet along with the VOS GlobalContext
 *
 * NOTE:
 *
 * @param pvosGCtx  Global Vos Context
 * @param vossBuff  Packet
 * @return None
 */
VOS_STATUS peHandleMgmtFrame( v_PVOID_t pvosGCtx, v_PVOID_t vosBuff)
{
    tpAniSirGlobal  pMac;
    tpSirMacMgmtHdr mHdr;
    tSirMsgQ        msg;
    vos_pkt_t      *pVosPkt;
    VOS_STATUS      vosStatus;
    v_U8_t         *pRxPacketInfo;

    pVosPkt = (vos_pkt_t *)vosBuff;
    if (NULL == pVosPkt)
    {
        return VOS_STATUS_E_FAILURE;
    }

    pMac = (tpAniSirGlobal)vos_get_context(VOS_MODULE_ID_PE, pvosGCtx);
    if (NULL == pMac)
    {
        // cannot log a failure without a valid pMac
        vos_pkt_return_packet(pVosPkt);
        return VOS_STATUS_E_FAILURE;
    }

    vosStatus = WDA_DS_PeekRxPacketInfo( pVosPkt, (void *)&pRxPacketInfo, VOS_FALSE );

    if(!VOS_IS_STATUS_SUCCESS(vosStatus))
    {
        // Packet is always returned to VOS on failure paths
        vos_pkt_return_packet(pVosPkt);
        return VOS_STATUS_E_FAILURE;
    }

    //
    // The MPDU header is now present at a certain "offset" in
    // the BD and is specified in the BD itself
    //
    mHdr = WDA_GET_RX_MAC_HEADER(pRxPacketInfo);
    if(mHdr->fc.type == SIR_MAC_MGMT_FRAME)
    {
        PELOG1(limLog( pMac, LOG1,
               FL ( "RxBd=%p mHdr=%p Type: %d Subtype: %d  Sizes:FC%d Mgmt%d"),
               pRxPacketInfo, mHdr, mHdr->fc.type, mHdr->fc.subType,
               sizeof(tSirMacFrameCtl), sizeof(tSirMacMgmtHdr) );)

        MTRACE(macTrace(pMac, TRACE_CODE_RX_MGMT,
                        NO_SESSION,
                        LIM_TRACE_MAKE_RXMGMT(mHdr->fc.subType,
                        (tANI_U16) (((tANI_U16) (mHdr->seqControl.seqNumHi << 4)) |
                                     mHdr->seqControl.seqNumLo)));)
    }

    // Forward to MAC via mesg = SIR_BB_XPORT_MGMT_MSG
    msg.type = SIR_BB_XPORT_MGMT_MSG;
    msg.bodyptr = vosBuff;
    msg.bodyval = 0;

    if( eSIR_SUCCESS != sysBbtProcessMessageCore( pMac,
                                                  &msg,
                                                  mHdr->fc.type,
                                                  mHdr->fc.subType ))
    {
        vos_pkt_return_packet(pVosPkt);
        limLog( pMac, LOGW,
                FL ( "sysBbtProcessMessageCore failed to process SIR_BB_XPORT_MGMT_MSG" ));
        return VOS_STATUS_E_FAILURE;
    }

    return  VOS_STATUS_SUCCESS;
}

// ---------------------------------------------------------------------
/**
 * peRegisterTLHandle
 *
 * FUNCTION:
 *    Registers the Handler which, process the Management frames from TL
 *
 * LOGIC:
 *
 * ASSUMPTIONS:
 *
 * NOTE:
 *
 * @return None
 */
void peRegisterTLHandle(tpAniSirGlobal pMac)
{
    v_PVOID_t pvosGCTx;
    VOS_STATUS retStatus;

    pvosGCTx = vos_get_global_context(VOS_MODULE_ID_PE, (v_VOID_t *) pMac);
    retStatus = WLANTL_RegisterMgmtFrmClient(pvosGCTx, peHandleMgmtFrame);

    // LOGP: registration failure is fatal for mgmt-frame handling
    if (retStatus != VOS_STATUS_SUCCESS)
        limLog( pMac, LOGP, FL("Registering the PE Handle with TL has failed bailing out..."));
}

/**
 * limIsSystemInScanState()
 *
 *FUNCTION:
 * This function is called by various MAC software modules to
 * determine if System is in Scan/Learn state
 *
 *LOGIC:
 * NA
 *
 *ASSUMPTIONS:
 * NA
 *
 *NOTE:
 *
 * @param  pMac  - Pointer to Global
                   MAC structure
 * @return true  - System is in Scan/Learn state
 *         false - System is NOT in Scan/Learn state
 */
tANI_U8 limIsSystemInScanState(tpAniSirGlobal pMac)
{
    switch (pMac->lim.gLimSmeState)
    {
        case eLIM_SME_CHANNEL_SCAN_STATE:
        case eLIM_SME_NORMAL_CHANNEL_SCAN_STATE:
        case eLIM_SME_LINK_EST_WT_SCAN_STATE:
        case eLIM_SME_WT_SCAN_STATE:
            // System is in Learn mode
            return true;

        default:
            // System is NOT in Learn mode
            return false;
    }
} /*** end limIsSystemInScanState() ***/

/**
 * limIsSystemInActiveState()
 *
 *FUNCTION:
 * This function is called by various MAC software modules to
 * determine if System is in Active/Wakeup state
 *
 *LOGIC:
 * NA
 *
 *ASSUMPTIONS:
 * NA
 *
 *NOTE:
 *
 * @param  pMac  - Pointer to Global MAC structure
 * @return true  - System is in Active state
 *         false - System is not in Active state
 */
tANI_U8 limIsSystemInActiveState(tpAniSirGlobal pMac)
{
    switch (pMac->pmm.gPmmState)
    {
        case ePMM_STATE_BMPS_WAKEUP:
        case ePMM_STATE_IMPS_WAKEUP:
        case ePMM_STATE_READY:
            // System is in Active mode
            return true;
        default:
            return false;
            // System is NOT in Active mode
    }
}

/**
 *\brief limReceivedHBHandler()
 *
 * This function is called by schBeaconProcess() upon
 * receiving a Beacon on STA. This also gets called upon
 * receiving Probe Response after heat beat failure is
 * detected.
 *
 * param pMac - global mac structure
 * param channel - channel number indicated in Beacon, Probe Response
 * return - none
 */
void limReceivedHBHandler(tpAniSirGlobal pMac, tANI_U8 channelId, tpPESession psessionEntry)
{
    // channelId == 0 means "channel unknown"; count it anyway
    if((channelId == 0 ) || (channelId == psessionEntry->currentOperChannel) )
        psessionEntry->LimRxedBeaconCntDuringHB++;

    pMac->pmm.inMissedBeaconScenario = FALSE;
} /*** end limReceivedHBHandler() ***/

#if 0
void limResetHBPktCount(tpPESession psessionEntry)
{
    psessionEntry->LimRxedBeaconCntDuringHB = 0;
}
#endif

/*
 * limProcessWdsInfo()
 *
 *FUNCTION:
 * This function is called from schBeaconProcess in BP
 *
 *PARAMS:
 * @param pMac      - Pointer to Global MAC structure
 * @param propIEInfo - proprietary IE info
 *
 *LOGIC:
 *
 *ASSUMPTIONS:
 * NA
 *
 *NOTE:
 *
 *
 *RETURNS:
 *
 */
// NOTE(review): intentionally empty - WDS info processing is not implemented
void limProcessWdsInfo(tpAniSirGlobal pMac,
                       tSirPropIEStruct propIEInfo)
{
}

/**
 * limInitWdsInfoParams()
 *
 *FUNCTION:
 * This function is called while processing
 * START_BSS/JOIN/REASSOC_REQ to initialize WDS info
 * ind/set related parameters.
 *
 *LOGIC:
 *
 *ASSUMPTIONS:
 *
 *NOTE:
 *
 * @param  pMac      Pointer to Global MAC structure
 * @return None
 */
void limInitWdsInfoParams(tpAniSirGlobal pMac)
{
    pMac->lim.gLimWdsInfo.wdsLength = 0;
    pMac->lim.gLimNumWdsInfoInd     = 0;
    pMac->lim.gLimNumWdsInfoSet     = 0;
} /*** limInitWdsInfoParams() ***/

/** -------------------------------------------------------------
\fn limUpdateOverlapStaParam
\brief Updates overlap cache and param data structure
\param      tpAniSirGlobal    pMac
\param      tSirMacAddr bssId
\param      tpLimProtStaParams pStaParams
\return      None
  -------------------------------------------------------------*/
void limUpdateOverlapStaParam(tpAniSirGlobal pMac, tSirMacAddr bssId, tpLimProtStaParams pStaParams)
{
    int i;

    // First entry: seed slot 0 of the overlap cache
    if (!pStaParams->numSta)
    {
        palCopyMemory( pMac->hHdd, pMac->lim.protStaOverlapCache[0].addr,
              bssId,
              sizeof(tSirMacAddr));
        pMac->lim.protStaOverlapCache[0].active = true;

        pStaParams->numSta = 1;

        return;
    }

    // Scan active entries; bail if this BSSID is already cached
    for (i=0; i<LIM_PROT_STA_OVERLAP_CACHE_SIZE; i++)
    {
        if (pMac->lim.protStaOverlapCache[i].active)
        {
            if (palEqualMemory( pMac->hHdd,pMac->lim.protStaOverlapCache[i].addr,
                          bssId,
                          sizeof(tSirMacAddr)))
            {
                return;
            }
        }
        else
            break;
    }

    if (i == LIM_PROT_STA_OVERLAP_CACHE_SIZE)
    {
        PELOG1(limLog(pMac, LOG1, FL("Overlap cache is full"));)
    }
    else
    {
        // Insert into the first inactive slot found above
        palCopyMemory( pMac->hHdd, pMac->lim.protStaOverlapCache[i].addr,
              bssId,
              sizeof(tSirMacAddr));
        pMac->lim.protStaOverlapCache[i].active = true;

        pStaParams->numSta++;
    }
}

/**
 * limHandleIBSScoalescing()
 *
 *FUNCTION:
 * This function is called upon receiving Beacon/Probe Response
 * while operating in IBSS mode.
 *
 *LOGIC:
 *
 *ASSUMPTIONS:
 *
 *NOTE:
 *
 * @param  pMac          - Pointer to Global MAC structure
 * @param  pBeacon       - Parsed Beacon Frame structure
 * @param  pRxPacketInfo - Pointer to RX packet info structure
 *
 * @return Status whether to process or ignore received Beacon Frame
 */
tSirRetStatus
limHandleIBSScoalescing(
    tpAniSirGlobal      pMac,
    tpSchBeaconStruct   pBeacon,
    tANI_U8            *pRxPacketInfo,tpPESession psessionEntry)
{
    tpSirMacMgmtHdr pHdr;
    tSirRetStatus   retCode;

    pHdr = WDA_GET_RX_MAC_HEADER(pRxPacketInfo);
    if ( (!pBeacon->capabilityInfo.ibss) || (limCmpSSid(pMac, &pBeacon->ssId,psessionEntry) != true) )
        /* Received SSID does not match => Ignore received Beacon frame. */
        retCode =  eSIR_LIM_IGNORE_BEACON;
    else
    {
        tANI_U32 ieLen;
        tANI_U16 tsfLater;
        tANI_U8 *pIEs;
        ieLen    = WDA_GET_RX_PAYLOAD_LEN(pRxPacketInfo);
        tsfLater = WDA_GET_RX_TSF_LATER(pRxPacketInfo);
        pIEs = WDA_GET_RX_MPDU_DATA(pRxPacketInfo);
        PELOG3(limLog(pMac, LOG3, FL("BEFORE Coalescing tsfLater val :%d"), tsfLater);)
        retCode  = limIbssCoalesce(pMac, pHdr, pBeacon, pIEs, ieLen, tsfLater,psessionEntry);
    }
    return retCode;
} /*** end limHandleIBSScoalescing() ***/

/**
 * limDetectChangeInApCapabilities()
 *
 *FUNCTION:
 * This function is called while SCH is processing
 * received Beacon from AP on STA to detect any
 * change in AP's capabilities. If there any change
 * is detected, Roaming is informed of such change
 * so that it can trigger reassociation.
 *
 *LOGIC:
 *
 *ASSUMPTIONS:
 *
 *NOTE:
 * Notification is enabled for STA product only since
 * it is not a requirement on BP side.
 *
 * @param  pMac      Pointer to Global MAC structure
 * @param  pBeacon   Pointer to parsed Beacon structure
 * @return None
 */
void
limDetectChangeInApCapabilities(tpAniSirGlobal pMac,
                                tpSirProbeRespBeacon pBeacon,
                                tpPESession psessionEntry)
{
    tANI_U8                 len;
    tSirSmeApNewCaps        apNewCaps;
    tANI_U8                 newChannel;
    tSirRetStatus status = eSIR_SUCCESS;
    apNewCaps.capabilityInfo = limGetU16((tANI_U8 *) &pBeacon->capabilityInfo);
    newChannel = (tANI_U8) pBeacon->channelNumber;

    // Enter if no caps-change notification was sent yet AND either the
    // SSID differs (or is hidden) OR any tracked capability bit / the
    // operating channel differs from the current BSS caps.
    if ( ( false == psessionEntry->limSentCapsChangeNtf ) &&
        ( ( ( limIsNullSsid(&pBeacon->ssId) ) ||
          ( ( !limIsNullSsid(&pBeacon->ssId) ) &&
             ( false == limCmpSSid(pMac, &pBeacon->ssId, psessionEntry) ) ) ) ||
          ( (SIR_MAC_GET_ESS(apNewCaps.capabilityInfo) !=
             SIR_MAC_GET_ESS(psessionEntry->limCurrentBssCaps) ) ||
          ( SIR_MAC_GET_PRIVACY(apNewCaps.capabilityInfo) !=
             SIR_MAC_GET_PRIVACY(psessionEntry->limCurrentBssCaps) ) ||
          ( SIR_MAC_GET_SHORT_PREAMBLE(apNewCaps.capabilityInfo) !=
             SIR_MAC_GET_SHORT_PREAMBLE(psessionEntry->limCurrentBssCaps) ) ||
          ( SIR_MAC_GET_QOS(apNewCaps.capabilityInfo) !=
             SIR_MAC_GET_QOS(psessionEntry->limCurrentBssCaps) ) ||
          ( newChannel !=  psessionEntry->currentOperChannel )
          ) ) )
    {
        if( false == psessionEntry->fWaitForProbeRsp )
        {
            /* If Beacon capabilities is not matching with the current capability,
             * then send unicast probe request to AP and take decision after
             * receiving probe response */
            if ( true == psessionEntry->fIgnoreCapsChange )
            {
                limLog(pMac, LOGW, FL("Ignoring the Capability change as it is false alarm"));
                return;
            }
            psessionEntry->fWaitForProbeRsp = true;
            limLog(pMac, LOGW, FL("AP capabilities are not matching,"
                   "sending directed probe request.. "));
            status = limSendProbeReqMgmtFrame(pMac, &psessionEntry->ssId, psessionEntry->bssId,
                    psessionEntry->currentOperChannel,psessionEntry->selfMacAddr,
                    psessionEntry->dot11mode, 0, NULL);

            if ( eSIR_SUCCESS != status )
            {
                limLog(pMac, LOGE, FL("send ProbeReq failed"));
                psessionEntry->fWaitForProbeRsp = false;
            }
            return;
        }
        /**
         * BSS capabilities have changed.
         * Inform Roaming.
         */
        len = sizeof(tSirMacCapabilityInfo) +
              sizeof(tSirMacAddr) + sizeof(tANI_U8) +
              3 * sizeof(tANI_U8) + // reserved fields
              pBeacon->ssId.length + 1;

        palCopyMemory( pMac->hHdd, apNewCaps.bssId,
                      psessionEntry->bssId,
                      sizeof(tSirMacAddr));
        if (newChannel != psessionEntry->currentOperChannel)
        {
            PELOGE(limLog(pMac, LOGE, FL("Channel Change from %d --> %d  - "
                                         "Ignoring beacon!"),
                          psessionEntry->currentOperChannel, newChannel);)
            return;
        }

        /**
         * When Cisco 1262 Enterprise APs are configured with WPA2-PSK with
         * AES+TKIP Pairwise ciphers and WEP-40 Group cipher, they do not set
         * the privacy bit in Beacons (wpa/rsnie is still present in beacons),
         * the privacy bit is set in Probe and association responses.
         * Due to this anomaly, we detect a change in
         * AP capabilities when we receive a beacon after association and
         * disconnect from the AP. The following check makes sure that we can
         * connect to such APs
         */
        else if ((SIR_MAC_GET_PRIVACY(apNewCaps.capabilityInfo) == 0) &&
                (pBeacon->rsnPresent || pBeacon->wpaPresent))
        {
            PELOGE(limLog(pMac, LOGE, FL("BSS Caps (Privacy) bit 0 in beacon,"
                                         " but WPA or RSN IE present, Ignore Beacon!"));)
            return;
        }
        else
            apNewCaps.channelId = psessionEntry->currentOperChannel;
        palCopyMemory( pMac->hHdd, (tANI_U8 *) &apNewCaps.ssId,
                      (tANI_U8 *) &pBeacon->ssId,
                      pBeacon->ssId.length + 1);

        psessionEntry->fIgnoreCapsChange = false;
        psessionEntry->fWaitForProbeRsp = false;
        psessionEntry->limSentCapsChangeNtf = true;
        limSendSmeWmStatusChangeNtf(pMac, eSIR_SME_AP_CAPS_CHANGED,
                                    (tANI_U32 *) &apNewCaps,
                                    len, psessionEntry->smeSessionId);
    }
    else if ( true == psessionEntry->fWaitForProbeRsp )
    {
        /* Only for probe response frames and matching capabilities the control
         * will come here. If beacon is with broadcast ssid then fWaitForProbeRsp
         * will be false, the control will not come here*/

        limLog(pMac, LOG1, FL("capabilities in probe response are"
                    "matching with the current setting,"
                    "Ignoring subsequent capability"
                    "mismatch"));
        psessionEntry->fIgnoreCapsChange = true;
        psessionEntry->fWaitForProbeRsp = false;
    }

} /*** limDetectChangeInApCapabilities() ***/

// ---------------------------------------------------------------------
/**
 * limUpdateShortSlot
 *
 * FUNCTION:
 * Enable/Disable short slot
 *
 * LOGIC:
 *
 * ASSUMPTIONS:
 *
 * NOTE:
 *
 * @param enable        Flag to enable/disable short slot
 * @return None
 */
tSirRetStatus limUpdateShortSlot(tpAniSirGlobal pMac, tpSirProbeRespBeacon pBeacon, tpUpdateBeaconParams pBeaconParams,tpPESession psessionEntry)
{
    tSirSmeApNewCaps   apNewCaps;
    tANI_U32           nShortSlot;
    tANI_U32 val = 0;
    tANI_U32 phyMode;

    // Check Admin mode first.
    // If it is disabled just return
    if (wlan_cfgGetInt(pMac, WNI_CFG_11G_SHORT_SLOT_TIME_ENABLED, &val) != eSIR_SUCCESS)
    {
        limLog(pMac, LOGP, FL("cfg get WNI_CFG_11G_SHORT_SLOT_TIME failed"));
        return eSIR_FAILURE;
    }
    if (val == false)
        return eSIR_SUCCESS;

    // Check for 11a mode or 11b mode. In both cases return since slot time is constant and cannot/should not change in beacon
    limGetPhyMode(pMac, &phyMode, psessionEntry);
    if ((phyMode == WNI_CFG_PHY_MODE_11A) || (phyMode == WNI_CFG_PHY_MODE_11B))
        return eSIR_SUCCESS;

    apNewCaps.capabilityInfo = limGetU16((tANI_U8 *) &pBeacon->capabilityInfo);

    //  Earlier implementation: determine the appropriate short slot mode based on AP advertised modes
    // when erp is present, apply short slot always unless, prot=on  && shortSlot=off
    // if no erp present, use short slot based on current ap caps

    // Issue with earlier implementation : Cisco 1231 BG has shortSlot = 0, erpIEPresent and useProtection = 0 (Case4);

    //Resolution : always use the shortSlot setting the capability info to decide slot time.
    // The difference between the earlier implementation and the new one is only Case4.
    /*
       ERP IE Present | useProtection | shortSlot = QC STA Short Slot
       Case1           1               1            1       1  //AP should not advertise this combination.
       Case2           1               1            0       0
       Case3           1               0            1       1
       Case4           1               0            0       0
       Case5           0               1            1       1
       Case6           0               1            0       0
       Case7           0               0            1       1
       Case8           0               0            0       0
    */
    nShortSlot = SIR_MAC_GET_SHORT_SLOT_TIME(apNewCaps.capabilityInfo);

    if (nShortSlot != psessionEntry->shortSlotTimeSupported)
    {
        // Short slot time capability of AP has changed. Adopt to it.
        PELOG1(limLog(pMac, LOG1, FL("Shortslot capability of AP changed: %d"),  nShortSlot);)
        ((tpSirMacCapabilityInfo)&psessionEntry->limCurrentBssCaps)->shortSlotTime = (tANI_U16)nShortSlot;
        psessionEntry->shortSlotTimeSupported = nShortSlot;
        pBeaconParams->fShortSlotTime = (tANI_U8) nShortSlot;
        pBeaconParams->paramChangeBitmap |= PARAM_SHORT_SLOT_TIME_CHANGED;
    }
    return eSIR_SUCCESS;
}

/** -----------------------------------------------------------------
  \brief limHandleLowRssiInd() - handles low rssi indication

  This function process the SIR_HAL_LOW_RSSI_IND message from
  HAL, and sends a eWNI_SME_LOW_RSSI_IND to CSR.

  \param pMac - global mac structure
  \return
  \sa
  ----------------------------------------------------------------- */
// NOTE(review): entire body is compiled out - RSSI indications now go
// to TL instead of PE; this stub is kept for the HAL message table.
void limHandleLowRssiInd(tpAniSirGlobal pMac)
{
#if 0  //RSSI related indications will now go to TL and not PE
    if ( (pMac->pmm.gPmmState == ePMM_STATE_BMPS_SLEEP) ||
         (pMac->pmm.gPmmState == ePMM_STATE_UAPSD_SLEEP)||
         (pMac->pmm.gPmmState == ePMM_STATE_WOWLAN) )
    {
        PELOG1(limLog(pMac, LOG1, FL("Sending LOW_RSSI_IND to SME "));)
        limSendSmeRsp(pMac, eWNI_SME_LOW_RSSI_IND, eSIR_SME_SUCCESS, 0, 0);
    }
    else
    {
        limLog(pMac, LOGE,
            FL("Received SIR_HAL_LOW_RSSI_IND while in incorrect state: %d"),
            pMac->pmm.gPmmState);
    }
    return;
#endif
}

/** -----------------------------------------------------------------
  \brief limHandleBmpsStatusInd() - handles BMPS status indication

  This function process the SIR_HAL_BMPS_STATUS_IND message from HAL,
  and invokes limSendExitBmpsInd( ) to send an eWNI_PMC_EXIT_BMPS_IND
  to SME with reason code 'eSME_EXIT_BMPS_IND_RCVD'.

  HAL sends this message when Firmware fails to enter BMPS mode 'AFTER'
  HAL had already send PE a SIR_HAL_ENTER_BMPS_RSP with status code "success".
  Hence, HAL needs to notify PE to get out of BMPS mode. This message can also
  come from FW anytime after we have entered BMPS.
  This means we should handle it in WoWL and UAPSD states as well

  \param pMac - global mac structure
  \return - none
  \sa
  ----------------------------------------------------------------- */
void limHandleBmpsStatusInd(tpAniSirGlobal pMac)
{
    switch(pMac->pmm.gPmmState)
    {
        // Any sleeping / transitioning power-save state: wake up via SME
        case ePMM_STATE_BMPS_SLEEP:
        case ePMM_STATE_UAPSD_WT_SLEEP_RSP:
        case ePMM_STATE_UAPSD_SLEEP:
        case ePMM_STATE_UAPSD_WT_WAKEUP_RSP:
        case ePMM_STATE_WOWLAN:
            PELOG1(limLog(pMac, LOG1, FL("Sending EXIT_BMPS_IND to SME "));)
            limSendExitBmpsInd(pMac, eSME_BMPS_STATUS_IND_RCVD);
            break;

        default:
            limLog(pMac, LOGE,
                FL("Received SIR_HAL_BMPS_STATUS_IND while in incorrect state: %d"),
                pMac->pmm.gPmmState);
            break;
    }
    return;
}

/** -----------------------------------------------------------------
  \brief limHandleMissedBeaconInd() - handles missed beacon indication

  This function process the SIR_HAL_MISSED_BEACON_IND message from HAL,
  and invokes limSendExitBmpsInd( ) to send an eWNI_PMC_EXIT_BMPS_IND
  to SME with reason code 'eSME_MISSED_BEACON_IND_RCVD'.

  \param pMac - global mac structure
  \return - none
  \sa
  ----------------------------------------------------------------- */
void limHandleMissedBeaconInd(tpAniSirGlobal pMac, tpSirMsgQ pMsg)
{
#ifdef WLAN_ACTIVEMODE_OFFLOAD_FEATURE
    tpSirSmeMissedBeaconInd  pSirMissedBeaconInd =
                           (tpSirSmeMissedBeaconInd)pMsg->bodyptr;
    tpPESession psessionEntry = peFindSessionByBssIdx(pMac,pSirMissedBeaconInd->bssIdx);
    if (psessionEntry == NULL)
    {
         limLog(pMac, LOGE,
               FL("session does not exist for given BSSIdx:%d"),
               pSirMissedBeaconInd->bssIdx);
         return;
    }
#endif
    // While asleep: flag the missed-beacon scenario and ask SME to exit BMPS
    if ( (pMac->pmm.gPmmState == ePMM_STATE_BMPS_SLEEP) ||
         (pMac->pmm.gPmmState == ePMM_STATE_UAPSD_SLEEP)||
         (pMac->pmm.gPmmState == ePMM_STATE_WOWLAN) )
    {
        pMac->pmm.inMissedBeaconScenario = TRUE;
        PELOG1(limLog(pMac, LOG1, FL("Sending EXIT_BMPS_IND to SME "));)
        limSendExitBmpsInd(pMac, eSME_MISSED_BEACON_IND_RCVD);
    }
/* ACTIVE_MODE_HB_OFFLOAD */
#ifdef WLAN_ACTIVEMODE_OFFLOAD_FEATURE
    // Awake with HB offload enabled: firmware reported heart-beat failure
    else if(((pMac->pmm.gPmmState == ePMM_STATE_READY) ||
             (pMac->pmm.gPmmState == ePMM_STATE_BMPS_WAKEUP)) &&
            (IS_ACTIVEMODE_OFFLOAD_FEATURE_ENABLE))
    {
        pMac->pmm.inMissedBeaconScenario = TRUE;
        PELOGE(limLog(pMac, LOGE, FL("Received Heart Beat Failure"));)
        limMissedBeaconInActiveMode(pMac, psessionEntry);
    }
#endif
    else
    {
        limLog(pMac, LOGE,
            FL("Received SIR_HAL_MISSED_BEACON_IND while in incorrect state: %d"),
            pMac->pmm.gPmmState);
    }
    return;
}

/** -----------------------------------------------------------------
  \brief limMicFailureInd() - handles mic failure  indication

  This function process the SIR_HAL_MIC_FAILURE_IND message from HAL,

  \param pMac - global mac structure
  \return - none
  \sa
  ----------------------------------------------------------------- */
void limMicFailureInd(tpAniSirGlobal pMac, tpSirMsgQ pMsg)
{
    tpSirSmeMicFailureInd pSirSmeMicFailureInd;
    tpSirSmeMicFailureInd pSirMicFailureInd = (tpSirSmeMicFailureInd)pMsg->bodyptr;
    tSirMsgQ            mmhMsg;
    tpPESession psessionEntry ;
    tANI_U8     sessionId;

    if((psessionEntry =
peFindSessionByBssid(pMac,pSirMicFailureInd->bssId,&sessionId))== NULL) { limLog(pMac, LOGE, FL("session does not exist for given BSSId")); return; } if (eHAL_STATUS_SUCCESS != palAllocateMemory(pMac->hHdd, (void **) &pSirSmeMicFailureInd, sizeof(tSirSmeMicFailureInd))) { // Log error limLog(pMac, LOGP, FL("memory allocate failed for eWNI_SME_MIC_FAILURE_IND")); return; } pSirSmeMicFailureInd->messageType = eWNI_SME_MIC_FAILURE_IND; pSirSmeMicFailureInd->length = sizeof(pSirSmeMicFailureInd); pSirSmeMicFailureInd->sessionId = psessionEntry->smeSessionId; vos_mem_copy(pSirSmeMicFailureInd->bssId, pSirMicFailureInd->bssId, sizeof(tSirMacAddr)); vos_mem_copy(pSirSmeMicFailureInd->info.srcMacAddr, pSirMicFailureInd->info.srcMacAddr, sizeof(tSirMacAddr)); vos_mem_copy(pSirSmeMicFailureInd->info.taMacAddr, pSirMicFailureInd->info.taMacAddr, sizeof(tSirMacAddr)); vos_mem_copy(pSirSmeMicFailureInd->info.dstMacAddr, pSirMicFailureInd->info.dstMacAddr, sizeof(tSirMacAddr)); vos_mem_copy(pSirSmeMicFailureInd->info.rxMacAddr, pSirMicFailureInd->info.rxMacAddr, sizeof(tSirMacAddr)); pSirSmeMicFailureInd->info.multicast = pSirMicFailureInd->info.multicast; pSirSmeMicFailureInd->info.keyId= pSirMicFailureInd->info.keyId; pSirSmeMicFailureInd->info.IV1= pSirMicFailureInd->info.IV1; vos_mem_copy(pSirSmeMicFailureInd->info.TSC, pSirMicFailureInd->info.TSC,SIR_CIPHER_SEQ_CTR_SIZE); mmhMsg.type = eWNI_SME_MIC_FAILURE_IND; mmhMsg.bodyptr = pSirSmeMicFailureInd; mmhMsg.bodyval = 0; MTRACE(macTraceMsgTx(pMac, sessionId, mmhMsg.type)); limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT); return; } /** ----------------------------------------------------------------- \brief limIsPktCandidateForDrop() - decides whether to drop the frame or not This function is called before enqueuing the frame to PE queue for further processing. This prevents unnecessary frames getting into PE Queue and drops them right away. 
Frames will be droped in the following scenarios: - In Scan State, drop the frames which are not marked as scan frames - In non-Scan state, drop the frames which are marked as scan frames. - Drop INFRA Beacons and Probe Responses in IBSS Mode - Drop the Probe Request in IBSS mode, if STA did not send out the last beacon \param pMac - global mac structure \return - none \sa ----------------------------------------------------------------- */ tMgmtFrmDropReason limIsPktCandidateForDrop(tpAniSirGlobal pMac, tANI_U8 *pRxPacketInfo, tANI_U32 subType) { tANI_U32 framelen; tANI_U8 *pBody; tSirMacCapabilityInfo capabilityInfo; /* * * In scan mode, drop only Beacon/Probe Response which are NOT marked as scan-frames. * In non-scan mode, drop only Beacon/Probe Response which are marked as scan frames. * Allow other mgmt frames, they must be from our own AP, as we don't allow * other than beacons or probe responses in scan state. */ if( (subType == SIR_MAC_MGMT_BEACON) || (subType == SIR_MAC_MGMT_PROBE_RSP)) { if(pMac->pmm.inMissedBeaconScenario) { MTRACE(macTrace(pMac, TRACE_CODE_INFO_LOG, 0, eLOG_NODROP_MISSED_BEACON_SCENARIO)); return eMGMT_DROP_NO_DROP; } if (limIsSystemInScanState(pMac)) { return eMGMT_DROP_NO_DROP; } #ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD else if (WDA_GET_OFFLOADSCANLEARN(pRxPacketInfo) || WDA_GET_ROAMCANDIDATEIND(pRxPacketInfo)) { return eMGMT_DROP_NO_DROP; } #endif else if (WDA_IS_RX_IN_SCAN(pRxPacketInfo)) { return eMGMT_DROP_SCAN_MODE_FRAME; } } framelen = WDA_GET_RX_PAYLOAD_LEN(pRxPacketInfo); pBody = WDA_GET_RX_MPDU_DATA(pRxPacketInfo); /* Note sure if this is sufficient, basically this condition allows all probe responses and * beacons from an infrastructure network */ *((tANI_U16*) &capabilityInfo) = sirReadU16(pBody+ LIM_BCN_PR_CAPABILITY_OFFSET); if(!capabilityInfo.ibss) return eMGMT_DROP_NO_DROP; #if 0 //Allow the mgmt frames to be queued if STA not in IBSS mode. 
if (pMac->lim.gLimSystemRole != eLIM_STA_IN_IBSS_ROLE) return eMGMT_DROP_NO_DROP; #endif //Drop INFRA Beacons and Probe Responses in IBSS Mode if( (subType == SIR_MAC_MGMT_BEACON) || (subType == SIR_MAC_MGMT_PROBE_RSP)) { //drop the frame if length is less than 12 if(framelen < LIM_MIN_BCN_PR_LENGTH) return eMGMT_DROP_INVALID_SIZE; *((tANI_U16*) &capabilityInfo) = sirReadU16(pBody+ LIM_BCN_PR_CAPABILITY_OFFSET); //This can be enhanced to even check the SSID before deciding to enque the frame. if(capabilityInfo.ess) return eMGMT_DROP_INFRA_BCN_IN_IBSS; } else if( (subType == SIR_MAC_MGMT_PROBE_REQ) && (!WDA_GET_RX_BEACON_SENT(pRxPacketInfo))) { //Drop the Probe Request in IBSS mode, if STA did not send out the last beacon //In IBSS, the node which sends out the beacon, is supposed to respond to ProbeReq return eMGMT_DROP_NOT_LAST_IBSS_BCN; } return eMGMT_DROP_NO_DROP; } eHalStatus pe_AcquireGlobalLock( tAniSirLim *psPe) { eHalStatus status = eHAL_STATUS_INVALID_PARAMETER; if(psPe) { if( VOS_IS_STATUS_SUCCESS( vos_lock_acquire( &psPe->lkPeGlobalLock) ) ) { status = eHAL_STATUS_SUCCESS; } } return (status); } eHalStatus pe_ReleaseGlobalLock( tAniSirLim *psPe) { eHalStatus status = eHAL_STATUS_INVALID_PARAMETER; if(psPe) { if( VOS_IS_STATUS_SUCCESS( vos_lock_release( &psPe->lkPeGlobalLock) ) ) { status = eHAL_STATUS_SUCCESS; } } return (status); }
gpl-2.0
tlodge/dreamplug_kernel
drivers/video/backlight/ams369fg06.c
411
13952
/* * ams369fg06 AMOLED LCD panel driver. * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * Author: Jingoo Han <jg1.han@samsung.com> * * Derived from drivers/video/s6e63m0.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/wait.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/spi/spi.h> #include <linux/lcd.h> #include <linux/backlight.h> #define SLEEPMSEC 0x1000 #define ENDDEF 0x2000 #define DEFMASK 0xFF00 #define COMMAND_ONLY 0xFE #define DATA_ONLY 0xFF #define MAX_GAMMA_LEVEL 5 #define GAMMA_TABLE_COUNT 21 #define MIN_BRIGHTNESS 0 #define MAX_BRIGHTNESS 255 #define DEFAULT_BRIGHTNESS 150 struct ams369fg06 { struct device *dev; struct spi_device *spi; unsigned int power; struct lcd_device *ld; struct backlight_device *bd; struct lcd_platform_data *lcd_pd; }; static const unsigned short seq_display_on[] = { 0x14, 0x03, ENDDEF, 0x0000 }; static const unsigned short seq_display_off[] = { 0x14, 0x00, ENDDEF, 0x0000 }; static const unsigned short seq_stand_by_on[] = { 0x1D, 0xA1, SLEEPMSEC, 200, ENDDEF, 0x0000 }; static const unsigned short seq_stand_by_off[] = { 0x1D, 0xA0, SLEEPMSEC, 250, ENDDEF, 0x0000 }; static const unsigned short seq_setting[] = { 0x31, 0x08, 0x32, 0x14, 0x30, 0x02, 0x27, 0x01, 0x12, 0x08, 0x13, 0x08, 0x15, 0x00, 0x16, 0x00, 0xef, 0xd0, 
DATA_ONLY, 0xe8, 0x39, 0x44, 0x40, 0x00, 0x41, 0x3f, 0x42, 0x2a, 0x43, 0x27, 0x44, 0x27, 0x45, 0x1f, 0x46, 0x44, 0x50, 0x00, 0x51, 0x00, 0x52, 0x17, 0x53, 0x24, 0x54, 0x26, 0x55, 0x1f, 0x56, 0x43, 0x60, 0x00, 0x61, 0x3f, 0x62, 0x2a, 0x63, 0x25, 0x64, 0x24, 0x65, 0x1b, 0x66, 0x5c, 0x17, 0x22, 0x18, 0x33, 0x19, 0x03, 0x1a, 0x01, 0x22, 0xa4, 0x23, 0x00, 0x26, 0xa0, 0x1d, 0xa0, SLEEPMSEC, 300, 0x14, 0x03, ENDDEF, 0x0000 }; /* gamma value: 2.2 */ static const unsigned int ams369fg06_22_250[] = { 0x00, 0x3f, 0x2a, 0x27, 0x27, 0x1f, 0x44, 0x00, 0x00, 0x17, 0x24, 0x26, 0x1f, 0x43, 0x00, 0x3f, 0x2a, 0x25, 0x24, 0x1b, 0x5c, }; static const unsigned int ams369fg06_22_200[] = { 0x00, 0x3f, 0x28, 0x29, 0x27, 0x21, 0x3e, 0x00, 0x00, 0x10, 0x25, 0x27, 0x20, 0x3d, 0x00, 0x3f, 0x28, 0x27, 0x25, 0x1d, 0x53, }; static const unsigned int ams369fg06_22_150[] = { 0x00, 0x3f, 0x2d, 0x29, 0x28, 0x23, 0x37, 0x00, 0x00, 0x0b, 0x25, 0x28, 0x22, 0x36, 0x00, 0x3f, 0x2b, 0x28, 0x26, 0x1f, 0x4a, }; static const unsigned int ams369fg06_22_100[] = { 0x00, 0x3f, 0x30, 0x2a, 0x2b, 0x24, 0x2f, 0x00, 0x00, 0x00, 0x25, 0x29, 0x24, 0x2e, 0x00, 0x3f, 0x2f, 0x29, 0x29, 0x21, 0x3f, }; static const unsigned int ams369fg06_22_50[] = { 0x00, 0x3f, 0x3c, 0x2c, 0x2d, 0x27, 0x24, 0x00, 0x00, 0x00, 0x22, 0x2a, 0x27, 0x23, 0x00, 0x3f, 0x3b, 0x2c, 0x2b, 0x24, 0x31, }; struct ams369fg06_gamma { unsigned int *gamma_22_table[MAX_GAMMA_LEVEL]; }; static struct ams369fg06_gamma gamma_table = { .gamma_22_table[0] = (unsigned int *)&ams369fg06_22_50, .gamma_22_table[1] = (unsigned int *)&ams369fg06_22_100, .gamma_22_table[2] = (unsigned int *)&ams369fg06_22_150, .gamma_22_table[3] = (unsigned int *)&ams369fg06_22_200, .gamma_22_table[4] = (unsigned int *)&ams369fg06_22_250, }; static int ams369fg06_spi_write_byte(struct ams369fg06 *lcd, int addr, int data) { u16 buf[1]; struct spi_message msg; struct spi_transfer xfer = { .len = 2, .tx_buf = buf, }; buf[0] = (addr << 8) | data; spi_message_init(&msg); 
spi_message_add_tail(&xfer, &msg); return spi_sync(lcd->spi, &msg); } static int ams369fg06_spi_write(struct ams369fg06 *lcd, unsigned char address, unsigned char command) { int ret = 0; if (address != DATA_ONLY) ret = ams369fg06_spi_write_byte(lcd, 0x70, address); if (command != COMMAND_ONLY) ret = ams369fg06_spi_write_byte(lcd, 0x72, command); return ret; } static int ams369fg06_panel_send_sequence(struct ams369fg06 *lcd, const unsigned short *wbuf) { int ret = 0, i = 0; while ((wbuf[i] & DEFMASK) != ENDDEF) { if ((wbuf[i] & DEFMASK) != SLEEPMSEC) { ret = ams369fg06_spi_write(lcd, wbuf[i], wbuf[i+1]); if (ret) break; } else mdelay(wbuf[i+1]); i += 2; } return ret; } static int _ams369fg06_gamma_ctl(struct ams369fg06 *lcd, const unsigned int *gamma) { unsigned int i = 0; int ret = 0; for (i = 0 ; i < GAMMA_TABLE_COUNT / 3; i++) { ret = ams369fg06_spi_write(lcd, 0x40 + i, gamma[i]); ret = ams369fg06_spi_write(lcd, 0x50 + i, gamma[i+7*1]); ret = ams369fg06_spi_write(lcd, 0x60 + i, gamma[i+7*2]); if (ret) { dev_err(lcd->dev, "failed to set gamma table.\n"); goto gamma_err; } } gamma_err: return ret; } static int ams369fg06_gamma_ctl(struct ams369fg06 *lcd, int brightness) { int ret = 0; int gamma = 0; if ((brightness >= 0) && (brightness <= 50)) gamma = 0; else if ((brightness > 50) && (brightness <= 100)) gamma = 1; else if ((brightness > 100) && (brightness <= 150)) gamma = 2; else if ((brightness > 150) && (brightness <= 200)) gamma = 3; else if ((brightness > 200) && (brightness <= 255)) gamma = 4; ret = _ams369fg06_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]); return ret; } static int ams369fg06_ldi_init(struct ams369fg06 *lcd) { int ret, i; static const unsigned short *init_seq[] = { seq_setting, seq_stand_by_off, }; for (i = 0; i < ARRAY_SIZE(init_seq); i++) { ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]); if (ret) break; } return ret; } static int ams369fg06_ldi_enable(struct ams369fg06 *lcd) { int ret, i; static const unsigned short *init_seq[] 
= { seq_stand_by_off, seq_display_on, }; for (i = 0; i < ARRAY_SIZE(init_seq); i++) { ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]); if (ret) break; } return ret; } static int ams369fg06_ldi_disable(struct ams369fg06 *lcd) { int ret, i; static const unsigned short *init_seq[] = { seq_display_off, seq_stand_by_on, }; for (i = 0; i < ARRAY_SIZE(init_seq); i++) { ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]); if (ret) break; } return ret; } static int ams369fg06_power_is_on(int power) { return ((power) <= FB_BLANK_NORMAL); } static int ams369fg06_power_on(struct ams369fg06 *lcd) { int ret = 0; struct lcd_platform_data *pd = NULL; struct backlight_device *bd = NULL; pd = lcd->lcd_pd; if (!pd) { dev_err(lcd->dev, "platform data is NULL.\n"); return -EFAULT; } bd = lcd->bd; if (!bd) { dev_err(lcd->dev, "backlight device is NULL.\n"); return -EFAULT; } if (!pd->power_on) { dev_err(lcd->dev, "power_on is NULL.\n"); return -EFAULT; } else { pd->power_on(lcd->ld, 1); mdelay(pd->power_on_delay); } if (!pd->reset) { dev_err(lcd->dev, "reset is NULL.\n"); return -EFAULT; } else { pd->reset(lcd->ld); mdelay(pd->reset_delay); } ret = ams369fg06_ldi_init(lcd); if (ret) { dev_err(lcd->dev, "failed to initialize ldi.\n"); return ret; } ret = ams369fg06_ldi_enable(lcd); if (ret) { dev_err(lcd->dev, "failed to enable ldi.\n"); return ret; } /* set brightness to current value after power on or resume. 
*/ ret = ams369fg06_gamma_ctl(lcd, bd->props.brightness); if (ret) { dev_err(lcd->dev, "lcd gamma setting failed.\n"); return ret; } return 0; } static int ams369fg06_power_off(struct ams369fg06 *lcd) { int ret = 0; struct lcd_platform_data *pd = NULL; pd = lcd->lcd_pd; if (!pd) { dev_err(lcd->dev, "platform data is NULL\n"); return -EFAULT; } ret = ams369fg06_ldi_disable(lcd); if (ret) { dev_err(lcd->dev, "lcd setting failed.\n"); return -EIO; } mdelay(pd->power_off_delay); if (!pd->power_on) { dev_err(lcd->dev, "power_on is NULL.\n"); return -EFAULT; } else pd->power_on(lcd->ld, 0); return 0; } static int ams369fg06_power(struct ams369fg06 *lcd, int power) { int ret = 0; if (ams369fg06_power_is_on(power) && !ams369fg06_power_is_on(lcd->power)) ret = ams369fg06_power_on(lcd); else if (!ams369fg06_power_is_on(power) && ams369fg06_power_is_on(lcd->power)) ret = ams369fg06_power_off(lcd); if (!ret) lcd->power = power; return ret; } static int ams369fg06_get_power(struct lcd_device *ld) { struct ams369fg06 *lcd = lcd_get_data(ld); return lcd->power; } static int ams369fg06_set_power(struct lcd_device *ld, int power) { struct ams369fg06 *lcd = lcd_get_data(ld); if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN && power != FB_BLANK_NORMAL) { dev_err(lcd->dev, "power value should be 0, 1 or 4.\n"); return -EINVAL; } return ams369fg06_power(lcd, power); } static int ams369fg06_get_brightness(struct backlight_device *bd) { return bd->props.brightness; } static int ams369fg06_set_brightness(struct backlight_device *bd) { int ret = 0; int brightness = bd->props.brightness; struct ams369fg06 *lcd = dev_get_drvdata(&bd->dev); if (brightness < MIN_BRIGHTNESS || brightness > bd->props.max_brightness) { dev_err(&bd->dev, "lcd brightness should be %d to %d.\n", MIN_BRIGHTNESS, MAX_BRIGHTNESS); return -EINVAL; } ret = ams369fg06_gamma_ctl(lcd, bd->props.brightness); if (ret) { dev_err(&bd->dev, "lcd brightness setting failed.\n"); return -EIO; } return ret; } static 
struct lcd_ops ams369fg06_lcd_ops = { .get_power = ams369fg06_get_power, .set_power = ams369fg06_set_power, }; static const struct backlight_ops ams369fg06_backlight_ops = { .get_brightness = ams369fg06_get_brightness, .update_status = ams369fg06_set_brightness, }; static int __devinit ams369fg06_probe(struct spi_device *spi) { int ret = 0; struct ams369fg06 *lcd = NULL; struct lcd_device *ld = NULL; struct backlight_device *bd = NULL; struct backlight_properties props; lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL); if (!lcd) return -ENOMEM; /* ams369fg06 lcd panel uses 3-wire 16bits SPI Mode. */ spi->bits_per_word = 16; ret = spi_setup(spi); if (ret < 0) { dev_err(&spi->dev, "spi setup failed.\n"); goto out_free_lcd; } lcd->spi = spi; lcd->dev = &spi->dev; lcd->lcd_pd = spi->dev.platform_data; if (!lcd->lcd_pd) { dev_err(&spi->dev, "platform data is NULL\n"); goto out_free_lcd; } ld = lcd_device_register("ams369fg06", &spi->dev, lcd, &ams369fg06_lcd_ops); if (IS_ERR(ld)) { ret = PTR_ERR(ld); goto out_free_lcd; } lcd->ld = ld; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = MAX_BRIGHTNESS; bd = backlight_device_register("ams369fg06-bl", &spi->dev, lcd, &ams369fg06_backlight_ops, &props); if (IS_ERR(bd)) { ret = PTR_ERR(bd); goto out_lcd_unregister; } bd->props.brightness = DEFAULT_BRIGHTNESS; lcd->bd = bd; if (!lcd->lcd_pd->lcd_enabled) { /* * if lcd panel was off from bootloader then * current lcd status is powerdown and then * it enables lcd panel. 
*/ lcd->power = FB_BLANK_POWERDOWN; ams369fg06_power(lcd, FB_BLANK_UNBLANK); } else lcd->power = FB_BLANK_UNBLANK; dev_set_drvdata(&spi->dev, lcd); dev_info(&spi->dev, "ams369fg06 panel driver has been probed.\n"); return 0; out_lcd_unregister: lcd_device_unregister(ld); out_free_lcd: kfree(lcd); return ret; } static int __devexit ams369fg06_remove(struct spi_device *spi) { struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev); ams369fg06_power(lcd, FB_BLANK_POWERDOWN); backlight_device_unregister(lcd->bd); lcd_device_unregister(lcd->ld); kfree(lcd); return 0; } #if defined(CONFIG_PM) static unsigned int before_power; static int ams369fg06_suspend(struct spi_device *spi, pm_message_t mesg) { int ret = 0; struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev); dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power); before_power = lcd->power; /* * when lcd panel is suspend, lcd panel becomes off * regardless of status. */ ret = ams369fg06_power(lcd, FB_BLANK_POWERDOWN); return ret; } static int ams369fg06_resume(struct spi_device *spi) { int ret = 0; struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev); /* * after suspended, if lcd panel status is FB_BLANK_UNBLANK * (at that time, before_power is FB_BLANK_UNBLANK) then * it changes that status to FB_BLANK_POWERDOWN to get lcd on. 
*/ if (before_power == FB_BLANK_UNBLANK) lcd->power = FB_BLANK_POWERDOWN; dev_dbg(&spi->dev, "before_power = %d\n", before_power); ret = ams369fg06_power(lcd, before_power); return ret; } #else #define ams369fg06_suspend NULL #define ams369fg06_resume NULL #endif static void ams369fg06_shutdown(struct spi_device *spi) { struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev); ams369fg06_power(lcd, FB_BLANK_POWERDOWN); } static struct spi_driver ams369fg06_driver = { .driver = { .name = "ams369fg06", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = ams369fg06_probe, .remove = __devexit_p(ams369fg06_remove), .shutdown = ams369fg06_shutdown, .suspend = ams369fg06_suspend, .resume = ams369fg06_resume, }; static int __init ams369fg06_init(void) { return spi_register_driver(&ams369fg06_driver); } static void __exit ams369fg06_exit(void) { spi_unregister_driver(&ams369fg06_driver); } module_init(ams369fg06_init); module_exit(ams369fg06_exit); MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); MODULE_DESCRIPTION("ams369fg06 LCD Driver"); MODULE_LICENSE("GPL");
gpl-2.0
Snuzzo/max_od
drivers/gpu/drm/radeon/radeon_irq_kms.c
667
7548
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

/* Top-level IRQ entry point registered with DRM; defers to the chip code. */
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct radeon_device *rdev = dev->dev_private;

	return radeon_irq_process(rdev);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void radeon_hotplug_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  hotplug_work);
	struct drm_device *ddev = rdev->ddev;
	struct drm_mode_config *conf = &ddev->mode_config;
	struct drm_connector *conn;

	/* Re-probe every connector so its state reflects the new topology. */
	if (conf->num_connector) {
		list_for_each_entry(conn, &conf->connector_list, head)
			radeon_connector_hotplug(conn);
	}

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(ddev);
}

/* Mask every interrupt source and flush anything already pending. */
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned n;

	/* Disable *all* interrupts */
	rdev->irq.sw_int = false;
	rdev->irq.gui_idle = false;
	for (n = 0; n < rdev->num_crtc; n++)
		rdev->irq.crtc_vblank_int[n] = false;
	for (n = 0; n < 6; n++) {
		rdev->irq.hpd[n] = false;
		rdev->irq.pflip[n] = false;
	}
	radeon_irq_set(rdev);
	/* Clear bits */
	radeon_irq_process(rdev);
}

/* Enable the software interrupt once the handler is installed. */
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	dev->max_vblank_count = 0x001fffff;
	rdev->irq.sw_int = true;
	radeon_irq_set(rdev);
	return 0;
}

/* Mirror of preinstall: mask everything before the handler goes away. */
void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned n;

	if (rdev == NULL)
		return;

	/* Disable *all* interrupts */
	rdev->irq.sw_int = false;
	rdev->irq.gui_idle = false;
	for (n = 0; n < rdev->num_crtc; n++)
		rdev->irq.crtc_vblank_int[n] = false;
	for (n = 0; n < 6; n++) {
		rdev->irq.hpd[n] = false;
		rdev->irq.pflip[n] = false;
	}
	radeon_irq_set(rdev);
}

/*
 * Decide whether MSI should be enabled for this ASIC, applying the
 * module parameter override first and then the per-board quirk list.
 */
static bool radeon_msi_ok(struct radeon_device *rdev)
{
	/* RV370/RV380 was first asic with MSI support */
	if (rdev->family < CHIP_RV380)
		return false;

	/* MSIs don't work on AGP */
	if (rdev->flags & RADEON_IS_AGP)
		return false;

	/* force MSI on */
	if (radeon_msi == 1)
		return true;
	if (radeon_msi == 0)
		return false;

	/* Quirks */

	/* HP RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x103c) &&
	    (rdev->pdev->subsystem_device == 0x30c2))
		return true;

	/* Dell RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x1028) &&
	    (rdev->pdev->subsystem_device == 0x01fc))
		return true;

	/* Dell RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x1028) &&
	    (rdev->pdev->subsystem_device == 0x01fd))
		return true;

	/* Gateway RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x107b) &&
	    (rdev->pdev->subsystem_device == 0x0185))
		return true;

	/* try and enable MSIs by default on all RS690s */
	if (rdev->family == CHIP_RS690)
		return true;

	/* RV515 seems to have MSI issues where it loses
	 * MSI rearms occasionally. This leads to lockups and freezes.
	 * disable it by default.
	 */
	if (rdev->family == CHIP_RV515)
		return false;

	if (rdev->flags & RADEON_IS_IGP) {
		/* APUs work fine with MSIs */
		if (rdev->family >= CHIP_PALM)
			return true;
		/* lots of IGPs have problems with MSIs */
		return false;
	}

	return true;
}

/*
 * One-time IRQ setup: locks, DRM vblank bookkeeping, optional MSI,
 * and finally installation of the interrupt handler.
 */
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int crtc;
	int r;

	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
	spin_lock_init(&rdev->irq.sw_lock);
	for (crtc = 0; crtc < rdev->num_crtc; crtc++)
		spin_lock_init(&rdev->irq.pflip_lock[crtc]);

	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
	if (r)
		return r;

	/* enable msi */
	rdev->msi_enabled = 0;
	if (radeon_msi_ok(rdev) && !pci_enable_msi(rdev->pdev)) {
		rdev->msi_enabled = 1;
		dev_info(rdev->dev, "radeon: using MSI.\n");
	}

	rdev->irq.installed = true;
	r = drm_irq_install(rdev->ddev);
	if (r) {
		rdev->irq.installed = false;
		return r;
	}
	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}

/* Teardown counterpart of radeon_irq_kms_init(). */
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
	drm_vblank_cleanup(rdev->ddev);
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
		rdev->irq.installed = false;
		if (rdev->msi_enabled)
			pci_disable_msi(rdev->pdev);
	}
	flush_work_sync(&rdev->hotplug_work);
}

/* Refcounted enable of the software interrupt (fence signalling). */
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->irq.sw_lock, flags);
	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
		rdev->irq.sw_int = true;
		radeon_irq_set(rdev);
	}
	spin_unlock_irqrestore(&rdev->irq.sw_lock, flags);
}

/* Refcounted disable of the software interrupt. */
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->irq.sw_lock, flags);
	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
		rdev->irq.sw_int = false;
		radeon_irq_set(rdev);
	}
	spin_unlock_irqrestore(&rdev->irq.sw_lock, flags);
}

/* Refcounted enable of a crtc's pageflip interrupt. */
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
	unsigned long flags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], flags);
	if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
		rdev->irq.pflip[crtc] = true;
		radeon_irq_set(rdev);
	}
	spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], flags);
}

/* Refcounted disable of a crtc's pageflip interrupt. */
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
	unsigned long flags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], flags);
	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
	if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
		rdev->irq.pflip[crtc] = false;
		radeon_irq_set(rdev);
	}
	spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], flags);
}
gpl-2.0
0xD34D/kernel_omap
drivers/pci/hotplug/shpchp_core.c
923
10097
/* * Standard Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com> * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/workqueue.h> #include "shpchp.h" /* Global variables */ int shpchp_debug; int shpchp_poll_mode; int shpchp_poll_time; struct workqueue_struct *shpchp_wq; #define DRIVER_VERSION "0.4" #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" #define DRIVER_DESC "Standard Hot Plug PCI Controller Driver" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(shpchp_debug, bool, 0644); module_param(shpchp_poll_mode, bool, 0644); module_param(shpchp_poll_time, int, 0644); MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not"); MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not"); MODULE_PARM_DESC(shpchp_poll_time, "Polling 
mechanism frequency, in seconds"); #define SHPC_MODULE_NAME "shpchp" static int set_attention_status (struct hotplug_slot *slot, u8 value); static int enable_slot (struct hotplug_slot *slot); static int disable_slot (struct hotplug_slot *slot); static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value); static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { .set_attention_status = set_attention_status, .enable_slot = enable_slot, .disable_slot = disable_slot, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, }; /** * release_slot - free up the memory used by a slot * @hotplug_slot: slot to free */ static void release_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); kfree(slot->hotplug_slot->info); kfree(slot->hotplug_slot); kfree(slot); } static int init_slots(struct controller *ctrl) { struct slot *slot; struct hotplug_slot *hotplug_slot; struct hotplug_slot_info *info; char name[SLOT_NAME_SIZE]; int retval = -ENOMEM; int i; for (i = 0; i < ctrl->num_slots; i++) { slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) goto error; hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL); if (!hotplug_slot) goto error_slot; slot->hotplug_slot = hotplug_slot; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) goto error_hpslot; hotplug_slot->info = info; slot->hp_slot = i; slot->ctrl = ctrl; slot->bus = ctrl->pci_dev->subordinate->number; slot->device = ctrl->slot_device_offset + i; slot->hpc_ops = ctrl->hpc_ops; slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); mutex_init(&slot->lock); 
INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work); /* register this slot with the hotplug pci core */ hotplug_slot->private = slot; hotplug_slot->release = &release_slot; snprintf(name, SLOT_NAME_SIZE, "%d", slot->number); hotplug_slot->ops = &shpchp_hotplug_slot_ops; ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " "hp_slot=%x sun=%x slot_device_offset=%x\n", pci_domain_nr(ctrl->pci_dev->subordinate), slot->bus, slot->device, slot->hp_slot, slot->number, ctrl->slot_device_offset); retval = pci_hp_register(slot->hotplug_slot, ctrl->pci_dev->subordinate, slot->device, name); if (retval) { ctrl_err(ctrl, "pci_hp_register failed with error %d\n", retval); goto error_info; } get_power_status(hotplug_slot, &info->power_status); get_attention_status(hotplug_slot, &info->attention_status); get_latch_status(hotplug_slot, &info->latch_status); get_adapter_status(hotplug_slot, &info->adapter_status); list_add(&slot->slot_list, &ctrl->slot_list); } return 0; error_info: kfree(info); error_hpslot: kfree(hotplug_slot); error_slot: kfree(slot); error: return retval; } void cleanup_slots(struct controller *ctrl) { struct list_head *tmp; struct list_head *next; struct slot *slot; list_for_each_safe(tmp, next, &ctrl->slot_list) { slot = list_entry(tmp, struct slot, slot_list); list_del(&slot->slot_list); cancel_delayed_work(&slot->work); flush_scheduled_work(); flush_workqueue(shpchp_wq); pci_hp_deregister(slot->hotplug_slot); } } /* * set_attention_status - Turns the Amber LED for a slot on, off or blink */ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status) { struct slot *slot = get_slot(hotplug_slot); ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); hotplug_slot->info->attention_status = status; slot->hpc_ops->set_attention_status(slot, status); return 0; } static int enable_slot (struct hotplug_slot *hotplug_slot) { struct slot *slot = get_slot(hotplug_slot); ctrl_dbg(slot->ctrl, "%s: physical_slot = 
%s\n", __func__, slot_name(slot)); return shpchp_sysfs_enable_slot(slot); } static int disable_slot (struct hotplug_slot *hotplug_slot) { struct slot *slot = get_slot(hotplug_slot); ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); return shpchp_sysfs_disable_slot(slot); } static int get_power_status (struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_power_status(slot, value); if (retval < 0) *value = hotplug_slot->info->power_status; return 0; } static int get_attention_status (struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_attention_status(slot, value); if (retval < 0) *value = hotplug_slot->info->attention_status; return 0; } static int get_latch_status (struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_latch_status(slot, value); if (retval < 0) *value = hotplug_slot->info->latch_status; return 0; } static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = get_slot(hotplug_slot); int retval; ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); retval = slot->hpc_ops->get_adapter_status(slot, value); if (retval < 0) *value = hotplug_slot->info->adapter_status; return 0; } static int is_shpc_capable(struct pci_dev *dev) { if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)) return 1; if (!pci_find_capability(dev, PCI_CAP_ID_SHPC)) return 0; if (get_hp_hw_control_from_firmware(dev)) return 0; return 1; } static int shpc_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent) { int rc; struct controller *ctrl; if (!is_shpc_capable(pdev)) return -ENODEV; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) { dev_err(&pdev->dev, "%s: Out of memory\n", __func__); goto err_out_none; } INIT_LIST_HEAD(&ctrl->slot_list); rc = shpc_init(ctrl, pdev); if (rc) { ctrl_dbg(ctrl, "Controller initialization failed\n"); goto err_out_free_ctrl; } pci_set_drvdata(pdev, ctrl); /* Setup the slot information structures */ rc = init_slots(ctrl); if (rc) { ctrl_err(ctrl, "Slot initialization failed\n"); goto err_out_release_ctlr; } rc = shpchp_create_ctrl_files(ctrl); if (rc) goto err_cleanup_slots; return 0; err_cleanup_slots: cleanup_slots(ctrl); err_out_release_ctlr: ctrl->hpc_ops->release_ctlr(ctrl); err_out_free_ctrl: kfree(ctrl); err_out_none: return -ENODEV; } static void shpc_remove(struct pci_dev *dev) { struct controller *ctrl = pci_get_drvdata(dev); shpchp_remove_ctrl_files(ctrl); ctrl->hpc_ops->release_ctlr(ctrl); kfree(ctrl); } static struct pci_device_id shpcd_pci_tbl[] = { {PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0)}, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(pci, shpcd_pci_tbl); static struct pci_driver shpc_driver = { .name = SHPC_MODULE_NAME, .id_table = shpcd_pci_tbl, .probe = shpc_probe, .remove = shpc_remove, }; static int __init shpcd_init(void) { int retval = 0; retval = pci_register_driver(&shpc_driver); dbg("%s: pci_register_driver = %d\n", __func__, retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); return retval; } static void __exit shpcd_cleanup(void) { dbg("unload_shpchpd()\n"); pci_unregister_driver(&shpc_driver); info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); } module_init(shpcd_init); module_exit(shpcd_cleanup);
gpl-2.0
htc-mirror/villec2-ics-crc-3.0.16-f4c3948
drivers/misc/mpu3050/mlsl-kernel.c
1179
9648
/* $License: Copyright (C) 2010 InvenSense Corporation, All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. $ */ #include "mlsl.h" #include "mpu-i2c.h" /* ------------ */ /* - Defines. - */ /* ------------ */ /* ---------------------- */ /* - Types definitions. - */ /* ---------------------- */ /* --------------------- */ /* - Function p-types. - */ /* --------------------- */ /** * @brief used to open the I2C or SPI serial port. * This port is used to send and receive data to the MPU device. * @param portNum * The COM port number associated with the device in use. * @return ML_SUCCESS if successful, a non-zero error code otherwise. */ tMLError MLSLSerialOpen(char const *port, void **sl_handle) { return ML_SUCCESS; } /** * @brief used to reset any buffering the driver may be doing * @return ML_SUCCESS if successful, a non-zero error code otherwise. */ tMLError MLSLSerialReset(void *sl_handle) { return ML_SUCCESS; } /** * @brief used to close the I2C or SPI serial port. * This port is used to send and receive data to the MPU device. * @return ML_SUCCESS if successful, a non-zero error code otherwise. */ tMLError MLSLSerialClose(void *sl_handle) { return ML_SUCCESS; } /** * @brief used to read a single byte of data. * This should be sent by I2C or SPI. * * @param slaveAddr I2C slave address of device. * @param registerAddr Register address to read. * @param data Single byte of data to read. 
* * @return ML_SUCCESS if the command is successful, an error code otherwise. */ tMLError MLSLSerialWriteSingle(void *sl_handle, unsigned char slaveAddr, unsigned char registerAddr, unsigned char data) { return sensor_i2c_write_register((struct i2c_adapter *) sl_handle, slaveAddr, registerAddr, data); } /** * @brief used to write multiple bytes of data from registers. * This should be sent by I2C. * * @param slaveAddr I2C slave address of device. * @param registerAddr Register address to write. * @param length Length of burst of data. * @param data Pointer to block of data. * * @return ML_SUCCESS if successful, a non-zero error code otherwise. */ tMLError MLSLSerialWrite(void *sl_handle, unsigned char slaveAddr, unsigned short length, unsigned char const *data) { tMLError result; const unsigned short dataLength = length - 1; const unsigned char startRegAddr = data[0]; unsigned char i2cWrite[SERIAL_MAX_TRANSFER_SIZE + 1]; unsigned short bytesWritten = 0; while (bytesWritten < dataLength) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, dataLength - bytesWritten); if (bytesWritten == 0) { result = sensor_i2c_write((struct i2c_adapter *) sl_handle, slaveAddr, 1 + thisLen, data); } else { /* manually increment register addr between chunks */ i2cWrite[0] = startRegAddr + bytesWritten; memcpy(&i2cWrite[1], &data[1 + bytesWritten], thisLen); result = sensor_i2c_write((struct i2c_adapter *) sl_handle, slaveAddr, 1 + thisLen, i2cWrite); } if (ML_SUCCESS != result) return result; bytesWritten += thisLen; } return ML_SUCCESS; } /** * @brief used to read multiple bytes of data from registers. * This should be sent by I2C. * * @param slaveAddr I2C slave address of device. * @param registerAddr Register address to read. * @param length Length of burst of data. * @param data Pointer to block of data. 
* * @return Zero if successful; an error code otherwise */ tMLError MLSLSerialRead(void *sl_handle, unsigned char slaveAddr, unsigned char registerAddr, unsigned short length, unsigned char *data) { tMLError result; unsigned short bytesRead = 0; if (registerAddr == MPUREG_FIFO_R_W || registerAddr == MPUREG_MEM_R_W) { return ML_ERROR_INVALID_PARAMETER; } while (bytesRead < length) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, length - bytesRead); result = sensor_i2c_read((struct i2c_adapter *) sl_handle, slaveAddr, registerAddr + bytesRead, thisLen, &data[bytesRead]); if (ML_SUCCESS != result) return result; bytesRead += thisLen; } return ML_SUCCESS; } /** * @brief used to write multiple bytes of data to the memory. * This should be sent by I2C. * * @param slaveAddr I2C slave address of device. * @param memAddr The location in the memory to write to. * @param length Length of burst data. * @param data Pointer to block of data. * * @return Zero if successful; an error code otherwise */ tMLError MLSLSerialWriteMem(void *sl_handle, unsigned char slaveAddr, unsigned short memAddr, unsigned short length, unsigned char const *data) { tMLError result; unsigned short bytesWritten = 0; if ((memAddr & 0xFF) + length > MPU_MEM_BANK_SIZE) { printk ("memory read length (%d B) extends beyond its limits (%d) " "if started at location %d\n", length, MPU_MEM_BANK_SIZE, memAddr & 0xFF); return ML_ERROR_INVALID_PARAMETER; } while (bytesWritten < length) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, length - bytesWritten); result = mpu_memory_write((struct i2c_adapter *) sl_handle, slaveAddr, memAddr + bytesWritten, thisLen, &data[bytesWritten]); if (ML_SUCCESS != result) return result; bytesWritten += thisLen; } return ML_SUCCESS; } /** * @brief used to read multiple bytes of data from the memory. * This should be sent by I2C. * * @param slaveAddr I2C slave address of device. * @param memAddr The location in the memory to read from. 
* @param length Length of burst data. * @param data Pointer to block of data. * * @return Zero if successful; an error code otherwise */ tMLError MLSLSerialReadMem(void *sl_handle, unsigned char slaveAddr, unsigned short memAddr, unsigned short length, unsigned char *data) { tMLError result; unsigned short bytesRead = 0; if ((memAddr & 0xFF) + length > MPU_MEM_BANK_SIZE) { printk ("memory read length (%d B) extends beyond its limits (%d) " "if started at location %d\n", length, MPU_MEM_BANK_SIZE, memAddr & 0xFF); return ML_ERROR_INVALID_PARAMETER; } while (bytesRead < length) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, length - bytesRead); result = mpu_memory_read((struct i2c_adapter *) sl_handle, slaveAddr, memAddr + bytesRead, thisLen, &data[bytesRead]); if (ML_SUCCESS != result) return result; bytesRead += thisLen; } return ML_SUCCESS; } /** * @brief used to write multiple bytes of data to the fifo. * This should be sent by I2C. * * @param slaveAddr I2C slave address of device. * @param length Length of burst of data. * @param data Pointer to block of data. * * @return Zero if successful; an error code otherwise */ tMLError MLSLSerialWriteFifo(void *sl_handle, unsigned char slaveAddr, unsigned short length, unsigned char const *data) { tMLError result; unsigned char i2cWrite[SERIAL_MAX_TRANSFER_SIZE + 1]; unsigned short bytesWritten = 0; if (length > FIFO_HW_SIZE) { printk(KERN_ERR "maximum fifo write length is %d\n", FIFO_HW_SIZE); return ML_ERROR_INVALID_PARAMETER; } while (bytesWritten < length) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, length - bytesWritten); i2cWrite[0] = MPUREG_FIFO_R_W; memcpy(&i2cWrite[1], &data[bytesWritten], thisLen); result = sensor_i2c_write((struct i2c_adapter *) sl_handle, slaveAddr, thisLen + 1, i2cWrite); if (ML_SUCCESS != result) return result; bytesWritten += thisLen; } return ML_SUCCESS; } /** * @brief used to read multiple bytes of data from the fifo. * This should be sent by I2C. 
* * @param slaveAddr I2C slave address of device. * @param length Length of burst of data. * @param data Pointer to block of data. * * @return Zero if successful; an error code otherwise */ tMLError MLSLSerialReadFifo(void *sl_handle, unsigned char slaveAddr, unsigned short length, unsigned char *data) { tMLError result; unsigned short bytesRead = 0; if (length > FIFO_HW_SIZE) { printk(KERN_ERR "maximum fifo read length is %d\n", FIFO_HW_SIZE); return ML_ERROR_INVALID_PARAMETER; } while (bytesRead < length) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, length - bytesRead); result = sensor_i2c_read((struct i2c_adapter *) sl_handle, slaveAddr, MPUREG_FIFO_R_W, thisLen, &data[bytesRead]); if (ML_SUCCESS != result) return result; bytesRead += thisLen; } return ML_SUCCESS; } /** * @} */
gpl-2.0
msm8916-zte/android_kernel_zte_msm8916
drivers/media/platform/omap3isp/ispccdc.c
2203
73060
/* * ispccdc.c * * TI OMAP3 ISP - CCDC module * * Copyright (C) 2009-2010 Nokia Corporation * Copyright (C) 2009 Texas Instruments, Inc. * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/module.h> #include <linux/uaccess.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/omap-iommu.h> #include <linux/sched.h> #include <linux/slab.h> #include <media/v4l2-event.h> #include "isp.h" #include "ispreg.h" #include "ispccdc.h" #define CCDC_MIN_WIDTH 32 #define CCDC_MIN_HEIGHT 32 static struct v4l2_mbus_framefmt * __ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh, unsigned int pad, enum v4l2_subdev_format_whence which); static const unsigned int ccdc_fmts[] = { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_YUYV8_2X8, V4L2_MBUS_FMT_UYVY8_2X8, }; /* * ccdc_print_status - Print current CCDC Module register values. 
* @ccdc: Pointer to ISP CCDC device. * * Also prints other debug information stored in the CCDC module. */ #define CCDC_PRINT_REGISTER(isp, name)\ dev_dbg(isp->dev, "###CCDC " #name "=0x%08x\n", \ isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_##name)) static void ccdc_print_status(struct isp_ccdc_device *ccdc) { struct isp_device *isp = to_isp_device(ccdc); dev_dbg(isp->dev, "-------------CCDC Register dump-------------\n"); CCDC_PRINT_REGISTER(isp, PCR); CCDC_PRINT_REGISTER(isp, SYN_MODE); CCDC_PRINT_REGISTER(isp, HD_VD_WID); CCDC_PRINT_REGISTER(isp, PIX_LINES); CCDC_PRINT_REGISTER(isp, HORZ_INFO); CCDC_PRINT_REGISTER(isp, VERT_START); CCDC_PRINT_REGISTER(isp, VERT_LINES); CCDC_PRINT_REGISTER(isp, CULLING); CCDC_PRINT_REGISTER(isp, HSIZE_OFF); CCDC_PRINT_REGISTER(isp, SDOFST); CCDC_PRINT_REGISTER(isp, SDR_ADDR); CCDC_PRINT_REGISTER(isp, CLAMP); CCDC_PRINT_REGISTER(isp, DCSUB); CCDC_PRINT_REGISTER(isp, COLPTN); CCDC_PRINT_REGISTER(isp, BLKCMP); CCDC_PRINT_REGISTER(isp, FPC); CCDC_PRINT_REGISTER(isp, FPC_ADDR); CCDC_PRINT_REGISTER(isp, VDINT); CCDC_PRINT_REGISTER(isp, ALAW); CCDC_PRINT_REGISTER(isp, REC656IF); CCDC_PRINT_REGISTER(isp, CFG); CCDC_PRINT_REGISTER(isp, FMTCFG); CCDC_PRINT_REGISTER(isp, FMT_HORZ); CCDC_PRINT_REGISTER(isp, FMT_VERT); CCDC_PRINT_REGISTER(isp, PRGEVEN0); CCDC_PRINT_REGISTER(isp, PRGEVEN1); CCDC_PRINT_REGISTER(isp, PRGODD0); CCDC_PRINT_REGISTER(isp, PRGODD1); CCDC_PRINT_REGISTER(isp, VP_OUT); CCDC_PRINT_REGISTER(isp, LSC_CONFIG); CCDC_PRINT_REGISTER(isp, LSC_INITIAL); CCDC_PRINT_REGISTER(isp, LSC_TABLE_BASE); CCDC_PRINT_REGISTER(isp, LSC_TABLE_OFFSET); dev_dbg(isp->dev, "--------------------------------------------\n"); } /* * omap3isp_ccdc_busy - Get busy state of the CCDC. * @ccdc: Pointer to ISP CCDC device. 
*/ int omap3isp_ccdc_busy(struct isp_ccdc_device *ccdc) { struct isp_device *isp = to_isp_device(ccdc); return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR) & ISPCCDC_PCR_BUSY; } /* ----------------------------------------------------------------------------- * Lens Shading Compensation */ /* * ccdc_lsc_validate_config - Check that LSC configuration is valid. * @ccdc: Pointer to ISP CCDC device. * @lsc_cfg: the LSC configuration to check. * * Returns 0 if the LSC configuration is valid, or -EINVAL if invalid. */ static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc, struct omap3isp_ccdc_lsc_config *lsc_cfg) { struct isp_device *isp = to_isp_device(ccdc); struct v4l2_mbus_framefmt *format; unsigned int paxel_width, paxel_height; unsigned int paxel_shift_x, paxel_shift_y; unsigned int min_width, min_height, min_size; unsigned int input_width, input_height; paxel_shift_x = lsc_cfg->gain_mode_m; paxel_shift_y = lsc_cfg->gain_mode_n; if ((paxel_shift_x < 2) || (paxel_shift_x > 6) || (paxel_shift_y < 2) || (paxel_shift_y > 6)) { dev_dbg(isp->dev, "CCDC: LSC: Invalid paxel size\n"); return -EINVAL; } if (lsc_cfg->offset & 3) { dev_dbg(isp->dev, "CCDC: LSC: Offset must be a multiple of " "4\n"); return -EINVAL; } if ((lsc_cfg->initial_x & 1) || (lsc_cfg->initial_y & 1)) { dev_dbg(isp->dev, "CCDC: LSC: initial_x and y must be even\n"); return -EINVAL; } format = __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK, V4L2_SUBDEV_FORMAT_ACTIVE); input_width = format->width; input_height = format->height; /* Calculate minimum bytesize for validation */ paxel_width = 1 << paxel_shift_x; min_width = ((input_width + lsc_cfg->initial_x + paxel_width - 1) >> paxel_shift_x) + 1; paxel_height = 1 << paxel_shift_y; min_height = ((input_height + lsc_cfg->initial_y + paxel_height - 1) >> paxel_shift_y) + 1; min_size = 4 * min_width * min_height; if (min_size > lsc_cfg->size) { dev_dbg(isp->dev, "CCDC: LSC: too small table\n"); return -EINVAL; } if (lsc_cfg->offset < (min_width 
* 4)) { dev_dbg(isp->dev, "CCDC: LSC: Offset is too small\n"); return -EINVAL; } if ((lsc_cfg->size / lsc_cfg->offset) < min_height) { dev_dbg(isp->dev, "CCDC: LSC: Wrong size/offset combination\n"); return -EINVAL; } return 0; } /* * ccdc_lsc_program_table - Program Lens Shading Compensation table address. * @ccdc: Pointer to ISP CCDC device. */ static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc, u32 addr) { isp_reg_writel(to_isp_device(ccdc), addr, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE); } /* * ccdc_lsc_setup_regs - Configures the lens shading compensation module * @ccdc: Pointer to ISP CCDC device. */ static void ccdc_lsc_setup_regs(struct isp_ccdc_device *ccdc, struct omap3isp_ccdc_lsc_config *cfg) { struct isp_device *isp = to_isp_device(ccdc); int reg; isp_reg_writel(isp, cfg->offset, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_OFFSET); reg = 0; reg |= cfg->gain_mode_n << ISPCCDC_LSC_GAIN_MODE_N_SHIFT; reg |= cfg->gain_mode_m << ISPCCDC_LSC_GAIN_MODE_M_SHIFT; reg |= cfg->gain_format << ISPCCDC_LSC_GAIN_FORMAT_SHIFT; isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG); reg = 0; reg &= ~ISPCCDC_LSC_INITIAL_X_MASK; reg |= cfg->initial_x << ISPCCDC_LSC_INITIAL_X_SHIFT; reg &= ~ISPCCDC_LSC_INITIAL_Y_MASK; reg |= cfg->initial_y << ISPCCDC_LSC_INITIAL_Y_SHIFT; isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_INITIAL); } static int ccdc_lsc_wait_prefetch(struct isp_ccdc_device *ccdc) { struct isp_device *isp = to_isp_device(ccdc); unsigned int wait; isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS); /* timeout 1 ms */ for (wait = 0; wait < 1000; wait++) { if (isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS) & IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ) { isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS); return 0; } rmb(); udelay(1); } return -ETIMEDOUT; } /* * __ccdc_lsc_enable - Enables/Disables the Lens Shading Compensation module. 
* @ccdc: Pointer to ISP CCDC device. * @enable: 0 Disables LSC, 1 Enables LSC. */ static int __ccdc_lsc_enable(struct isp_ccdc_device *ccdc, int enable) { struct isp_device *isp = to_isp_device(ccdc); const struct v4l2_mbus_framefmt *format = __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK, V4L2_SUBDEV_FORMAT_ACTIVE); if ((format->code != V4L2_MBUS_FMT_SGRBG10_1X10) && (format->code != V4L2_MBUS_FMT_SRGGB10_1X10) && (format->code != V4L2_MBUS_FMT_SBGGR10_1X10) && (format->code != V4L2_MBUS_FMT_SGBRG10_1X10)) return -EINVAL; if (enable) omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_LSC_READ); isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG, ISPCCDC_LSC_ENABLE, enable ? ISPCCDC_LSC_ENABLE : 0); if (enable) { if (ccdc_lsc_wait_prefetch(ccdc) < 0) { isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG, ISPCCDC_LSC_ENABLE); ccdc->lsc.state = LSC_STATE_STOPPED; dev_warn(to_device(ccdc), "LSC prefecth timeout\n"); return -ETIMEDOUT; } ccdc->lsc.state = LSC_STATE_RUNNING; } else { ccdc->lsc.state = LSC_STATE_STOPPING; } return 0; } static int ccdc_lsc_busy(struct isp_ccdc_device *ccdc) { struct isp_device *isp = to_isp_device(ccdc); return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG) & ISPCCDC_LSC_BUSY; } /* __ccdc_lsc_configure - Apply a new configuration to the LSC engine * @ccdc: Pointer to ISP CCDC device * @req: New configuration request * * context: in_interrupt() */ static int __ccdc_lsc_configure(struct isp_ccdc_device *ccdc, struct ispccdc_lsc_config_req *req) { if (!req->enable) return -EINVAL; if (ccdc_lsc_validate_config(ccdc, &req->config) < 0) { dev_dbg(to_device(ccdc), "Discard LSC configuration\n"); return -EINVAL; } if (ccdc_lsc_busy(ccdc)) return -EBUSY; ccdc_lsc_setup_regs(ccdc, &req->config); ccdc_lsc_program_table(ccdc, req->table); return 0; } /* * ccdc_lsc_error_handler - Handle LSC prefetch error scenario. * @ccdc: Pointer to ISP CCDC device. * * Disables LSC, and defers enablement to shadow registers update time. 
*/ static void ccdc_lsc_error_handler(struct isp_ccdc_device *ccdc) { struct isp_device *isp = to_isp_device(ccdc); /* * From OMAP3 TRM: When this event is pending, the module * goes into transparent mode (output =input). Normal * operation can be resumed at the start of the next frame * after: * 1) Clearing this event * 2) Disabling the LSC module * 3) Enabling it */ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG, ISPCCDC_LSC_ENABLE); ccdc->lsc.state = LSC_STATE_STOPPED; } static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc, struct ispccdc_lsc_config_req *req) { struct isp_device *isp = to_isp_device(ccdc); if (req == NULL) return; if (req->iovm) dma_unmap_sg(isp->dev, req->iovm->sgt->sgl, req->iovm->sgt->nents, DMA_TO_DEVICE); if (req->table) omap_iommu_vfree(isp->domain, isp->dev, req->table); kfree(req); } static void ccdc_lsc_free_queue(struct isp_ccdc_device *ccdc, struct list_head *queue) { struct ispccdc_lsc_config_req *req, *n; unsigned long flags; spin_lock_irqsave(&ccdc->lsc.req_lock, flags); list_for_each_entry_safe(req, n, queue, list) { list_del(&req->list); spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags); ccdc_lsc_free_request(ccdc, req); spin_lock_irqsave(&ccdc->lsc.req_lock, flags); } spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags); } static void ccdc_lsc_free_table_work(struct work_struct *work) { struct isp_ccdc_device *ccdc; struct ispccdc_lsc *lsc; lsc = container_of(work, struct ispccdc_lsc, table_work); ccdc = container_of(lsc, struct isp_ccdc_device, lsc); ccdc_lsc_free_queue(ccdc, &lsc->free_queue); } /* * ccdc_lsc_config - Configure the LSC module from a userspace request * * Store the request LSC configuration in the LSC engine request pointer. The * configuration will be applied to the hardware when the CCDC will be enabled, * or at the next LSC interrupt if the CCDC is already running. 
*/ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc, struct omap3isp_ccdc_update_config *config) { struct isp_device *isp = to_isp_device(ccdc); struct ispccdc_lsc_config_req *req; unsigned long flags; void *table; u16 update; int ret; update = config->update & (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC); if (!update) return 0; if (update != (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC)) { dev_dbg(to_device(ccdc), "%s: Both LSC configuration and table " "need to be supplied\n", __func__); return -EINVAL; } req = kzalloc(sizeof(*req), GFP_KERNEL); if (req == NULL) return -ENOMEM; if (config->flag & OMAP3ISP_CCDC_CONFIG_LSC) { if (copy_from_user(&req->config, config->lsc_cfg, sizeof(req->config))) { ret = -EFAULT; goto done; } req->enable = 1; req->table = omap_iommu_vmalloc(isp->domain, isp->dev, 0, req->config.size, IOMMU_FLAG); if (IS_ERR_VALUE(req->table)) { req->table = 0; ret = -ENOMEM; goto done; } req->iovm = omap_find_iovm_area(isp->dev, req->table); if (req->iovm == NULL) { ret = -ENOMEM; goto done; } if (!dma_map_sg(isp->dev, req->iovm->sgt->sgl, req->iovm->sgt->nents, DMA_TO_DEVICE)) { ret = -ENOMEM; req->iovm = NULL; goto done; } dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl, req->iovm->sgt->nents, DMA_TO_DEVICE); table = omap_da_to_va(isp->dev, req->table); if (copy_from_user(table, config->lsc, req->config.size)) { ret = -EFAULT; goto done; } dma_sync_sg_for_device(isp->dev, req->iovm->sgt->sgl, req->iovm->sgt->nents, DMA_TO_DEVICE); } spin_lock_irqsave(&ccdc->lsc.req_lock, flags); if (ccdc->lsc.request) { list_add_tail(&ccdc->lsc.request->list, &ccdc->lsc.free_queue); schedule_work(&ccdc->lsc.table_work); } ccdc->lsc.request = req; spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags); ret = 0; done: if (ret < 0) ccdc_lsc_free_request(ccdc, req); return ret; } static inline int ccdc_lsc_is_configured(struct isp_ccdc_device *ccdc) { unsigned long flags; spin_lock_irqsave(&ccdc->lsc.req_lock, flags); if (ccdc->lsc.active) { 
spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags); return 1; } spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags); return 0; } static int ccdc_lsc_enable(struct isp_ccdc_device *ccdc) { struct ispccdc_lsc *lsc = &ccdc->lsc; if (lsc->state != LSC_STATE_STOPPED) return -EINVAL; if (lsc->active) { list_add_tail(&lsc->active->list, &lsc->free_queue); lsc->active = NULL; } if (__ccdc_lsc_configure(ccdc, lsc->request) < 0) { omap3isp_sbl_disable(to_isp_device(ccdc), OMAP3_ISP_SBL_CCDC_LSC_READ); list_add_tail(&lsc->request->list, &lsc->free_queue); lsc->request = NULL; goto done; } lsc->active = lsc->request; lsc->request = NULL; __ccdc_lsc_enable(ccdc, 1); done: if (!list_empty(&lsc->free_queue)) schedule_work(&lsc->table_work); return 0; } /* ----------------------------------------------------------------------------- * Parameters configuration */ /* * ccdc_configure_clamp - Configure optical-black or digital clamping * @ccdc: Pointer to ISP CCDC device. * * The CCDC performs either optical-black or digital clamp. Configure and enable * the selected clamp method. */ static void ccdc_configure_clamp(struct isp_ccdc_device *ccdc) { struct isp_device *isp = to_isp_device(ccdc); u32 clamp; if (ccdc->obclamp) { clamp = ccdc->clamp.obgain << ISPCCDC_CLAMP_OBGAIN_SHIFT; clamp |= ccdc->clamp.oblen << ISPCCDC_CLAMP_OBSLEN_SHIFT; clamp |= ccdc->clamp.oblines << ISPCCDC_CLAMP_OBSLN_SHIFT; clamp |= ccdc->clamp.obstpixel << ISPCCDC_CLAMP_OBST_SHIFT; isp_reg_writel(isp, clamp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP); } else { isp_reg_writel(isp, ccdc->clamp.dcsubval, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB); } isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP, ISPCCDC_CLAMP_CLAMPEN, ccdc->obclamp ? ISPCCDC_CLAMP_CLAMPEN : 0); } /* * ccdc_configure_fpc - Configure Faulty Pixel Correction * @ccdc: Pointer to ISP CCDC device. 
*/ static void ccdc_configure_fpc(struct isp_ccdc_device *ccdc) { struct isp_device *isp = to_isp_device(ccdc); isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC, ISPCCDC_FPC_FPCEN); if (!ccdc->fpc_en) return; isp_reg_writel(isp, ccdc->fpc.fpcaddr, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC_ADDR); /* The FPNUM field must be set before enabling FPC. */ isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT), OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT) | ISPCCDC_FPC_FPCEN, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC); } /* * ccdc_configure_black_comp - Configure Black Level Compensation. * @ccdc: Pointer to ISP CCDC device. */ static void ccdc_configure_black_comp(struct isp_ccdc_device *ccdc) { struct isp_device *isp = to_isp_device(ccdc); u32 blcomp; blcomp = ccdc->blcomp.b_mg << ISPCCDC_BLKCMP_B_MG_SHIFT; blcomp |= ccdc->blcomp.gb_g << ISPCCDC_BLKCMP_GB_G_SHIFT; blcomp |= ccdc->blcomp.gr_cy << ISPCCDC_BLKCMP_GR_CY_SHIFT; blcomp |= ccdc->blcomp.r_ye << ISPCCDC_BLKCMP_R_YE_SHIFT; isp_reg_writel(isp, blcomp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_BLKCMP); } /* * ccdc_configure_lpf - Configure Low-Pass Filter (LPF). * @ccdc: Pointer to ISP CCDC device. */ static void ccdc_configure_lpf(struct isp_ccdc_device *ccdc) { struct isp_device *isp = to_isp_device(ccdc); isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE, ISPCCDC_SYN_MODE_LPF, ccdc->lpf ? ISPCCDC_SYN_MODE_LPF : 0); } /* * ccdc_configure_alaw - Configure A-law compression. * @ccdc: Pointer to ISP CCDC device. 
*/ static void ccdc_configure_alaw(struct isp_ccdc_device *ccdc) { struct isp_device *isp = to_isp_device(ccdc); const struct isp_format_info *info; u32 alaw = 0; info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code); switch (info->width) { case 8: return; case 10: alaw = ISPCCDC_ALAW_GWDI_9_0; break; case 11: alaw = ISPCCDC_ALAW_GWDI_10_1; break; case 12: alaw = ISPCCDC_ALAW_GWDI_11_2; break; case 13: alaw = ISPCCDC_ALAW_GWDI_12_3; break; } if (ccdc->alaw) alaw |= ISPCCDC_ALAW_CCDTBL; isp_reg_writel(isp, alaw, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW); } /* * ccdc_config_imgattr - Configure sensor image specific attributes. * @ccdc: Pointer to ISP CCDC device. * @colptn: Color pattern of the sensor. */ static void ccdc_config_imgattr(struct isp_ccdc_device *ccdc, u32 colptn) { struct isp_device *isp = to_isp_device(ccdc); isp_reg_writel(isp, colptn, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN); } /* * ccdc_config - Set CCDC configuration from userspace * @ccdc: Pointer to ISP CCDC device. * @userspace_add: Structure containing CCDC configuration sent from userspace. * * Returns 0 if successful, -EINVAL if the pointer to the configuration * structure is null, or the copy_from_user function fails to copy user space * memory to kernel space memory. 
*/ static int ccdc_config(struct isp_ccdc_device *ccdc, struct omap3isp_ccdc_update_config *ccdc_struct) { struct isp_device *isp = to_isp_device(ccdc); unsigned long flags; spin_lock_irqsave(&ccdc->lock, flags); ccdc->shadow_update = 1; spin_unlock_irqrestore(&ccdc->lock, flags); if (OMAP3ISP_CCDC_ALAW & ccdc_struct->update) { ccdc->alaw = !!(OMAP3ISP_CCDC_ALAW & ccdc_struct->flag); ccdc->update |= OMAP3ISP_CCDC_ALAW; } if (OMAP3ISP_CCDC_LPF & ccdc_struct->update) { ccdc->lpf = !!(OMAP3ISP_CCDC_LPF & ccdc_struct->flag); ccdc->update |= OMAP3ISP_CCDC_LPF; } if (OMAP3ISP_CCDC_BLCLAMP & ccdc_struct->update) { if (copy_from_user(&ccdc->clamp, ccdc_struct->bclamp, sizeof(ccdc->clamp))) { ccdc->shadow_update = 0; return -EFAULT; } ccdc->obclamp = !!(OMAP3ISP_CCDC_BLCLAMP & ccdc_struct->flag); ccdc->update |= OMAP3ISP_CCDC_BLCLAMP; } if (OMAP3ISP_CCDC_BCOMP & ccdc_struct->update) { if (copy_from_user(&ccdc->blcomp, ccdc_struct->blcomp, sizeof(ccdc->blcomp))) { ccdc->shadow_update = 0; return -EFAULT; } ccdc->update |= OMAP3ISP_CCDC_BCOMP; } ccdc->shadow_update = 0; if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) { u32 table_old = 0; u32 table_new; u32 size; if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED) return -EBUSY; ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag); if (ccdc->fpc_en) { if (copy_from_user(&ccdc->fpc, ccdc_struct->fpc, sizeof(ccdc->fpc))) return -EFAULT; /* * table_new must be 64-bytes aligned, but it's * already done by omap_iommu_vmalloc(). 
*/ size = ccdc->fpc.fpnum * 4; table_new = omap_iommu_vmalloc(isp->domain, isp->dev, 0, size, IOMMU_FLAG); if (IS_ERR_VALUE(table_new)) return -ENOMEM; if (copy_from_user(omap_da_to_va(isp->dev, table_new), (__force void __user *) ccdc->fpc.fpcaddr, size)) { omap_iommu_vfree(isp->domain, isp->dev, table_new); return -EFAULT; } table_old = ccdc->fpc.fpcaddr; ccdc->fpc.fpcaddr = table_new; } ccdc_configure_fpc(ccdc); if (table_old != 0) omap_iommu_vfree(isp->domain, isp->dev, table_old); } return ccdc_lsc_config(ccdc, ccdc_struct); } static void ccdc_apply_controls(struct isp_ccdc_device *ccdc) { if (ccdc->update & OMAP3ISP_CCDC_ALAW) { ccdc_configure_alaw(ccdc); ccdc->update &= ~OMAP3ISP_CCDC_ALAW; } if (ccdc->update & OMAP3ISP_CCDC_LPF) { ccdc_configure_lpf(ccdc); ccdc->update &= ~OMAP3ISP_CCDC_LPF; } if (ccdc->update & OMAP3ISP_CCDC_BLCLAMP) { ccdc_configure_clamp(ccdc); ccdc->update &= ~OMAP3ISP_CCDC_BLCLAMP; } if (ccdc->update & OMAP3ISP_CCDC_BCOMP) { ccdc_configure_black_comp(ccdc); ccdc->update &= ~OMAP3ISP_CCDC_BCOMP; } } /* * omap3isp_ccdc_restore_context - Restore values of the CCDC module registers * @dev: Pointer to ISP device */ void omap3isp_ccdc_restore_context(struct isp_device *isp) { struct isp_ccdc_device *ccdc = &isp->isp_ccdc; isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, ISPCCDC_CFG_VDLC); ccdc->update = OMAP3ISP_CCDC_ALAW | OMAP3ISP_CCDC_LPF | OMAP3ISP_CCDC_BLCLAMP | OMAP3ISP_CCDC_BCOMP; ccdc_apply_controls(ccdc); ccdc_configure_fpc(ccdc); } /* ----------------------------------------------------------------------------- * Format- and pipeline-related configuration helpers */ /* * ccdc_config_vp - Configure the Video Port. * @ccdc: Pointer to ISP CCDC device. 
 */
static void ccdc_config_vp(struct isp_ccdc_device *ccdc)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
	struct isp_device *isp = to_isp_device(ccdc);
	const struct isp_format_info *info;
	unsigned long l3_ick = pipe->l3_ick;
	/* ISP revision 15.0 supports a larger maximum VP clock divider. */
	unsigned int max_div = isp->revision == ISP_REVISION_15_0 ? 64 : 8;
	unsigned int div = 0;
	u32 fmtcfg_vp;

	fmtcfg_vp = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG)
		  & ~(ISPCCDC_FMTCFG_VPIN_MASK | ISPCCDC_FMTCFG_VPIF_FRQ_MASK);

	info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code);

	/* Select the video port input bit range from the sink format width. */
	switch (info->width) {
	case 8:
	case 10:
		fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_9_0;
		break;
	case 11:
		fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_10_1;
		break;
	case 12:
		fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_11_2;
		break;
	case 13:
		fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_12_3;
		break;
	}

	/* Compute the video port clock divider from the pipeline rates. The
	 * register field encodes the divider minus two.
	 */
	if (pipe->input)
		div = DIV_ROUND_UP(l3_ick, pipe->max_rate);
	else if (pipe->external_rate)
		div = l3_ick / pipe->external_rate;

	div = clamp(div, 2U, max_div);
	fmtcfg_vp |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT;

	isp_reg_writel(isp, fmtcfg_vp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
}

/*
 * ccdc_enable_vp - Enable Video Port.
 * @ccdc: Pointer to ISP CCDC device.
 * @enable: 0 Disables VP, 1 Enables VP
 *
 * This is needed for outputting image to Preview, H3A and HIST ISP submodules.
 */
static void ccdc_enable_vp(struct isp_ccdc_device *ccdc, u8 enable)
{
	struct isp_device *isp = to_isp_device(ccdc);

	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG,
			ISPCCDC_FMTCFG_VPEN, enable ? ISPCCDC_FMTCFG_VPEN : 0);
}

/*
 * ccdc_config_outlineoffset - Configure memory saving output line offset
 * @ccdc: Pointer to ISP CCDC device.
 * @offset: Address offset to start a new line. Must be twice the
 *          Output width and aligned on 32 byte boundary
 * @oddeven: Specifies the odd/even line pattern to be chosen to store the
 *           output.
 * @numlines: Set the value 0-3 for +1-4lines, 4-7 for -1-4lines.
 *
 * - Configures the output line offset when stored in memory
 * - Sets the odd/even line pattern to store the output
 *   (EVENEVEN (1), ODDEVEN (2), EVENODD (3), ODDODD (4))
 * - Configures the number of even and odd line fields in case of rearranging
 *   the lines.
 */
static void ccdc_config_outlineoffset(struct isp_ccdc_device *ccdc,
				      u32 offset, u8 oddeven, u8 numlines)
{
	struct isp_device *isp = to_isp_device(ccdc);

	/* Program the 16-bit line offset. */
	isp_reg_writel(isp, offset & 0xffff,
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HSIZE_OFF);

	isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
		    ISPCCDC_SDOFST_FINV);

	isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
		    ISPCCDC_SDOFST_FOFST_4L);

	/* Select which line-offset field receives the (numlines & 7) value. */
	switch (oddeven) {
	case EVENEVEN:
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
			    (numlines & 0x7) << ISPCCDC_SDOFST_LOFST0_SHIFT);
		break;
	case ODDEVEN:
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
			    (numlines & 0x7) << ISPCCDC_SDOFST_LOFST1_SHIFT);
		break;
	case EVENODD:
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
			    (numlines & 0x7) << ISPCCDC_SDOFST_LOFST2_SHIFT);
		break;
	case ODDODD:
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
			    (numlines & 0x7) << ISPCCDC_SDOFST_LOFST3_SHIFT);
		break;
	default:
		break;
	}
}

/*
 * ccdc_set_outaddr - Set memory address to save output image
 * @ccdc: Pointer to ISP CCDC device.
 * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
 *
 * Sets the memory address where the output will be saved.
 */
static void ccdc_set_outaddr(struct isp_ccdc_device *ccdc, u32 addr)
{
	struct isp_device *isp = to_isp_device(ccdc);

	isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDR_ADDR);
}

/*
 * omap3isp_ccdc_max_rate - Calculate maximum input data rate based on the input
 * @ccdc: Pointer to ISP CCDC device.
 * @max_rate: Maximum calculated data rate.
 *
 * Returns in *max_rate less value between calculated and passed
 */
void omap3isp_ccdc_max_rate(struct isp_ccdc_device *ccdc,
			    unsigned int *max_rate)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
	unsigned int rate;

	if (pipe == NULL)
		return;

	/*
	 * TRM says that for parallel sensors the maximum data rate
	 * should be 90% of the L3/2 clock, otherwise just L3/2.
	 */
	if (ccdc->input == CCDC_INPUT_PARALLEL)
		rate = pipe->l3_ick / 2 * 9 / 10;
	else
		rate = pipe->l3_ick / 2;

	*max_rate = min(*max_rate, rate);
}

/*
 * ccdc_config_sync_if - Set CCDC sync interface configuration
 * @ccdc: Pointer to ISP CCDC device.
 * @pdata: Parallel interface platform data (may be NULL)
 * @data_size: Data size
 */
static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
				struct isp_parallel_platform_data *pdata,
				unsigned int data_size)
{
	struct isp_device *isp = to_isp_device(ccdc);
	const struct v4l2_mbus_framefmt *format;
	u32 syn_mode = ISPCCDC_SYN_MODE_VDHDEN;

	format = &ccdc->formats[CCDC_PAD_SINK];

	if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
	    format->code == V4L2_MBUS_FMT_UYVY8_2X8) {
		/* The bridge is enabled for YUV8 formats. Configure the input
		 * mode accordingly.
		 */
		syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR16;
	}

	switch (data_size) {
	case 8:
		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_8;
		break;
	case 10:
		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_10;
		break;
	case 11:
		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_11;
		break;
	case 12:
		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_12;
		break;
	}

	/* Signal polarities come from the board platform data when present. */
	if (pdata && pdata->data_pol)
		syn_mode |= ISPCCDC_SYN_MODE_DATAPOL;

	if (pdata && pdata->hs_pol)
		syn_mode |= ISPCCDC_SYN_MODE_HDPOL;

	if (pdata && pdata->vs_pol)
		syn_mode |= ISPCCDC_SYN_MODE_VDPOL;

	isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);

	/* The CCDC_CFG.Y8POS bit is used in YCbCr8 input mode only. The
	 * hardware seems to ignore it in all other input modes.
	 */
	if (format->code == V4L2_MBUS_FMT_UYVY8_2X8)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
			    ISPCCDC_CFG_Y8POS);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
			    ISPCCDC_CFG_Y8POS);

	/* Keep the REC656 (BT.656) interface disabled. */
	isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF,
		    ISPCCDC_REC656IF_R656ON);
}

/* CCDC formats descriptions */

/* Colour pattern tables for the ISPCCDC_COLPTN register: one colour code per
 * (line, pixel) position of the 4x4 Bayer tile, one table per CFA ordering.
 */
static const u32 ccdc_sgrbg_pattern =
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC3_SHIFT;

static const u32 ccdc_srggb_pattern =
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC3_SHIFT;

static const u32 ccdc_sbggr_pattern =
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC3_SHIFT;

static const u32 ccdc_sgbrg_pattern =
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC3_SHIFT;

/*
 * ccdc_configure - Apply the full CCDC configuration to the hardware from the
 * active formats, crop rectangle, platform data and pending LSC request.
 */
static void ccdc_configure(struct isp_ccdc_device
*ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);
	struct isp_parallel_platform_data *pdata = NULL;
	struct v4l2_subdev *sensor;
	struct v4l2_mbus_framefmt *format;
	const struct v4l2_rect *crop;
	const struct isp_format_info *fmt_info;
	struct v4l2_subdev_format fmt_src;
	unsigned int depth_out;
	unsigned int depth_in = 0;
	struct media_pad *pad;
	unsigned long flags;
	unsigned int bridge;
	unsigned int shift;
	u32 syn_mode;
	u32 ccdc_pattern;

	pad = media_entity_remote_source(&ccdc->pads[CCDC_PAD_SINK]);
	sensor = media_entity_to_v4l2_subdev(pad->entity);
	if (ccdc->input == CCDC_INPUT_PARALLEL)
		pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv)
			->bus.parallel;

	/* Compute the lane shifter shift value and enable the bridge when the
	 * input format is YUV.
	 */
	fmt_src.pad = pad->index;
	fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	if (!v4l2_subdev_call(sensor, pad, get_fmt, NULL, &fmt_src)) {
		fmt_info = omap3isp_video_format_info(fmt_src.format.code);
		depth_in = fmt_info->width;
	}

	fmt_info = omap3isp_video_format_info
		(isp->isp_ccdc.formats[CCDC_PAD_SINK].code);
	depth_out = fmt_info->width;
	shift = depth_in - depth_out;

	if (fmt_info->code == V4L2_MBUS_FMT_YUYV8_2X8)
		bridge = ISPCTRL_PAR_BRIDGE_LENDIAN;
	else if (fmt_info->code == V4L2_MBUS_FMT_UYVY8_2X8)
		bridge = ISPCTRL_PAR_BRIDGE_BENDIAN;
	else
		bridge = ISPCTRL_PAR_BRIDGE_DISABLE;

	omap3isp_configure_bridge(isp, ccdc->input, pdata, shift, bridge);

	ccdc_config_sync_if(ccdc, pdata, depth_out);

	syn_mode = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);

	/* Use the raw, unprocessed data when writing to memory. The H3A and
	 * histogram modules are still fed with lens shading corrected data.
	 */
	syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR;

	if (ccdc->output & CCDC_OUTPUT_MEMORY)
		syn_mode |= ISPCCDC_SYN_MODE_WEN;
	else
		syn_mode &= ~ISPCCDC_SYN_MODE_WEN;

	if (ccdc->output & CCDC_OUTPUT_RESIZER)
		syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ;
	else
		syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ;

	/* CCDC_PAD_SINK */
	format = &ccdc->formats[CCDC_PAD_SINK];

	/* Mosaic filter */
	switch (format->code) {
	case V4L2_MBUS_FMT_SRGGB10_1X10:
	case V4L2_MBUS_FMT_SRGGB12_1X12:
		ccdc_pattern = ccdc_srggb_pattern;
		break;
	case V4L2_MBUS_FMT_SBGGR10_1X10:
	case V4L2_MBUS_FMT_SBGGR12_1X12:
		ccdc_pattern = ccdc_sbggr_pattern;
		break;
	case V4L2_MBUS_FMT_SGBRG10_1X10:
	case V4L2_MBUS_FMT_SGBRG12_1X12:
		ccdc_pattern = ccdc_sgbrg_pattern;
		break;
	default:
		/* Use GRBG */
		ccdc_pattern = ccdc_sgrbg_pattern;
		break;
	}
	ccdc_config_imgattr(ccdc, ccdc_pattern);

	/* Generate VD0 on the last line of the image and VD1 on the
	 * 2/3 height line.
	 */
	isp_reg_writel(isp, ((format->height - 2) << ISPCCDC_VDINT_0_SHIFT) |
		       ((format->height * 2 / 3) << ISPCCDC_VDINT_1_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VDINT);

	/* CCDC_PAD_SOURCE_OF */
	format = &ccdc->formats[CCDC_PAD_SOURCE_OF];
	crop = &ccdc->crop;

	isp_reg_writel(isp, (crop->left << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
		       ((crop->width - 1) << ISPCCDC_HORZ_INFO_NPH_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO);
	isp_reg_writel(isp, crop->top << ISPCCDC_VERT_START_SLV0_SHIFT,
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START);
	isp_reg_writel(isp, (crop->height - 1)
			<< ISPCCDC_VERT_LINES_NLV_SHIFT,
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES);

	ccdc_config_outlineoffset(ccdc, ccdc->video_out.bpl_value, 0, 0);

	/* The CCDC outputs data in UYVY order by default. Swap bytes to get
	 * YUYV.
	 */
	if (format->code == V4L2_MBUS_FMT_YUYV8_1X16)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
			    ISPCCDC_CFG_BSWD);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
			    ISPCCDC_CFG_BSWD);

	/* Use PACK8 mode for 1byte per pixel formats. */
	if (omap3isp_video_format_info(format->code)->width <= 8)
		syn_mode |= ISPCCDC_SYN_MODE_PACK8;
	else
		syn_mode &= ~ISPCCDC_SYN_MODE_PACK8;

	isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);

	/* CCDC_PAD_SOURCE_VP */
	format = &ccdc->formats[CCDC_PAD_SOURCE_VP];

	isp_reg_writel(isp, (0 << ISPCCDC_FMT_HORZ_FMTSPH_SHIFT) |
		       (format->width << ISPCCDC_FMT_HORZ_FMTLNH_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_HORZ);
	isp_reg_writel(isp, (0 << ISPCCDC_FMT_VERT_FMTSLV_SHIFT) |
		       ((format->height + 1) << ISPCCDC_FMT_VERT_FMTLNV_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_VERT);

	isp_reg_writel(isp, (format->width << ISPCCDC_VP_OUT_HORZ_NUM_SHIFT) |
		       (format->height << ISPCCDC_VP_OUT_VERT_NUM_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VP_OUT);

	/* Lens shading correction. */
	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
	if (ccdc->lsc.request == NULL)
		goto unlock;

	WARN_ON(ccdc->lsc.active);

	/* Get last good LSC configuration. If it is not supported for
	 * the current active resolution discard it.
	 */
	if (ccdc->lsc.active == NULL &&
	    __ccdc_lsc_configure(ccdc, ccdc->lsc.request) == 0) {
		ccdc->lsc.active = ccdc->lsc.request;
	} else {
		list_add_tail(&ccdc->lsc.request->list, &ccdc->lsc.free_queue);
		schedule_work(&ccdc->lsc.table_work);
	}

	ccdc->lsc.request = NULL;

unlock:
	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);

	ccdc_apply_controls(ccdc);
}

/* Enable (1) or disable (0) the CCDC by toggling ISPCCDC_PCR.EN. */
static void __ccdc_enable(struct isp_ccdc_device *ccdc, int enable)
{
	struct isp_device *isp = to_isp_device(ccdc);

	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR,
			ISPCCDC_PCR_EN, enable ?
ISPCCDC_PCR_EN : 0);
}

/*
 * ccdc_disable - Request a CCDC stop and release LSC resources
 * @ccdc: Pointer to ISP CCDC device.
 *
 * When streaming continuously, request a stop and wait for the interrupt-
 * driven stopping sequence to reach CCDC_STOP_FINISHED. Return 0 on success
 * or -ETIMEDOUT if the CCDC hasn't stopped within 2 seconds.
 */
static int ccdc_disable(struct isp_ccdc_device *ccdc)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ccdc->lock, flags);
	if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS)
		ccdc->stopping = CCDC_STOP_REQUEST;
	spin_unlock_irqrestore(&ccdc->lock, flags);

	ret = wait_event_timeout(ccdc->wait,
				 ccdc->stopping == CCDC_STOP_FINISHED,
				 msecs_to_jiffies(2000));
	if (ret == 0) {
		ret = -ETIMEDOUT;
		dev_warn(to_device(ccdc), "CCDC stop timeout!\n");
	}

	omap3isp_sbl_disable(to_isp_device(ccdc), OMAP3_ISP_SBL_CCDC_LSC_READ);

	/* Free LSC tables under the ioctl lock to serialize with ccdc_config */
	mutex_lock(&ccdc->ioctl_lock);
	ccdc_lsc_free_request(ccdc, ccdc->lsc.request);
	ccdc->lsc.request = ccdc->lsc.active;
	ccdc->lsc.active = NULL;
	cancel_work_sync(&ccdc->lsc.table_work);
	ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
	mutex_unlock(&ccdc->ioctl_lock);

	ccdc->stopping = CCDC_STOP_NOT_REQUESTED;

	return ret > 0 ? 0 : ret;
}

/* Enable the CCDC, starting LSC first when a valid configuration exists. */
static void ccdc_enable(struct isp_ccdc_device *ccdc)
{
	if (ccdc_lsc_is_configured(ccdc))
		__ccdc_lsc_enable(ccdc, 1);
	__ccdc_enable(ccdc, 1);
}

/* -----------------------------------------------------------------------------
 * Interrupt handling
 */

/*
 * ccdc_sbl_busy - Poll idle state of CCDC and related SBL memory write bits
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Returns zero if the CCDC is idle and the image has been written to
 * memory, too.
 */
static int ccdc_sbl_busy(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);

	return omap3isp_ccdc_busy(ccdc)
		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_0) &
		   ISPSBL_CCDC_WR_0_DATA_READY)
		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_1) &
		   ISPSBL_CCDC_WR_0_DATA_READY)
		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_2) &
		   ISPSBL_CCDC_WR_0_DATA_READY)
		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_CCDC_WR_3) &
		   ISPSBL_CCDC_WR_0_DATA_READY);
}

/*
 * ccdc_sbl_wait_idle - Wait until the CCDC and related SBL are idle
 * @ccdc: Pointer to ISP CCDC device.
 * @max_wait: Max retry count in us for wait for idle/busy transition.
 */
static int ccdc_sbl_wait_idle(struct isp_ccdc_device *ccdc,
			      unsigned int max_wait)
{
	unsigned int wait = 0;

	if (max_wait == 0)
		max_wait = 10000; /* 10 ms */

	for (wait = 0; wait <= max_wait; wait++) {
		if (!ccdc_sbl_busy(ccdc))
			return 0;

		rmb();
		udelay(1);
	}

	return -EBUSY;
}

/* __ccdc_handle_stopping - Handle CCDC and/or LSC stopping sequence
 * @ccdc: Pointer to ISP CCDC device.
 * @event: Pointing which event trigger handler
 *
 * Return 1 when the event and stopping request combination is satisfied,
 * zero otherwise.
 */
static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
{
	int rval = 0;

	switch ((ccdc->stopping & 3) | event) {
	case CCDC_STOP_REQUEST | CCDC_EVENT_VD1:
		/* First VD1 after the stop request: disable LSC and CCDC. */
		if (ccdc->lsc.state != LSC_STATE_STOPPED)
			__ccdc_lsc_enable(ccdc, 0);
		__ccdc_enable(ccdc, 0);
		ccdc->stopping = CCDC_STOP_EXECUTED;
		return 1;

	case CCDC_STOP_EXECUTED | CCDC_EVENT_VD0:
		ccdc->stopping |= CCDC_STOP_CCDC_FINISHED;
		if (ccdc->lsc.state == LSC_STATE_STOPPED)
			ccdc->stopping |= CCDC_STOP_LSC_FINISHED;
		rval = 1;
		break;

	case CCDC_STOP_EXECUTED | CCDC_EVENT_LSC_DONE:
		ccdc->stopping |= CCDC_STOP_LSC_FINISHED;
		rval = 1;
		break;

	case CCDC_STOP_EXECUTED | CCDC_EVENT_VD1:
		return 1;
	}

	/* Wake up ccdc_disable() once both CCDC and LSC have finished. */
	if (ccdc->stopping == CCDC_STOP_FINISHED) {
		wake_up(&ccdc->wait);
		rval = 1;
	}

	return rval;
}

/* Increment the frame number and queue a V4L2 frame sync event on HS_VS. */
static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
	struct video_device *vdev = ccdc->subdev.devnode;
	struct v4l2_event event;

	/* Frame number propagation */
	atomic_inc(&pipe->frame_number);

	memset(&event, 0, sizeof(event));
	event.type = V4L2_EVENT_FRAME_SYNC;
	event.u.frame_sync.frame_sequence = atomic_read(&pipe->frame_number);

	v4l2_event_queue(vdev, &event);
}

/*
 * ccdc_lsc_isr - Handle LSC events
 * @ccdc: Pointer to ISP CCDC device.
 * @events: LSC events
 */
static void ccdc_lsc_isr(struct isp_ccdc_device *ccdc, u32 events)
{
	unsigned long flags;

	if (events & IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ) {
		struct isp_pipeline *pipe =
			to_isp_pipeline(&ccdc->subdev.entity);

		ccdc_lsc_error_handler(ccdc);
		pipe->error = true;
		dev_dbg(to_device(ccdc), "lsc prefetch error\n");
	}

	if (!(events & IRQ0STATUS_CCDC_LSC_DONE_IRQ))
		return;

	/* LSC_DONE interrupt occur, there are two cases
	 * 1. stopping for reconfiguration
	 * 2. stopping because of STREAM OFF command
	 */
	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);

	if (ccdc->lsc.state == LSC_STATE_STOPPING)
		ccdc->lsc.state = LSC_STATE_STOPPED;

	if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_LSC_DONE))
		goto done;

	if (ccdc->lsc.state != LSC_STATE_RECONFIG)
		goto done;

	/* LSC is in STOPPING state, change to the new state */
	ccdc->lsc.state = LSC_STATE_STOPPED;

	/* This is an exception. Start of frame and LSC_DONE interrupt
	 * have been received on the same time. Skip this event and wait
	 * for better times.
	 */
	if (events & IRQ0STATUS_HS_VS_IRQ)
		goto done;

	/* The LSC engine is stopped at this point. Enable it if there's a
	 * pending request.
	 */
	if (ccdc->lsc.request == NULL)
		goto done;

	ccdc_lsc_enable(ccdc);

done:
	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
}

/*
 * ccdc_isr_buffer - Handle buffer completion on VD0
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Complete the current buffer and program the address of the next one.
 * Return 1 when the caller should restart the CCDC, 0 otherwise.
 */
static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
	struct isp_device *isp = to_isp_device(ccdc);
	struct isp_buffer *buffer;
	int restart = 0;

	/* The CCDC generates VD0 interrupts even when disabled (the datasheet
	 * doesn't explicitly state if that's supposed to happen or not, so it
	 * can be considered as a hardware bug or as a feature, but we have to
	 * deal with it anyway). Disabling the CCDC when no buffer is available
	 * would thus not be enough, we need to handle the situation explicitly.
	 */
	if (list_empty(&ccdc->video_out.dmaqueue))
		goto done;

	/* We're in continuous mode, and memory writes were disabled due to a
	 * buffer underrun. Reenable them now that we have a buffer. The buffer
	 * address has been set in ccdc_video_queue.
	 */
	if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && ccdc->underrun) {
		restart = 1;
		ccdc->underrun = 0;
		goto done;
	}

	if (ccdc_sbl_wait_idle(ccdc, 1000)) {
		dev_info(isp->dev, "CCDC won't become idle!\n");
		goto done;
	}

	buffer = omap3isp_video_buffer_next(&ccdc->video_out);
	if (buffer != NULL) {
		ccdc_set_outaddr(ccdc, buffer->isp_addr);
		restart = 1;
	}

	pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;

	if (ccdc->state == ISP_PIPELINE_STREAM_SINGLESHOT &&
	    isp_pipeline_ready(pipe))
		omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_SINGLESHOT);

done:
	return restart;
}

/*
 * ccdc_vd0_isr - Handle VD0 event
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Executes LSC deferred enablement before next frame starts.
 */
static void ccdc_vd0_isr(struct isp_ccdc_device *ccdc)
{
	unsigned long flags;
	int restart = 0;

	if (ccdc->output & CCDC_OUTPUT_MEMORY)
		restart = ccdc_isr_buffer(ccdc);

	spin_lock_irqsave(&ccdc->lock, flags);
	if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_VD0)) {
		spin_unlock_irqrestore(&ccdc->lock, flags);
		return;
	}

	if (!ccdc->shadow_update)
		ccdc_apply_controls(ccdc);
	spin_unlock_irqrestore(&ccdc->lock, flags);

	if (restart)
		ccdc_enable(ccdc);
}

/*
 * ccdc_vd1_isr - Handle VD1 event
 * @ccdc: Pointer to ISP CCDC device.
 */
static void ccdc_vd1_isr(struct isp_ccdc_device *ccdc)
{
	unsigned long flags;

	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);

	/*
	 * Depending on the CCDC pipeline state, CCDC stopping should be
	 * handled differently. In SINGLESHOT we emulate an internal CCDC
	 * stopping because the CCDC hw works only in continuous mode.
	 * When CONTINUOUS pipeline state is used and the CCDC writes its
	 * data to memory the CCDC and LSC are stopped immediately but
	 * without change the CCDC stopping state machine. The CCDC
	 * stopping state machine should be used only when user request
	 * for stopping is received (SINGLESHOT is an exception).
	 */
	switch (ccdc->state) {
	case ISP_PIPELINE_STREAM_SINGLESHOT:
		ccdc->stopping = CCDC_STOP_REQUEST;
		break;

	case ISP_PIPELINE_STREAM_CONTINUOUS:
		if (ccdc->output & CCDC_OUTPUT_MEMORY) {
			if (ccdc->lsc.state != LSC_STATE_STOPPED)
				__ccdc_lsc_enable(ccdc, 0);
			__ccdc_enable(ccdc, 0);
		}
		break;

	case ISP_PIPELINE_STREAM_STOPPED:
		break;
	}

	if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_VD1))
		goto done;

	if (ccdc->lsc.request == NULL)
		goto done;

	/*
	 * LSC need to be reconfigured. Stop it here and on next LSC_DONE IRQ
	 * do the appropriate changes in registers
	 */
	if (ccdc->lsc.state == LSC_STATE_RUNNING) {
		__ccdc_lsc_enable(ccdc, 0);
		ccdc->lsc.state = LSC_STATE_RECONFIG;
		goto done;
	}

	/* LSC has been in STOPPED state, enable it */
	if (ccdc->lsc.state == LSC_STATE_STOPPED)
		ccdc_lsc_enable(ccdc);

done:
	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
}

/*
 * omap3isp_ccdc_isr - Configure CCDC during interframe time.
 * @ccdc: Pointer to ISP CCDC device.
 * @events: CCDC events
 */
int omap3isp_ccdc_isr(struct isp_ccdc_device *ccdc, u32 events)
{
	if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED)
		return 0;

	if (events & IRQ0STATUS_CCDC_VD1_IRQ)
		ccdc_vd1_isr(ccdc);

	ccdc_lsc_isr(ccdc, events);

	if (events & IRQ0STATUS_CCDC_VD0_IRQ)
		ccdc_vd0_isr(ccdc);

	if (events & IRQ0STATUS_HS_VS_IRQ)
		ccdc_hs_vs_isr(ccdc);

	return 0;
}

/* -----------------------------------------------------------------------------
 * ISP video operations
 */

static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
{
	struct isp_ccdc_device *ccdc = &video->isp->isp_ccdc;

	if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
		return -ENODEV;

	ccdc_set_outaddr(ccdc, buffer->isp_addr);

	/* We now have a buffer queued on the output, restart the pipeline
	 * on the next CCDC interrupt if running in continuous mode (or when
	 * starting the stream.
	 */
	ccdc->underrun = 1;

	return 0;
}

static const struct isp_video_operations ccdc_video_ops = {
	.queue = ccdc_video_queue,
};

/* -----------------------------------------------------------------------------
 * V4L2 subdev operations
 */

/*
 * ccdc_ioctl - CCDC module private ioctl's
 * @sd: ISP CCDC V4L2 subdevice
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * Return 0 on success or a negative error code otherwise.
 */
static long ccdc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	int ret;

	switch (cmd) {
	case VIDIOC_OMAP3ISP_CCDC_CFG:
		mutex_lock(&ccdc->ioctl_lock);
		ret = ccdc_config(ccdc, arg);
		mutex_unlock(&ccdc->ioctl_lock);
		break;

	default:
		return -ENOIOCTLCMD;
	}

	return ret;
}

/* Only the FRAME_SYNC event with id 0 can be subscribed to. */
static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_FRAME_SYNC)
		return -EINVAL;

	/* line number is zero at frame start */
	if (sub->id != 0)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS, NULL);
}

static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}

/*
 * ccdc_set_stream - Enable/Disable streaming on the CCDC module
 * @sd: ISP CCDC V4L2 subdevice
 * @enable: Enable/disable stream
 *
 * When writing to memory, the CCDC hardware can't be enabled without a memory
 * buffer to write to. As the s_stream operation is called in response to a
 * STREAMON call without any buffer queued yet, just update the enabled field
 * and return immediately. The CCDC will be enabled in ccdc_isr_buffer().
 *
 * When not writing to memory enable the CCDC immediately.
 */
static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct isp_device *isp = to_isp_device(ccdc);
	int ret = 0;

	if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED) {
		if (enable == ISP_PIPELINE_STREAM_STOPPED)
			return 0;

		omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_CCDC);
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
			    ISPCCDC_CFG_VDLC);

		ccdc_configure(ccdc);

		/* TODO: Don't configure the video port if all of its output
		 * links are inactive.
		 */
		ccdc_config_vp(ccdc);
		ccdc_enable_vp(ccdc, 1);
		ccdc_print_status(ccdc);
	}

	switch (enable) {
	case ISP_PIPELINE_STREAM_CONTINUOUS:
		if (ccdc->output & CCDC_OUTPUT_MEMORY)
			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);

		if (ccdc->underrun || !(ccdc->output & CCDC_OUTPUT_MEMORY))
			ccdc_enable(ccdc);

		ccdc->underrun = 0;
		break;

	case ISP_PIPELINE_STREAM_SINGLESHOT:
		if (ccdc->output & CCDC_OUTPUT_MEMORY &&
		    ccdc->state != ISP_PIPELINE_STREAM_SINGLESHOT)
			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);

		ccdc_enable(ccdc);
		break;

	case ISP_PIPELINE_STREAM_STOPPED:
		ret = ccdc_disable(ccdc);
		if (ccdc->output & CCDC_OUTPUT_MEMORY)
			omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
		omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_CCDC);
		ccdc->underrun = 0;
		break;
	}

	ccdc->state = enable;
	return ret;
}

/* Return the TRY or ACTIVE format for @pad depending on @which. */
static struct v4l2_mbus_framefmt *
__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
		  unsigned int pad, enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(fh, pad);
	else
		return &ccdc->formats[pad];
}

/* Return the TRY or ACTIVE crop rectangle depending on @which. */
static struct v4l2_rect *
__ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
		enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_crop(fh, CCDC_PAD_SOURCE_OF);
	else
		return &ccdc->crop;
}

/*
 * ccdc_try_format - Try video format on a pad
 * @ccdc: ISP CCDC device
 * @fh : V4L2 subdev file handle
 * @pad: Pad number
 * @fmt: Format
 */
static void ccdc_try_format(struct isp_ccdc_device *ccdc,
			    struct v4l2_subdev_fh *fh, unsigned int pad,
			    struct v4l2_mbus_framefmt *fmt,
			    enum v4l2_subdev_format_whence which)
{
	const struct isp_format_info *info;
	enum v4l2_mbus_pixelcode pixelcode;
	unsigned int width = fmt->width;
	unsigned int height = fmt->height;
	struct v4l2_rect *crop;
	unsigned int i;

	switch (pad) {
	case CCDC_PAD_SINK:
		for (i = 0; i < ARRAY_SIZE(ccdc_fmts); i++) {
			if (fmt->code == ccdc_fmts[i])
				break;
		}

		/* If not found, use SGRBG10 as default */
		if (i >= ARRAY_SIZE(ccdc_fmts))
			fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;

		/* Clamp the input size. */
		fmt->width = clamp_t(u32, width, 32, 4096);
		fmt->height = clamp_t(u32, height, 32, 4096);
		break;

	case CCDC_PAD_SOURCE_OF:
		pixelcode = fmt->code;
		*fmt = *__ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);

		/* YUV formats are converted from 2X8 to 1X16 by the bridge and
		 * can be byte-swapped.
		 */
		if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
		    fmt->code == V4L2_MBUS_FMT_UYVY8_2X8) {
			/* Use the user requested format if YUV. */
			if (pixelcode == V4L2_MBUS_FMT_YUYV8_2X8 ||
			    pixelcode == V4L2_MBUS_FMT_UYVY8_2X8 ||
			    pixelcode == V4L2_MBUS_FMT_YUYV8_1X16 ||
			    pixelcode == V4L2_MBUS_FMT_UYVY8_1X16)
				fmt->code = pixelcode;

			if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8)
				fmt->code = V4L2_MBUS_FMT_YUYV8_1X16;
			else if (fmt->code == V4L2_MBUS_FMT_UYVY8_2X8)
				fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
		}

		/* Hardcode the output size to the crop rectangle size. */
		crop = __ccdc_get_crop(ccdc, fh, which);
		fmt->width = crop->width;
		fmt->height = crop->height;
		break;

	case CCDC_PAD_SOURCE_VP:
		*fmt = *__ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);

		/* The video port interface truncates the data to 10 bits. */
		info = omap3isp_video_format_info(fmt->code);
		fmt->code = info->truncated;

		/* YUV formats are not supported by the video port. */
		if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
		    fmt->code == V4L2_MBUS_FMT_UYVY8_2X8)
			fmt->code = 0;

		/* The number of lines that can be clocked out from the video
		 * port output must be at least one line less than the number
		 * of input lines.
		 */
		fmt->width = clamp_t(u32, width, 32, fmt->width);
		fmt->height = clamp_t(u32, height, 32, fmt->height - 1);
		break;
	}

	/* Data is written to memory unpacked, each 10-bit or 12-bit pixel is
	 * stored on 2 bytes.
	 */
	fmt->colorspace = V4L2_COLORSPACE_SRGB;
	fmt->field = V4L2_FIELD_NONE;
}

/*
 * ccdc_try_crop - Validate a crop rectangle
 * @ccdc: ISP CCDC device
 * @sink: format on the sink pad
 * @crop: crop rectangle to be validated
 */
static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
			  const struct v4l2_mbus_framefmt *sink,
			  struct v4l2_rect *crop)
{
	const struct isp_format_info *info;
	unsigned int max_width;

	/* For Bayer formats, restrict left/top and width/height to even values
	 * to keep the Bayer pattern.
	 */
	info = omap3isp_video_format_info(sink->code);
	if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
		crop->left &= ~1;
		crop->top &= ~1;
	}

	crop->left = clamp_t(u32, crop->left, 0,
			     sink->width - CCDC_MIN_WIDTH);
	crop->top = clamp_t(u32, crop->top, 0,
			    sink->height - CCDC_MIN_HEIGHT);

	/* The data formatter truncates the number of horizontal output pixels
	 * to a multiple of 16. To avoid clipping data, allow callers to request
	 * an output size bigger than the input size up to the nearest multiple
	 * of 16.
	 */
	max_width = (sink->width - crop->left + 15) & ~15;
	crop->width = clamp_t(u32, crop->width, CCDC_MIN_WIDTH, max_width)
		    & ~15;
	crop->height = clamp_t(u32, crop->height, CCDC_MIN_HEIGHT,
			       sink->height - crop->top);

	/* Odd width/height values don't make sense for Bayer formats. */
	if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
		crop->width &= ~1;
		crop->height &= ~1;
	}
}

/*
 * ccdc_enum_mbus_code - Handle pixel format enumeration
 * @sd : pointer to v4l2 subdev structure
 * @fh : V4L2 subdev file handle
 * @code : pointer to v4l2_subdev_mbus_code_enum structure
 * return -EINVAL or zero on success
 */
static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_fh *fh,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	switch (code->pad) {
	case CCDC_PAD_SINK:
		if (code->index >= ARRAY_SIZE(ccdc_fmts))
			return -EINVAL;

		code->code = ccdc_fmts[code->index];
		break;

	case CCDC_PAD_SOURCE_OF:
		format = __ccdc_get_format(ccdc, fh, code->pad,
					   V4L2_SUBDEV_FORMAT_TRY);

		if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
		    format->code == V4L2_MBUS_FMT_UYVY8_2X8) {
			/* In YUV mode the CCDC can swap bytes. */
			if (code->index == 0)
				code->code = V4L2_MBUS_FMT_YUYV8_1X16;
			else if (code->index == 1)
				code->code = V4L2_MBUS_FMT_UYVY8_1X16;
			else
				return -EINVAL;
		} else {
			/* In raw mode, no configurable format conversion is
			 * available.
			 */
			if (code->index == 0)
				code->code = format->code;
			else
				return -EINVAL;
		}
		break;

	case CCDC_PAD_SOURCE_VP:
		/* The CCDC supports no configurable format conversion
		 * compatible with the video port. Enumerate a single output
		 * format code.
		 */
		if (code->index != 0)
			return -EINVAL;

		format = __ccdc_get_format(ccdc, fh, code->pad,
					   V4L2_SUBDEV_FORMAT_TRY);

		/* A pixel code equal to 0 means that the video port doesn't
		 * support the input format. Don't enumerate any pixel code.
		 */
		if (format->code == 0)
			return -EINVAL;

		code->code = format->code;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * ccdc_enum_frame_size - Enumerate supported frame sizes for a pad/code
 * @sd: ISP CCDC V4L2 subdevice
 * @fh: V4L2 subdev file handle
 * @fse: Frame size enumeration structure
 *
 * The minimum and maximum sizes are derived by running ccdc_try_format() on
 * the smallest (1x1) and largest (-1 as u32) possible requested sizes.
 */
static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_fh *fh,
				struct v4l2_subdev_frame_size_enum *fse)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	ccdc_try_format(ccdc, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
	fse->min_width = format.width;
	fse->min_height = format.height;

	if (format.code != fse->code)
		return -EINVAL;

	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	ccdc_try_format(ccdc, fh, fse->pad, &format, V4L2_SUBDEV_FORMAT_TRY);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}

/*
 * ccdc_get_selection - Retrieve a selection rectangle on a pad
 * @sd: ISP CCDC V4L2 subdevice
 * @fh: V4L2 subdev file handle
 * @sel: Selection rectangle
 *
 * The only supported rectangles are the crop rectangles on the output formatter
 * source pad.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
			      struct v4l2_subdev_selection *sel)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	if (sel->pad != CCDC_PAD_SOURCE_OF)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP_BOUNDS:
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = INT_MAX;
		sel->r.height = INT_MAX;

		format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
		ccdc_try_crop(ccdc, format, &sel->r);
		break;

	case V4L2_SEL_TGT_CROP:
		sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * ccdc_set_selection - Set a selection rectangle on a pad
 * @sd: ISP CCDC V4L2 subdevice
 * @fh: V4L2 subdev file handle
 * @sel: Selection rectangle
 *
 * The only supported rectangle is the actual crop rectangle on the output
 * formatter source pad.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
			      struct v4l2_subdev_selection *sel)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	if (sel->target != V4L2_SEL_TGT_CROP ||
	    sel->pad != CCDC_PAD_SOURCE_OF)
		return -EINVAL;

	/* The crop rectangle can't be changed while streaming. */
	if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
		return -EBUSY;

	/* Modifying the crop rectangle always changes the format on the source
	 * pad. If the KEEP_CONFIG flag is set, just return the current crop
	 * rectangle.
	 */
	if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
		sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
		return 0;
	}

	format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
	ccdc_try_crop(ccdc, format, &sel->r);
	*__ccdc_get_crop(ccdc, fh, sel->which) = sel->r;

	/* Update the source format. */
	format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF, sel->which);
	ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format, sel->which);

	return 0;
}

/*
 * ccdc_get_format - Retrieve the video format on a pad
 * @sd : ISP CCDC V4L2 subdevice
 * @fh : V4L2 subdev file handle
 * @fmt: Format
 *
 * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
 * to the format type.
 */
static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
			   struct v4l2_subdev_format *fmt)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __ccdc_get_format(ccdc, fh, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	fmt->format = *format;
	return 0;
}

/*
 * ccdc_set_format - Set the video format on a pad
 * @sd : ISP CCDC V4L2 subdevice
 * @fh : V4L2 subdev file handle
 * @fmt: Format
 *
 * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
 * to the format type.
 */
static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
			   struct v4l2_subdev_format *fmt)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;
	struct v4l2_rect *crop;

	format = __ccdc_get_format(ccdc, fh, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	ccdc_try_format(ccdc, fh, fmt->pad, &fmt->format, fmt->which);
	*format = fmt->format;

	/* Propagate the format from sink to source */
	if (fmt->pad == CCDC_PAD_SINK) {
		/* Reset the crop rectangle. */
		crop = __ccdc_get_crop(ccdc, fh, fmt->which);
		crop->left = 0;
		crop->top = 0;
		crop->width = fmt->format.width;
		crop->height = fmt->format.height;

		ccdc_try_crop(ccdc, &fmt->format, crop);

		/* Update the source formats.
*/ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF, fmt->which); *format = fmt->format; ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format, fmt->which); format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_VP, fmt->which); *format = fmt->format; ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_VP, format, fmt->which); } return 0; } /* * Decide whether desired output pixel code can be obtained with * the lane shifter by shifting the input pixel code. * @in: input pixelcode to shifter * @out: output pixelcode from shifter * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0] * * return true if the combination is possible * return false otherwise */ static bool ccdc_is_shiftable(enum v4l2_mbus_pixelcode in, enum v4l2_mbus_pixelcode out, unsigned int additional_shift) { const struct isp_format_info *in_info, *out_info; if (in == out) return true; in_info = omap3isp_video_format_info(in); out_info = omap3isp_video_format_info(out); if ((in_info->flavor == 0) || (out_info->flavor == 0)) return false; if (in_info->flavor != out_info->flavor) return false; return in_info->width - out_info->width + additional_shift <= 6; } static int ccdc_link_validate(struct v4l2_subdev *sd, struct media_link *link, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt) { struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd); unsigned long parallel_shift; /* Check if the two ends match */ if (source_fmt->format.width != sink_fmt->format.width || source_fmt->format.height != sink_fmt->format.height) return -EPIPE; /* We've got a parallel sensor here. 
*/ if (ccdc->input == CCDC_INPUT_PARALLEL) { struct isp_parallel_platform_data *pdata = &((struct isp_v4l2_subdevs_group *) media_entity_to_v4l2_subdev(link->source->entity) ->host_priv)->bus.parallel; parallel_shift = pdata->data_lane_shift * 2; } else { parallel_shift = 0; } /* Lane shifter may be used to drop bits on CCDC sink pad */ if (!ccdc_is_shiftable(source_fmt->format.code, sink_fmt->format.code, parallel_shift)) return -EPIPE; return 0; } /* * ccdc_init_formats - Initialize formats on all pads * @sd: ISP CCDC V4L2 subdevice * @fh: V4L2 subdev file handle * * Initialize all pad formats with default values. If fh is not NULL, try * formats are initialized on the file handle. Otherwise active formats are * initialized on the device. */ static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct v4l2_subdev_format format; memset(&format, 0, sizeof(format)); format.pad = CCDC_PAD_SINK; format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10; format.format.width = 4096; format.format.height = 4096; ccdc_set_format(sd, fh, &format); return 0; } /* V4L2 subdev core operations */ static const struct v4l2_subdev_core_ops ccdc_v4l2_core_ops = { .ioctl = ccdc_ioctl, .subscribe_event = ccdc_subscribe_event, .unsubscribe_event = ccdc_unsubscribe_event, }; /* V4L2 subdev video operations */ static const struct v4l2_subdev_video_ops ccdc_v4l2_video_ops = { .s_stream = ccdc_set_stream, }; /* V4L2 subdev pad operations */ static const struct v4l2_subdev_pad_ops ccdc_v4l2_pad_ops = { .enum_mbus_code = ccdc_enum_mbus_code, .enum_frame_size = ccdc_enum_frame_size, .get_fmt = ccdc_get_format, .set_fmt = ccdc_set_format, .get_selection = ccdc_get_selection, .set_selection = ccdc_set_selection, .link_validate = ccdc_link_validate, }; /* V4L2 subdev operations */ static const struct v4l2_subdev_ops ccdc_v4l2_ops = { .core = &ccdc_v4l2_core_ops, .video = &ccdc_v4l2_video_ops, .pad = 
&ccdc_v4l2_pad_ops, }; /* V4L2 subdev internal operations */ static const struct v4l2_subdev_internal_ops ccdc_v4l2_internal_ops = { .open = ccdc_init_formats, }; /* ----------------------------------------------------------------------------- * Media entity operations */ /* * ccdc_link_setup - Setup CCDC connections * @entity: CCDC media entity * @local: Pad at the local end of the link * @remote: Pad at the remote end of the link * @flags: Link flags * * return -EINVAL or zero on success */ static int ccdc_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd); struct isp_device *isp = to_isp_device(ccdc); switch (local->index | media_entity_type(remote->entity)) { case CCDC_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV: /* Read from the sensor (parallel interface), CCP2, CSI2a or * CSI2c. */ if (!(flags & MEDIA_LNK_FL_ENABLED)) { ccdc->input = CCDC_INPUT_NONE; break; } if (ccdc->input != CCDC_INPUT_NONE) return -EBUSY; if (remote->entity == &isp->isp_ccp2.subdev.entity) ccdc->input = CCDC_INPUT_CCP2B; else if (remote->entity == &isp->isp_csi2a.subdev.entity) ccdc->input = CCDC_INPUT_CSI2A; else if (remote->entity == &isp->isp_csi2c.subdev.entity) ccdc->input = CCDC_INPUT_CSI2C; else ccdc->input = CCDC_INPUT_PARALLEL; break; /* * The ISP core doesn't support pipelines with multiple video outputs. * Revisit this when it will be implemented, and return -EBUSY for now. */ case CCDC_PAD_SOURCE_VP | MEDIA_ENT_T_V4L2_SUBDEV: /* Write to preview engine, histogram and H3A. When none of * those links are active, the video port can be disabled. 
*/ if (flags & MEDIA_LNK_FL_ENABLED) { if (ccdc->output & ~CCDC_OUTPUT_PREVIEW) return -EBUSY; ccdc->output |= CCDC_OUTPUT_PREVIEW; } else { ccdc->output &= ~CCDC_OUTPUT_PREVIEW; } break; case CCDC_PAD_SOURCE_OF | MEDIA_ENT_T_DEVNODE: /* Write to memory */ if (flags & MEDIA_LNK_FL_ENABLED) { if (ccdc->output & ~CCDC_OUTPUT_MEMORY) return -EBUSY; ccdc->output |= CCDC_OUTPUT_MEMORY; } else { ccdc->output &= ~CCDC_OUTPUT_MEMORY; } break; case CCDC_PAD_SOURCE_OF | MEDIA_ENT_T_V4L2_SUBDEV: /* Write to resizer */ if (flags & MEDIA_LNK_FL_ENABLED) { if (ccdc->output & ~CCDC_OUTPUT_RESIZER) return -EBUSY; ccdc->output |= CCDC_OUTPUT_RESIZER; } else { ccdc->output &= ~CCDC_OUTPUT_RESIZER; } break; default: return -EINVAL; } return 0; } /* media operations */ static const struct media_entity_operations ccdc_media_ops = { .link_setup = ccdc_link_setup, .link_validate = v4l2_subdev_link_validate, }; void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc) { v4l2_device_unregister_subdev(&ccdc->subdev); omap3isp_video_unregister(&ccdc->video_out); } int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc, struct v4l2_device *vdev) { int ret; /* Register the subdev and video node. */ ret = v4l2_device_register_subdev(vdev, &ccdc->subdev); if (ret < 0) goto error; ret = omap3isp_video_register(&ccdc->video_out, vdev); if (ret < 0) goto error; return 0; error: omap3isp_ccdc_unregister_entities(ccdc); return ret; } /* ----------------------------------------------------------------------------- * ISP CCDC initialisation and cleanup */ /* * ccdc_init_entities - Initialize V4L2 subdev and media entity * @ccdc: ISP CCDC module * * Return 0 on success and a negative error code on failure. 
*/ static int ccdc_init_entities(struct isp_ccdc_device *ccdc) { struct v4l2_subdev *sd = &ccdc->subdev; struct media_pad *pads = ccdc->pads; struct media_entity *me = &sd->entity; int ret; ccdc->input = CCDC_INPUT_NONE; v4l2_subdev_init(sd, &ccdc_v4l2_ops); sd->internal_ops = &ccdc_v4l2_internal_ops; strlcpy(sd->name, "OMAP3 ISP CCDC", sizeof(sd->name)); sd->grp_id = 1 << 16; /* group ID for isp subdevs */ v4l2_set_subdevdata(sd, ccdc); sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE; pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK; pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE; pads[CCDC_PAD_SOURCE_OF].flags = MEDIA_PAD_FL_SOURCE; me->ops = &ccdc_media_ops; ret = media_entity_init(me, CCDC_PADS_NUM, pads, 0); if (ret < 0) return ret; ccdc_init_formats(sd, NULL); ccdc->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ccdc->video_out.ops = &ccdc_video_ops; ccdc->video_out.isp = to_isp_device(ccdc); ccdc->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3; ccdc->video_out.bpl_alignment = 32; ret = omap3isp_video_init(&ccdc->video_out, "CCDC"); if (ret < 0) goto error_video; /* Connect the CCDC subdev to the video node. */ ret = media_entity_create_link(&ccdc->subdev.entity, CCDC_PAD_SOURCE_OF, &ccdc->video_out.video.entity, 0, 0); if (ret < 0) goto error_link; return 0; error_link: omap3isp_video_cleanup(&ccdc->video_out); error_video: media_entity_cleanup(me); return ret; } /* * omap3isp_ccdc_init - CCDC module initialization. * @dev: Device pointer specific to the OMAP3 ISP. * * TODO: Get the initialisation values from platform data. * * Return 0 on success or a negative error code otherwise. 
*/ int omap3isp_ccdc_init(struct isp_device *isp) { struct isp_ccdc_device *ccdc = &isp->isp_ccdc; int ret; spin_lock_init(&ccdc->lock); init_waitqueue_head(&ccdc->wait); mutex_init(&ccdc->ioctl_lock); ccdc->stopping = CCDC_STOP_NOT_REQUESTED; INIT_WORK(&ccdc->lsc.table_work, ccdc_lsc_free_table_work); ccdc->lsc.state = LSC_STATE_STOPPED; INIT_LIST_HEAD(&ccdc->lsc.free_queue); spin_lock_init(&ccdc->lsc.req_lock); ccdc->clamp.oblen = 0; ccdc->clamp.dcsubval = 0; ccdc->update = OMAP3ISP_CCDC_BLCLAMP; ccdc_apply_controls(ccdc); ret = ccdc_init_entities(ccdc); if (ret < 0) { mutex_destroy(&ccdc->ioctl_lock); return ret; } return 0; } /* * omap3isp_ccdc_cleanup - CCDC module cleanup. * @dev: Device pointer specific to the OMAP3 ISP. */ void omap3isp_ccdc_cleanup(struct isp_device *isp) { struct isp_ccdc_device *ccdc = &isp->isp_ccdc; omap3isp_video_cleanup(&ccdc->video_out); media_entity_cleanup(&ccdc->subdev.entity); /* Free LSC requests. As the CCDC is stopped there's no active request, * so only the pending request and the free queue need to be handled. */ ccdc_lsc_free_request(ccdc, ccdc->lsc.request); cancel_work_sync(&ccdc->lsc.table_work); ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue); if (ccdc->fpc.fpcaddr != 0) omap_iommu_vfree(isp->domain, isp->dev, ccdc->fpc.fpcaddr); mutex_destroy(&ccdc->ioctl_lock); }
gpl-2.0
oppo-source/Find7-4.3-kernel-source
drivers/net/wireless/iwlwifi/iwl-mac80211.c
2715
41498
/****************************************************************************** * * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <net/mac80211.h> #include <asm/div64.h> #include "iwl-eeprom.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" #include "iwl-agn-calib.h" #include "iwl-agn.h" #include "iwl-shared.h" #include "iwl-trans.h" #include "iwl-op-mode.h" /***************************************************************************** * * mac80211 entry point functions * *****************************************************************************/ static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = { { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, { .max = 1, .types = BIT(NL80211_IFTYPE_AP), }, }; static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = { { .max = 2, .types = BIT(NL80211_IFTYPE_STATION), }, }; static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = { { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_AP), }, }; static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = { { .max = 2, .types = BIT(NL80211_IFTYPE_STATION), }, { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT), }, }; static const struct ieee80211_iface_combination iwlagn_iface_combinations_dualmode[] = { { .num_different_channels = 1, .max_interfaces = 2, .beacon_int_infra_match = true, .limits = iwlagn_sta_ap_limits, .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits), }, { .num_different_channels = 1, .max_interfaces = 2, .limits = iwlagn_2sta_limits, .n_limits = ARRAY_SIZE(iwlagn_2sta_limits), }, }; static const struct ieee80211_iface_combination 
iwlagn_iface_combinations_p2p[] = { { .num_different_channels = 1, .max_interfaces = 2, .beacon_int_infra_match = true, .limits = iwlagn_p2p_sta_go_limits, .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits), }, { .num_different_channels = 1, .max_interfaces = 2, .limits = iwlagn_p2p_2sta_limits, .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits), }, }; /* * Not a mac80211 entry point function, but it fits in with all the * other mac80211 functions grouped here. */ int iwlagn_mac_setup_register(struct iwl_priv *priv, const struct iwl_ucode_capabilities *capa) { int ret; struct ieee80211_hw *hw = priv->hw; struct iwl_rxon_context *ctx; hw->rate_control_algorithm = "iwl-agn-rs"; /* Tell mac80211 our characteristics */ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_REPORTS_TX_ACK_STATUS; /* * Including the following line will crash some AP's. This * workaround removes the stimulus which causes the crash until * the AP software can be fixed. 
hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; */ hw->flags |= IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE) hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | IEEE80211_HW_SUPPORTS_STATIC_SMPS; #ifndef CONFIG_IWLWIFI_EXPERIMENTAL_MFP /* enable 11w if the uCode advertise */ if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP) #endif /* !CONFIG_IWLWIFI_EXPERIMENTAL_MFP */ hw->flags |= IEEE80211_HW_MFP_CAPABLE; hw->sta_data_size = sizeof(struct iwl_station_priv); hw->vif_data_size = sizeof(struct iwl_vif_priv); for_each_context(priv, ctx) { hw->wiphy->interface_modes |= ctx->interface_modes; hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; } BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) { hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p; hw->wiphy->n_iface_combinations = ARRAY_SIZE(iwlagn_iface_combinations_p2p); } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode; hw->wiphy->n_iface_combinations = ARRAY_SIZE(iwlagn_iface_combinations_dualmode); } hw->wiphy->max_remain_on_channel_duration = 1000; hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS | WIPHY_FLAG_IBSS_RSN; if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && trans(priv)->ops->wowlan_suspend && device_can_wakeup(trans(priv)->dev)) { hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_EAP_IDENTITY_REQ | WIPHY_WOWLAN_RFKILL_RELEASE; if (!iwlagn_mod_params.sw_crypto) hw->wiphy->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_GTK_REKEY_FAILURE; hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; hw->wiphy->wowlan.pattern_min_len = IWLAGN_WOWLAN_MIN_PATTERN_LEN; hw->wiphy->wowlan.pattern_max_len = IWLAGN_WOWLAN_MAX_PATTERN_LEN; } if (iwlagn_mod_params.power_save) hw->wiphy->flags |= 
WIPHY_FLAG_PS_ON_BY_DEFAULT; else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; /* we create the 802.11 header and a zero-length SSID element */ hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2; /* Default value; 4 EDCA QOS priorities */ hw->queues = 4; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->bands[IEEE80211_BAND_2GHZ]; if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->bands[IEEE80211_BAND_5GHZ]; hw->wiphy->hw_version = trans(priv)->hw_id; iwl_leds_init(priv); ret = ieee80211_register_hw(priv->hw); if (ret) { IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); return ret; } priv->mac80211_registered = 1; return 0; } void iwlagn_mac_unregister(struct iwl_priv *priv) { if (!priv->mac80211_registered) return; iwl_leds_exit(priv); ieee80211_unregister_hw(priv->hw); priv->mac80211_registered = 0; } static int __iwl_up(struct iwl_priv *priv) { struct iwl_rxon_context *ctx; int ret; lockdep_assert_held(&priv->mutex); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); return -EIO; } for_each_context(priv, ctx) { ret = iwlagn_alloc_bcast_station(priv, ctx); if (ret) { iwl_dealloc_bcast_stations(priv); return ret; } } ret = iwl_run_init_ucode(priv); if (ret) { IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret); goto error; } ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR); if (ret) { IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret); goto error; } ret = iwl_alive_start(priv); if (ret) goto error; return 0; error: set_bit(STATUS_EXIT_PENDING, &priv->status); iwl_down(priv); clear_bit(STATUS_EXIT_PENDING, &priv->status); IWL_ERR(priv, "Unable to initialize device.\n"); return ret; } static int iwlagn_mac_start(struct ieee80211_hw *hw) { struct iwl_priv *priv = 
IWL_MAC80211_GET_DVM(hw); int ret; IWL_DEBUG_MAC80211(priv, "enter\n"); /* we should be verifying the device is ready to be opened */ mutex_lock(&priv->mutex); ret = __iwl_up(priv); mutex_unlock(&priv->mutex); if (ret) return ret; IWL_DEBUG_INFO(priv, "Start UP work done.\n"); /* Now we should be done, and the READY bit should be set. */ if (WARN_ON(!test_bit(STATUS_READY, &priv->status))) ret = -EIO; iwlagn_led_enable(priv); priv->is_open = 1; IWL_DEBUG_MAC80211(priv, "leave\n"); return 0; } static void iwlagn_mac_stop(struct ieee80211_hw *hw) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); IWL_DEBUG_MAC80211(priv, "enter\n"); if (!priv->is_open) return; priv->is_open = 0; mutex_lock(&priv->mutex); iwl_down(priv); mutex_unlock(&priv->mutex); iwl_cancel_deferred_work(priv); flush_workqueue(priv->workqueue); /* User space software may expect getting rfkill changes * even if interface is down, trans->down will leave the RF * kill interrupt enabled */ iwl_trans_stop_hw(trans(priv)); IWL_DEBUG_MAC80211(priv, "leave\n"); } static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); if (iwlagn_mod_params.sw_crypto) return; IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif) goto out; memcpy(priv->kek, data->kek, NL80211_KEK_LEN); memcpy(priv->kck, data->kck, NL80211_KCK_LEN); priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); priv->have_rekey_data = true; out: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); } #ifdef CONFIG_PM_SLEEP static int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; int ret; if (WARN_ON(!wowlan)) return -EINVAL; IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); /* Don't 
attempt WoWLAN when not associated, tear down instead. */ if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION || !iwl_is_associated_ctx(ctx)) { ret = 1; goto out; } ret = iwlagn_suspend(priv, wowlan); if (ret) goto error; device_set_wakeup_enable(trans(priv)->dev, true); iwl_trans_wowlan_suspend(trans(priv)); goto out; error: priv->wowlan = false; iwlagn_prepare_restart(priv); ieee80211_restart_hw(priv->hw); out: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return ret; } static int iwlagn_mac_resume(struct ieee80211_hw *hw) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; struct ieee80211_vif *vif; unsigned long flags; u32 base, status = 0xffffffff; int ret = -EIO; const struct fw_img *img; IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); base = priv->shrd->device_pointers.error_event_table; if (iwlagn_hw_valid_rtc_data_addr(base)) { spin_lock_irqsave(&trans(priv)->reg_lock, flags); ret = iwl_grab_nic_access_silent(trans(priv)); if (likely(ret == 0)) { iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, base); status = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); iwl_release_nic_access(trans(priv)); } spin_unlock_irqrestore(&trans(priv)->reg_lock, flags); #ifdef CONFIG_IWLWIFI_DEBUGFS if (ret == 0) { img = &(priv->fw->img[IWL_UCODE_WOWLAN]); if (!priv->wowlan_sram) { priv->wowlan_sram = kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len, GFP_KERNEL); } if (priv->wowlan_sram) _iwl_read_targ_mem_words( trans(priv), 0x800000, priv->wowlan_sram, img->sec[IWL_UCODE_SECTION_DATA].len / 4); } #endif } /* we'll clear ctx->vif during iwlagn_prepare_restart() */ vif = ctx->vif; priv->wowlan = false; device_set_wakeup_enable(trans(priv)->dev, false); iwlagn_prepare_restart(priv); memset((void *)&ctx->active, 0, sizeof(ctx->active)); iwl_connection_init_rx_config(priv, ctx); 
iwlagn_set_rxon_chain(priv, ctx); mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); ieee80211_resume_disconnect(vif); return 1; } #endif static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); if (iwlagn_tx_skb(priv, skb)) dev_kfree_skb_any(skb); } static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); } static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; struct iwl_rxon_context *ctx = vif_priv->ctx; int ret; bool is_default_wep_key = false; IWL_DEBUG_MAC80211(priv, "enter\n"); if (iwlagn_mod_params.sw_crypto) { IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); return -EOPNOTSUPP; } switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; /* fall through */ case WLAN_CIPHER_SUITE_CCMP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; break; default: break; } /* * We could program these keys into the hardware as well, but we * don't expect much multicast traffic in IBSS and having keys * for more stations is probably more useful. * * Mark key TX-only and return 0. 
*/ if (vif->type == NL80211_IFTYPE_ADHOC && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { key->hw_key_idx = WEP_INVALID_OFFSET; return 0; } /* If they key was TX-only, accept deletion */ if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET) return 0; mutex_lock(&priv->mutex); iwl_scan_cancel_timeout(priv, 100); BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT); /* * If we are getting WEP group key and we didn't receive any key mapping * so far, we are in legacy wep mode (group key only), otherwise we are * in 1X mode. * In legacy wep mode, we use another host command to the uCode. */ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { if (cmd == SET_KEY) is_default_wep_key = !ctx->key_mapping_keys; else is_default_wep_key = key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT; } switch (cmd) { case SET_KEY: if (is_default_wep_key) { ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key); break; } ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta); if (ret) { /* * can't add key for RX, but we don't need it * in the device for TX so still return 0 */ ret = 0; key->hw_key_idx = WEP_INVALID_OFFSET; } IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); break; case DISABLE_KEY: if (is_default_wep_key) ret = iwl_remove_default_wep_key(priv, ctx, key); else ret = iwl_remove_dynamic_key(priv, ctx, key, sta); IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); break; default: ret = -EINVAL; } mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return ret; } static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); int ret = -EINVAL; struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", sta->addr, tid); if (!(hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)) 
return -EACCES; IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); switch (action) { case IEEE80211_AMPDU_RX_START: if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) break; IWL_DEBUG_HT(priv, "start Rx\n"); ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); break; case IEEE80211_AMPDU_RX_STOP: IWL_DEBUG_HT(priv, "stop Rx\n"); ret = iwl_sta_rx_agg_stop(priv, sta, tid); break; case IEEE80211_AMPDU_TX_START: if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) break; IWL_DEBUG_HT(priv, "start Tx\n"); ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); break; case IEEE80211_AMPDU_TX_STOP: IWL_DEBUG_HT(priv, "stop Tx\n"); ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); if ((ret == 0) && (priv->agg_tids_count > 0)) { priv->agg_tids_count--; IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", priv->agg_tids_count); } if (!priv->agg_tids_count && hw_params(priv).use_rts_for_aggregation) { /* * switch off RTS/CTS if it was previously enabled */ sta_priv->lq_sta.lq.general_params.flags &= ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), &sta_priv->lq_sta.lq, CMD_ASYNC, false); } break; case IEEE80211_AMPDU_TX_OPERATIONAL: ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size); break; } mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return ret; } static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; bool is_ap = vif->type == NL80211_IFTYPE_STATION; int ret; u8 sta_id; IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", sta->addr); sta_priv->sta_id = IWL_INVALID_STATION; atomic_set(&sta_priv->pending_frames, 0); if (vif->type == NL80211_IFTYPE_AP) sta_priv->client = true; ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr, is_ap, sta, &sta_id); if (ret) { IWL_ERR(priv, 
"Unable to add station %pM (%d)\n", sta->addr, ret); /* Should we return success if return code is EEXIST ? */ return ret; } sta_priv->sta_id = sta_id; return 0; } static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; int ret; IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", sta->addr); if (vif->type == NL80211_IFTYPE_STATION) { /* * Station will be removed from device when the RXON * is set to unassociated -- just deactivate it here * to avoid re-programming it. */ ret = 0; iwl_deactivate_station(priv, sta_priv->sta_id, sta->addr); } else { ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr); if (ret) IWL_DEBUG_QUIET_RFKILL(priv, "Error removing station %pM\n", sta->addr); } return ret; } static int iwlagn_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; enum { NONE, ADD, REMOVE, HT_RATE_INIT, ADD_RATE_INIT, } op = NONE; int ret; IWL_DEBUG_MAC80211(priv, "station %pM state change %d->%d\n", sta->addr, old_state, new_state); mutex_lock(&priv->mutex); if (vif->type == NL80211_IFTYPE_STATION) { if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) op = ADD; else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) op = REMOVE; else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) op = HT_RATE_INIT; } else { if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) op = ADD_RATE_INIT; else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) op = REMOVE; } switch (op) { case ADD: ret = iwlagn_mac_sta_add(hw, vif, sta); break; case REMOVE: ret = 
iwlagn_mac_sta_remove(hw, vif, sta); break; case ADD_RATE_INIT: ret = iwlagn_mac_sta_add(hw, vif, sta); if (ret) break; /* Initialize rate scaling */ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", sta->addr); iwl_rs_rate_init(priv, sta, iwl_sta_id(sta)); ret = 0; break; case HT_RATE_INIT: /* Initialize rate scaling */ ret = iwl_sta_update_ht(priv, vif_priv->ctx, sta); if (ret) break; IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", sta->addr); iwl_rs_rate_init(priv, sta, iwl_sta_id(sta)); ret = 0; break; default: ret = 0; break; } /* * mac80211 might WARN if we fail, but due the way we * (badly) handle hard rfkill, we might fail here */ if (iwl_is_rfkill(priv)) ret = 0; mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return ret; } static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_channel_switch *ch_switch) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); const struct iwl_channel_info *ch_info; struct ieee80211_conf *conf = &hw->conf; struct ieee80211_channel *channel = ch_switch->channel; struct iwl_ht_config *ht_conf = &priv->current_ht_config; /* * MULTI-FIXME * When we add support for multiple interfaces, we need to * revisit this. The channel switch command in the device * only affects the BSS context, but what does that really * mean? And what if we get a CSA on the second interface? * This needs a lot of work. 
*/ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; u16 ch; IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); if (iwl_is_rfkill(priv)) goto out; if (test_bit(STATUS_EXIT_PENDING, &priv->status) || test_bit(STATUS_SCANNING, &priv->status) || test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) goto out; if (!iwl_is_associated_ctx(ctx)) goto out; if (!cfg(priv)->lib->set_channel_switch) goto out; ch = channel->hw_value; if (le16_to_cpu(ctx->active.channel) == ch) goto out; ch_info = iwl_get_channel_info(priv, channel->band, ch); if (!is_channel_valid(ch_info)) { IWL_DEBUG_MAC80211(priv, "invalid channel\n"); goto out; } priv->current_ht_config.smps = conf->smps_mode; /* Configure HT40 channels */ ctx->ht.enabled = conf_is_ht(conf); if (ctx->ht.enabled) iwlagn_config_ht40(conf, ctx); else ctx->ht.is_40mhz = false; if ((le16_to_cpu(ctx->staging.channel) != ch)) ctx->staging.flags = 0; iwl_set_rxon_channel(priv, channel, ctx); iwl_set_rxon_ht(priv, ht_conf); iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); iwl_set_rate(priv); /* * at this point, staging_rxon has the * configuration for channel switch */ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); priv->switch_channel = cpu_to_le16(ch); if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) { clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); priv->switch_channel = 0; ieee80211_chswitch_done(ctx->vif, false); } out: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); } static void iwlagn_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); __le32 filter_or = 0, filter_nand = 0; struct iwl_rxon_context *ctx; #define CHK(test, flag) do { \ if (*total_flags & (test)) \ filter_or |= (flag); \ else \ filter_nand |= (flag); \ } while (0) IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", changed_flags, *total_flags); 
CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); #undef CHK mutex_lock(&priv->mutex); for_each_context(priv, ctx) { ctx->staging.filter_flags &= ~filter_nand; ctx->staging.filter_flags |= filter_or; /* * Not committing directly because hardware can perform a scan, * but we'll eventually commit the filter flags change anyway. */ } mutex_unlock(&priv->mutex); /* * Receiving all multicast frames is always enabled by the * default flags setup in iwl_connection_init_rx_config() * since we currently do not support programming multicast * filters into the device. */ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; } static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); mutex_lock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "enter\n"); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n"); goto done; } if (iwl_is_rfkill(priv)) { IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n"); goto done; } /* * mac80211 will not push any more frames for transmit * until the flush is completed */ if (drop) { IWL_DEBUG_MAC80211(priv, "send flush command\n"); if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) { IWL_ERR(priv, "flush request fail\n"); goto done; } } IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); iwl_trans_wait_tx_queue_empty(trans(priv)); done: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); } static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_channel *channel, enum nl80211_channel_type channel_type, int duration) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; int err 
= 0; if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) return -EOPNOTSUPP; if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT))) return -EOPNOTSUPP; IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); if (test_bit(STATUS_SCAN_HW, &priv->status)) { err = -EBUSY; goto out; } priv->hw_roc_channel = channel; priv->hw_roc_chantype = channel_type; /* convert from ms to TU */ priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024); priv->hw_roc_start_notified = false; cancel_delayed_work(&priv->hw_roc_disable_work); if (!ctx->is_active) { static const struct iwl_qos_info default_qos_data = { .def_qos_parm = { .ac[0] = { .cw_min = cpu_to_le16(3), .cw_max = cpu_to_le16(7), .aifsn = 2, .edca_txop = cpu_to_le16(1504), }, .ac[1] = { .cw_min = cpu_to_le16(7), .cw_max = cpu_to_le16(15), .aifsn = 2, .edca_txop = cpu_to_le16(3008), }, .ac[2] = { .cw_min = cpu_to_le16(15), .cw_max = cpu_to_le16(1023), .aifsn = 3, }, .ac[3] = { .cw_min = cpu_to_le16(15), .cw_max = cpu_to_le16(1023), .aifsn = 7, }, }, }; ctx->is_active = true; ctx->qos_data = default_qos_data; ctx->staging.dev_type = RXON_DEV_TYPE_P2P; memcpy(ctx->staging.node_addr, priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, ETH_ALEN); memcpy(ctx->staging.bssid_addr, priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, ETH_ALEN); err = iwlagn_commit_rxon(priv, ctx); if (err) goto out; ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK | RXON_FILTER_PROMISC_MSK | RXON_FILTER_CTL2HOST_MSK; err = iwlagn_commit_rxon(priv, ctx); if (err) { iwlagn_disable_roc(priv); goto out; } priv->hw_roc_setup = true; } err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band); if (err) iwlagn_disable_roc(priv); out: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return err; } static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) return -EOPNOTSUPP; 
IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); iwl_scan_cancel_timeout(priv, priv->hw_roc_duration); iwlagn_disable_roc(priv); mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return 0; } static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, enum ieee80211_rssi_event rssi_event) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist) { if (rssi_event == RSSI_EVENT_LOW) priv->bt_enable_pspoll = true; else if (rssi_event == RSSI_EVENT_HIGH) priv->bt_enable_pspoll = false; iwlagn_send_advance_bt_config(priv); } else { IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled," "ignoring RSSI callback\n"); } mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); } static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); queue_work(priv->workqueue, &priv->beacon_update); return 0; } static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, const struct ieee80211_tx_queue_params *params) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; struct iwl_rxon_context *ctx = vif_priv->ctx; int q; if (WARN_ON(!ctx)) return -EINVAL; IWL_DEBUG_MAC80211(priv, "enter\n"); if (!iwl_is_ready_rf(priv)) { IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); return -EIO; } if (queue >= AC_NUM) { IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); return 0; } q = AC_NUM - 1 - queue; mutex_lock(&priv->mutex); ctx->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min); ctx->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max); ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; ctx->qos_data.def_qos_parm.ac[q].edca_txop = cpu_to_le16((params->txop * 32)); ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; 
mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return 0; } static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); return priv->ibss_manager == IWL_IBSS_MANAGER; } static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { iwl_connection_init_rx_config(priv, ctx); iwlagn_set_rxon_chain(priv, ctx); return iwlagn_commit_rxon(priv, ctx); } static int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { struct ieee80211_vif *vif = ctx->vif; int err; lockdep_assert_held(&priv->mutex); /* * This variable will be correct only when there's just * a single context, but all code using it is for hardware * that supports only one context. */ priv->iw_mode = vif->type; ctx->is_active = true; err = iwl_set_mode(priv, ctx); if (err) { if (!ctx->always_active) ctx->is_active = false; return err; } if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist && vif->type == NL80211_IFTYPE_ADHOC) { /* * pretend to have high BT traffic as long as we * are operating in IBSS mode, as this will cause * the rate scaling etc. to behave as intended. 
*/ priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH; } return 0; } static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; struct iwl_rxon_context *tmp, *ctx = NULL; int err; enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); bool reset = false; IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", viftype, vif->addr); cancel_delayed_work_sync(&priv->hw_roc_disable_work); mutex_lock(&priv->mutex); iwlagn_disable_roc(priv); if (!iwl_is_ready_rf(priv)) { IWL_WARN(priv, "Try to add interface when device not ready\n"); err = -EINVAL; goto out; } for_each_context(priv, tmp) { u32 possible_modes = tmp->interface_modes | tmp->exclusive_interface_modes; if (tmp->vif) { /* On reset we need to add the same interface again */ if (tmp->vif == vif) { reset = true; ctx = tmp; break; } /* check if this busy context is exclusive */ if (tmp->exclusive_interface_modes & BIT(tmp->vif->type)) { err = -EINVAL; goto out; } continue; } if (!(possible_modes & BIT(viftype))) continue; /* have maybe usable context w/o interface */ ctx = tmp; break; } if (!ctx) { err = -EOPNOTSUPP; goto out; } vif_priv->ctx = ctx; ctx->vif = vif; err = iwl_setup_interface(priv, ctx); if (!err || reset) goto out; ctx->vif = NULL; priv->iw_mode = NL80211_IFTYPE_STATION; out: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return err; } static void iwl_teardown_interface(struct iwl_priv *priv, struct ieee80211_vif *vif, bool mode_change) { struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); lockdep_assert_held(&priv->mutex); if (priv->scan_vif == vif) { iwl_scan_cancel_timeout(priv, 200); iwl_force_scan_end(priv); } if (!mode_change) { iwl_set_mode(priv, ctx); if (!ctx->always_active) ctx->is_active = false; } /* * When removing the IBSS interface, overwrite the * BT traffic load with the stored one from the last * notification, if any. 
If this is a device that * doesn't implement this, this has no effect since * both values are the same and zero. */ if (vif->type == NL80211_IFTYPE_ADHOC) priv->bt_traffic_load = priv->last_bt_traffic_load; } static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); if (WARN_ON(ctx->vif != vif)) { struct iwl_rxon_context *tmp; IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif); for_each_context(priv, tmp) IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n", tmp->ctxid, tmp, tmp->vif); } ctx->vif = NULL; iwl_teardown_interface(priv, vif, false); mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); } static int iwlagn_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_iftype newtype, bool newp2p) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS]; struct iwl_rxon_context *tmp; enum nl80211_iftype newviftype = newtype; u32 interface_modes; int err; IWL_DEBUG_MAC80211(priv, "enter\n"); newtype = ieee80211_iftype_p2p(newtype, newp2p); mutex_lock(&priv->mutex); if (!ctx->vif || !iwl_is_ready_rf(priv)) { /* * Huh? But wait ... this can maybe happen when * we're in the middle of a firmware restart! */ err = -EBUSY; goto out; } interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; if (!(interface_modes & BIT(newtype))) { err = -EBUSY; goto out; } /* * Refuse a change that should be done by moving from the PAN * context to the BSS context instead, if the BSS context is * available and can support the new interface type. 
*/ if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif && (bss_ctx->interface_modes & BIT(newtype) || bss_ctx->exclusive_interface_modes & BIT(newtype))) { BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); err = -EBUSY; goto out; } if (ctx->exclusive_interface_modes & BIT(newtype)) { for_each_context(priv, tmp) { if (ctx == tmp) continue; if (!tmp->vif) continue; /* * The current mode switch would be exclusive, but * another context is active ... refuse the switch. */ err = -EBUSY; goto out; } } /* success */ iwl_teardown_interface(priv, vif, true); vif->type = newviftype; vif->p2p = newp2p; err = iwl_setup_interface(priv, ctx); WARN_ON(err); /* * We've switched internally, but submitting to the * device may have failed for some reason. Mask this * error, because otherwise mac80211 will not switch * (and set the interface type back) and we'll be * out of sync with it. */ err = 0; out: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return err; } static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_scan_request *req) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); int ret; IWL_DEBUG_MAC80211(priv, "enter\n"); if (req->n_channels == 0) return -EINVAL; mutex_lock(&priv->mutex); /* * If an internal scan is in progress, just set * up the scan_request as per above. 
*/ if (priv->scan_type != IWL_SCAN_NORMAL) { IWL_DEBUG_SCAN(priv, "SCAN request during internal scan - defer\n"); priv->scan_request = req; priv->scan_vif = vif; ret = 0; } else { priv->scan_request = req; priv->scan_vif = vif; /* * mac80211 will only ask for one band at a time * so using channels[0] here is ok */ ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, req->channels[0]->band); if (ret) { priv->scan_request = NULL; priv->scan_vif = NULL; } } IWL_DEBUG_MAC80211(priv, "leave\n"); mutex_unlock(&priv->mutex); return ret; } static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) { struct iwl_addsta_cmd cmd = { .mode = STA_CONTROL_MODIFY_MSK, .station_flags_msk = STA_FLG_PWR_SAVE_MSK, .sta.sta_id = sta_id, }; iwl_send_add_sta(priv, &cmd, CMD_ASYNC); } static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; int sta_id; IWL_DEBUG_MAC80211(priv, "enter\n"); switch (cmd) { case STA_NOTIFY_SLEEP: WARN_ON(!sta_priv->client); sta_priv->asleep = true; if (atomic_read(&sta_priv->pending_frames) > 0) ieee80211_sta_block_awake(hw, sta, true); break; case STA_NOTIFY_AWAKE: WARN_ON(!sta_priv->client); if (!sta_priv->asleep) break; sta_priv->asleep = false; sta_id = iwl_sta_id(sta); if (sta_id != IWL_INVALID_STATION) iwl_sta_modify_ps_wake(priv, sta_id); break; default: break; } IWL_DEBUG_MAC80211(priv, "leave\n"); } struct ieee80211_ops iwlagn_hw_ops = { .tx = iwlagn_mac_tx, .start = iwlagn_mac_start, .stop = iwlagn_mac_stop, #ifdef CONFIG_PM_SLEEP .suspend = iwlagn_mac_suspend, .resume = iwlagn_mac_resume, #endif .add_interface = iwlagn_mac_add_interface, .remove_interface = iwlagn_mac_remove_interface, .change_interface = iwlagn_mac_change_interface, .config = iwlagn_mac_config, .configure_filter = iwlagn_configure_filter, .set_key = iwlagn_mac_set_key, 
.update_tkip_key = iwlagn_mac_update_tkip_key, .set_rekey_data = iwlagn_mac_set_rekey_data, .conf_tx = iwlagn_mac_conf_tx, .bss_info_changed = iwlagn_bss_info_changed, .ampdu_action = iwlagn_mac_ampdu_action, .hw_scan = iwlagn_mac_hw_scan, .sta_notify = iwlagn_mac_sta_notify, .sta_state = iwlagn_mac_sta_state, .channel_switch = iwlagn_mac_channel_switch, .flush = iwlagn_mac_flush, .tx_last_beacon = iwlagn_mac_tx_last_beacon, .remain_on_channel = iwlagn_mac_remain_on_channel, .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel, .rssi_callback = iwlagn_mac_rssi_callback, CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd) CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump) .set_tim = iwlagn_mac_set_tim, }; /* This function both allocates and initializes hw and priv. */ struct ieee80211_hw *iwl_alloc_all(void) { struct iwl_priv *priv; struct iwl_op_mode *op_mode; /* mac80211 allocates memory for this device instance, including * space for this driver's private structure */ struct ieee80211_hw *hw; hw = ieee80211_alloc_hw(sizeof(struct iwl_priv) + sizeof(struct iwl_op_mode), &iwlagn_hw_ops); if (!hw) goto out; op_mode = hw->priv; priv = IWL_OP_MODE_GET_DVM(op_mode); priv->hw = hw; out: return hw; }
gpl-2.0
thehelios/pm-linux-3.8.y
kernel/lglock.c
2971
1962
/* See include/linux/lglock.h for description */ #include <linux/module.h> #include <linux/lglock.h> #include <linux/cpu.h> #include <linux/string.h> /* * Note there is no uninit, so lglocks cannot be defined in * modules (but it's fine to use them from there) * Could be added though, just undo lg_lock_init */ void lg_lock_init(struct lglock *lg, char *name) { LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0); } EXPORT_SYMBOL(lg_lock_init); void lg_local_lock(struct lglock *lg) { arch_spinlock_t *lock; preempt_disable(); rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); lock = this_cpu_ptr(lg->lock); arch_spin_lock(lock); } EXPORT_SYMBOL(lg_local_lock); void lg_local_unlock(struct lglock *lg) { arch_spinlock_t *lock; rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); lock = this_cpu_ptr(lg->lock); arch_spin_unlock(lock); preempt_enable(); } EXPORT_SYMBOL(lg_local_unlock); void lg_local_lock_cpu(struct lglock *lg, int cpu) { arch_spinlock_t *lock; preempt_disable(); rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); lock = per_cpu_ptr(lg->lock, cpu); arch_spin_lock(lock); } EXPORT_SYMBOL(lg_local_lock_cpu); void lg_local_unlock_cpu(struct lglock *lg, int cpu) { arch_spinlock_t *lock; rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); lock = per_cpu_ptr(lg->lock, cpu); arch_spin_unlock(lock); preempt_enable(); } EXPORT_SYMBOL(lg_local_unlock_cpu); void lg_global_lock(struct lglock *lg) { int i; preempt_disable(); rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_); for_each_possible_cpu(i) { arch_spinlock_t *lock; lock = per_cpu_ptr(lg->lock, i); arch_spin_lock(lock); } } EXPORT_SYMBOL(lg_global_lock); void lg_global_unlock(struct lglock *lg) { int i; rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); for_each_possible_cpu(i) { arch_spinlock_t *lock; lock = per_cpu_ptr(lg->lock, i); arch_spin_unlock(lock); } preempt_enable(); } EXPORT_SYMBOL(lg_global_unlock);
gpl-2.0
stariver/qt210-kernel
drivers/acpi/acpica/utmath.c
3227
9642
/******************************************************************************* * * Module Name: utmath - Integer math support routines * ******************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utmath") /* * Optional support for 64-bit double-precision integer divide. This code * is configurable and is implemented in order to support 32-bit kernel * environments where a 64-bit double-precision math library is not available. * * Support for a more normal 64-bit divide/modulo (with check for a divide- * by-zero) appears after this optional section of code. */ #ifndef ACPI_USE_NATIVE_DIVIDE /* Structures used only for 64-bit divide */ typedef struct uint64_struct { u32 lo; u32 hi; } uint64_struct; typedef union uint64_overlay { u64 full; struct uint64_struct part; } uint64_overlay; /******************************************************************************* * * FUNCTION: acpi_ut_short_divide * * PARAMETERS: Dividend - 64-bit dividend * Divisor - 32-bit divisor * out_quotient - Pointer to where the quotient is returned * out_remainder - Pointer to where the remainder is returned * * RETURN: Status (Checks for divide-by-zero) * * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits) * divide and modulo. The result is a 64-bit quotient and a * 32-bit remainder. 
* ******************************************************************************/ acpi_status acpi_ut_short_divide(u64 dividend, u32 divisor, u64 *out_quotient, u32 *out_remainder) { union uint64_overlay dividend_ovl; union uint64_overlay quotient; u32 remainder32; ACPI_FUNCTION_TRACE(ut_short_divide); /* Always check for a zero divisor */ if (divisor == 0) { ACPI_ERROR((AE_INFO, "Divide by zero")); return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO); } dividend_ovl.full = dividend; /* * The quotient is 64 bits, the remainder is always 32 bits, * and is generated by the second divide. */ ACPI_DIV_64_BY_32(0, dividend_ovl.part.hi, divisor, quotient.part.hi, remainder32); ACPI_DIV_64_BY_32(remainder32, dividend_ovl.part.lo, divisor, quotient.part.lo, remainder32); /* Return only what was requested */ if (out_quotient) { *out_quotient = quotient.full; } if (out_remainder) { *out_remainder = remainder32; } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_divide * * PARAMETERS: in_dividend - Dividend * in_divisor - Divisor * out_quotient - Pointer to where the quotient is returned * out_remainder - Pointer to where the remainder is returned * * RETURN: Status (Checks for divide-by-zero) * * DESCRIPTION: Perform a divide and modulo. 
* ******************************************************************************/ acpi_status acpi_ut_divide(u64 in_dividend, u64 in_divisor, u64 *out_quotient, u64 *out_remainder) { union uint64_overlay dividend; union uint64_overlay divisor; union uint64_overlay quotient; union uint64_overlay remainder; union uint64_overlay normalized_dividend; union uint64_overlay normalized_divisor; u32 partial1; union uint64_overlay partial2; union uint64_overlay partial3; ACPI_FUNCTION_TRACE(ut_divide); /* Always check for a zero divisor */ if (in_divisor == 0) { ACPI_ERROR((AE_INFO, "Divide by zero")); return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO); } divisor.full = in_divisor; dividend.full = in_dividend; if (divisor.part.hi == 0) { /* * 1) Simplest case is where the divisor is 32 bits, we can * just do two divides */ remainder.part.hi = 0; /* * The quotient is 64 bits, the remainder is always 32 bits, * and is generated by the second divide. */ ACPI_DIV_64_BY_32(0, dividend.part.hi, divisor.part.lo, quotient.part.hi, partial1); ACPI_DIV_64_BY_32(partial1, dividend.part.lo, divisor.part.lo, quotient.part.lo, remainder.part.lo); } else { /* * 2) The general case where the divisor is a full 64 bits * is more difficult */ quotient.part.hi = 0; normalized_dividend = dividend; normalized_divisor = divisor; /* Normalize the operands (shift until the divisor is < 32 bits) */ do { ACPI_SHIFT_RIGHT_64(normalized_divisor.part.hi, normalized_divisor.part.lo); ACPI_SHIFT_RIGHT_64(normalized_dividend.part.hi, normalized_dividend.part.lo); } while (normalized_divisor.part.hi != 0); /* Partial divide */ ACPI_DIV_64_BY_32(normalized_dividend.part.hi, normalized_dividend.part.lo, normalized_divisor.part.lo, quotient.part.lo, partial1); /* * The quotient is always 32 bits, and simply requires adjustment. * The 64-bit remainder must be generated. 
*/ partial1 = quotient.part.lo * divisor.part.hi; partial2.full = (u64) quotient.part.lo * divisor.part.lo; partial3.full = (u64) partial2.part.hi + partial1; remainder.part.hi = partial3.part.lo; remainder.part.lo = partial2.part.lo; if (partial3.part.hi == 0) { if (partial3.part.lo >= dividend.part.hi) { if (partial3.part.lo == dividend.part.hi) { if (partial2.part.lo > dividend.part.lo) { quotient.part.lo--; remainder.full -= divisor.full; } } else { quotient.part.lo--; remainder.full -= divisor.full; } } remainder.full = remainder.full - dividend.full; remainder.part.hi = (u32) - ((s32) remainder.part.hi); remainder.part.lo = (u32) - ((s32) remainder.part.lo); if (remainder.part.lo) { remainder.part.hi--; } } } /* Return only what was requested */ if (out_quotient) { *out_quotient = quotient.full; } if (out_remainder) { *out_remainder = remainder.full; } return_ACPI_STATUS(AE_OK); } #else /******************************************************************************* * * FUNCTION: acpi_ut_short_divide, acpi_ut_divide * * PARAMETERS: See function headers above * * DESCRIPTION: Native versions of the ut_divide functions. Use these if either * 1) The target is a 64-bit platform and therefore 64-bit * integer math is supported directly by the machine. * 2) The target is a 32-bit or 16-bit platform, and the * double-precision integer math library is available to * perform the divide. 
* ******************************************************************************/ acpi_status acpi_ut_short_divide(u64 in_dividend, u32 divisor, u64 *out_quotient, u32 *out_remainder) { ACPI_FUNCTION_TRACE(ut_short_divide); /* Always check for a zero divisor */ if (divisor == 0) { ACPI_ERROR((AE_INFO, "Divide by zero")); return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO); } /* Return only what was requested */ if (out_quotient) { *out_quotient = in_dividend / divisor; } if (out_remainder) { *out_remainder = (u32) (in_dividend % divisor); } return_ACPI_STATUS(AE_OK); } acpi_status acpi_ut_divide(u64 in_dividend, u64 in_divisor, u64 *out_quotient, u64 *out_remainder) { ACPI_FUNCTION_TRACE(ut_divide); /* Always check for a zero divisor */ if (in_divisor == 0) { ACPI_ERROR((AE_INFO, "Divide by zero")); return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO); } /* Return only what was requested */ if (out_quotient) { *out_quotient = in_dividend / in_divisor; } if (out_remainder) { *out_remainder = in_dividend % in_divisor; } return_ACPI_STATUS(AE_OK); } #endif
gpl-2.0